Terraform quoted references are now deprecated (#6203)

parent b98cb74f5e · commit 764a851189
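Terraform 0.12 promoted variables, resources, and module outputs to first-class expressions, so wrapping a bare reference in "${...}" now emits a deprecation warning ("Interpolation-only expressions are deprecated"). This commit drops that interpolation-only quoting across the AWS and OpenStack provisioning configs; quoting is kept only where a reference sits inside a larger template string. A minimal sketch of the pattern (the attribute and variable names below are illustrative, not taken from this diff):

    # 0.11 style: a lone reference wrapped in an interpolation string.
    # Terraform 0.12 warns that interpolation-only expressions are deprecated.
    instance_type = "${var.instance_type}"

    # 0.12 style: reference the value directly.
    instance_type = var.instance_type

    # Quotes remain where the reference is embedded in a larger template:
    name = "kubernetes-${var.cluster_name}-master"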
@@ -3,9 +3,9 @@ terraform {
 }
 
 provider "aws" {
-  access_key = "${var.AWS_ACCESS_KEY_ID}"
-  secret_key = "${var.AWS_SECRET_ACCESS_KEY}"
-  region     = "${var.AWS_DEFAULT_REGION}"
+  access_key = var.AWS_ACCESS_KEY_ID
+  secret_key = var.AWS_SECRET_ACCESS_KEY
+  region     = var.AWS_DEFAULT_REGION
 }
 
 data "aws_availability_zones" "available" {}
@@ -18,30 +18,30 @@ data "aws_availability_zones" "available" {}
 module "aws-vpc" {
   source = "./modules/vpc"
 
-  aws_cluster_name         = "${var.aws_cluster_name}"
-  aws_vpc_cidr_block       = "${var.aws_vpc_cidr_block}"
-  aws_avail_zones          = "${slice(data.aws_availability_zones.available.names, 0, 2)}"
-  aws_cidr_subnets_private = "${var.aws_cidr_subnets_private}"
-  aws_cidr_subnets_public  = "${var.aws_cidr_subnets_public}"
-  default_tags             = "${var.default_tags}"
+  aws_cluster_name         = var.aws_cluster_name
+  aws_vpc_cidr_block       = var.aws_vpc_cidr_block
+  aws_avail_zones          = slice(data.aws_availability_zones.available.names, 0, 2)
+  aws_cidr_subnets_private = var.aws_cidr_subnets_private
+  aws_cidr_subnets_public  = var.aws_cidr_subnets_public
+  default_tags             = var.default_tags
 }
 
 module "aws-elb" {
   source = "./modules/elb"
 
-  aws_cluster_name      = "${var.aws_cluster_name}"
-  aws_vpc_id            = "${module.aws-vpc.aws_vpc_id}"
-  aws_avail_zones       = "${slice(data.aws_availability_zones.available.names, 0, 2)}"
-  aws_subnet_ids_public = "${module.aws-vpc.aws_subnet_ids_public}"
-  aws_elb_api_port      = "${var.aws_elb_api_port}"
-  k8s_secure_api_port   = "${var.k8s_secure_api_port}"
-  default_tags          = "${var.default_tags}"
+  aws_cluster_name      = var.aws_cluster_name
+  aws_vpc_id            = module.aws-vpc.aws_vpc_id
+  aws_avail_zones       = slice(data.aws_availability_zones.available.names, 0, 2)
+  aws_subnet_ids_public = module.aws-vpc.aws_subnet_ids_public
+  aws_elb_api_port      = var.aws_elb_api_port
+  k8s_secure_api_port   = var.k8s_secure_api_port
+  default_tags          = var.default_tags
 }
 
 module "aws-iam" {
   source = "./modules/iam"
 
-  aws_cluster_name = "${var.aws_cluster_name}"
+  aws_cluster_name = var.aws_cluster_name
 }
 
 /*
@@ -50,22 +50,22 @@ module "aws-iam" {
 */
 
 resource "aws_instance" "bastion-server" {
-  ami                         = "${data.aws_ami.distro.id}"
-  instance_type               = "${var.aws_bastion_size}"
-  count                       = "${length(var.aws_cidr_subnets_public)}"
+  ami                         = data.aws_ami.distro.id
+  instance_type               = var.aws_bastion_size
+  count                       = length(var.aws_cidr_subnets_public)
   associate_public_ip_address = true
-  availability_zone           = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
-  subnet_id                   = "${element(module.aws-vpc.aws_subnet_ids_public, count.index)}"
+  availability_zone           = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
+  subnet_id                   = element(module.aws-vpc.aws_subnet_ids_public, count.index)
 
-  vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"
+  vpc_security_group_ids = module.aws-vpc.aws_security_group
 
-  key_name = "${var.AWS_SSH_KEY_NAME}"
+  key_name = var.AWS_SSH_KEY_NAME
 
-  tags = "${merge(var.default_tags, map(
+  tags = merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-bastion-${count.index}",
     "Cluster", "${var.aws_cluster_name}",
     "Role", "bastion-${var.aws_cluster_name}-${count.index}"
-  ))}"
+  ))
 }
 
 /*
@@ -74,71 +74,71 @@ resource "aws_instance" "bastion-server" {
 */
 
 resource "aws_instance" "k8s-master" {
-  ami           = "${data.aws_ami.distro.id}"
-  instance_type = "${var.aws_kube_master_size}"
+  ami           = data.aws_ami.distro.id
+  instance_type = var.aws_kube_master_size
 
-  count = "${var.aws_kube_master_num}"
+  count = var.aws_kube_master_num
 
-  availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
-  subnet_id         = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}"
+  availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
+  subnet_id         = element(module.aws-vpc.aws_subnet_ids_private, count.index)
 
-  vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"
+  vpc_security_group_ids = module.aws-vpc.aws_security_group
 
-  iam_instance_profile = "${module.aws-iam.kube-master-profile}"
-  key_name             = "${var.AWS_SSH_KEY_NAME}"
+  iam_instance_profile = module.aws-iam.kube-master-profile
+  key_name             = var.AWS_SSH_KEY_NAME
 
-  tags = "${merge(var.default_tags, map(
+  tags = merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
     "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
     "Role", "master"
-  ))}"
+  ))
 }
 
 resource "aws_elb_attachment" "attach_master_nodes" {
-  count    = "${var.aws_kube_master_num}"
-  elb      = "${module.aws-elb.aws_elb_api_id}"
-  instance = "${element(aws_instance.k8s-master.*.id, count.index)}"
+  count    = var.aws_kube_master_num
+  elb      = module.aws-elb.aws_elb_api_id
+  instance = element(aws_instance.k8s-master.*.id, count.index)
 }
 
 resource "aws_instance" "k8s-etcd" {
-  ami           = "${data.aws_ami.distro.id}"
-  instance_type = "${var.aws_etcd_size}"
+  ami           = data.aws_ami.distro.id
+  instance_type = var.aws_etcd_size
 
-  count = "${var.aws_etcd_num}"
+  count = var.aws_etcd_num
 
-  availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
-  subnet_id         = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}"
+  availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
+  subnet_id         = element(module.aws-vpc.aws_subnet_ids_private, count.index)
 
-  vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"
+  vpc_security_group_ids = module.aws-vpc.aws_security_group
 
-  key_name = "${var.AWS_SSH_KEY_NAME}"
+  key_name = var.AWS_SSH_KEY_NAME
 
-  tags = "${merge(var.default_tags, map(
+  tags = merge(var.default_tags, map(
    "Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}",
    "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
    "Role", "etcd"
-  ))}"
+  ))
 }
 
 resource "aws_instance" "k8s-worker" {
-  ami           = "${data.aws_ami.distro.id}"
-  instance_type = "${var.aws_kube_worker_size}"
+  ami           = data.aws_ami.distro.id
+  instance_type = var.aws_kube_worker_size
 
-  count = "${var.aws_kube_worker_num}"
+  count = var.aws_kube_worker_num
 
-  availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
-  subnet_id         = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}"
+  availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
+  subnet_id         = element(module.aws-vpc.aws_subnet_ids_private, count.index)
 
-  vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"
+  vpc_security_group_ids = module.aws-vpc.aws_security_group
 
-  iam_instance_profile = "${module.aws-iam.kube-worker-profile}"
-  key_name             = "${var.AWS_SSH_KEY_NAME}"
+  iam_instance_profile = module.aws-iam.kube-worker-profile
+  key_name             = var.AWS_SSH_KEY_NAME
 
-  tags = "${merge(var.default_tags, map(
+  tags = merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
     "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
     "Role", "worker"
-  ))}"
+  ))
 }
 
 /*
@@ -146,16 +146,16 @@ resource "aws_instance" "k8s-worker" {
 *
 */
 data "template_file" "inventory" {
-  template = "${file("${path.module}/templates/inventory.tpl")}"
+  template = file("${path.module}/templates/inventory.tpl")
 
   vars = {
-    public_ip_address_bastion = "${join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip))}"
-    connection_strings_master = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.private_dns, aws_instance.k8s-master.*.private_ip))}"
-    connection_strings_node   = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.private_dns, aws_instance.k8s-worker.*.private_ip))}"
-    connection_strings_etcd   = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip))}"
-    list_master               = "${join("\n", aws_instance.k8s-master.*.private_dns)}"
-    list_node                 = "${join("\n", aws_instance.k8s-worker.*.private_dns)}"
-    list_etcd                 = "${join("\n", aws_instance.k8s-etcd.*.private_dns)}"
+    public_ip_address_bastion = join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip))
+    connection_strings_master = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.private_dns, aws_instance.k8s-master.*.private_ip))
+    connection_strings_node   = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.private_dns, aws_instance.k8s-worker.*.private_ip))
+    connection_strings_etcd   = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip))
+    list_master               = join("\n", aws_instance.k8s-master.*.private_dns)
+    list_node                 = join("\n", aws_instance.k8s-worker.*.private_dns)
+    list_etcd                 = join("\n", aws_instance.k8s-etcd.*.private_dns)
     elb_api_fqdn              = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
   }
 }
@@ -166,6 +166,6 @@ resource "null_resource" "inventories" {
   }
 
   triggers = {
-    template = "${data.template_file.inventory.rendered}"
+    template = data.template_file.inventory.rendered
   }
 }

@@ -1,19 +1,19 @@
 resource "aws_security_group" "aws-elb" {
   name   = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
-  vpc_id = "${var.aws_vpc_id}"
+  vpc_id = var.aws_vpc_id
 
-  tags = "${merge(var.default_tags, map(
+  tags = merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
-  ))}"
+  ))
 }
 
 resource "aws_security_group_rule" "aws-allow-api-access" {
   type              = "ingress"
-  from_port         = "${var.aws_elb_api_port}"
-  to_port           = "${var.k8s_secure_api_port}"
+  from_port         = var.aws_elb_api_port
+  to_port           = var.k8s_secure_api_port
   protocol          = "TCP"
   cidr_blocks       = ["0.0.0.0/0"]
-  security_group_id = "${aws_security_group.aws-elb.id}"
+  security_group_id = aws_security_group.aws-elb.id
 }
 
 resource "aws_security_group_rule" "aws-allow-api-egress" {
@@ -22,19 +22,19 @@ resource "aws_security_group_rule" "aws-allow-api-egress" {
   to_port           = 65535
   protocol          = "TCP"
   cidr_blocks       = ["0.0.0.0/0"]
-  security_group_id = "${aws_security_group.aws-elb.id}"
+  security_group_id = aws_security_group.aws-elb.id
 }
 
 # Create a new AWS ELB for K8S API
 resource "aws_elb" "aws-elb-api" {
   name            = "kubernetes-elb-${var.aws_cluster_name}"
   subnets         = var.aws_subnet_ids_public
-  security_groups = ["${aws_security_group.aws-elb.id}"]
+  security_groups = [aws_security_group.aws-elb.id]
 
   listener {
-    instance_port     = "${var.k8s_secure_api_port}"
+    instance_port     = var.k8s_secure_api_port
     instance_protocol = "tcp"
-    lb_port           = "${var.aws_elb_api_port}"
+    lb_port           = var.aws_elb_api_port
     lb_protocol       = "tcp"
   }
 
@@ -51,7 +51,7 @@ resource "aws_elb" "aws-elb-api" {
   connection_draining         = true
   connection_draining_timeout = 400
 
-  tags = "${merge(var.default_tags, map(
+  tags = merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-elb-api"
-  ))}"
+  ))
 }

@@ -1,7 +1,7 @@
 output "aws_elb_api_id" {
-  value = "${aws_elb.aws-elb-api.id}"
+  value = aws_elb.aws-elb-api.id
 }
 
 output "aws_elb_api_fqdn" {
-  value = "${aws_elb.aws-elb-api.dns_name}"
+  value = aws_elb.aws-elb-api.dns_name
 }

@@ -42,7 +42,7 @@ EOF
 
 resource "aws_iam_role_policy" "kube-master" {
   name = "kubernetes-${var.aws_cluster_name}-master"
-  role = "${aws_iam_role.kube-master.id}"
+  role = aws_iam_role.kube-master.id
 
   policy = <<EOF
 {
@@ -77,7 +77,7 @@ EOF
 
 resource "aws_iam_role_policy" "kube-worker" {
   name = "kubernetes-${var.aws_cluster_name}-node"
-  role = "${aws_iam_role.kube-worker.id}"
+  role = aws_iam_role.kube-worker.id
 
   policy = <<EOF
 {
@@ -132,10 +132,10 @@ EOF
 
 resource "aws_iam_instance_profile" "kube-master" {
   name = "kube_${var.aws_cluster_name}_master_profile"
-  role = "${aws_iam_role.kube-master.name}"
+  role = aws_iam_role.kube-master.name
 }
 
 resource "aws_iam_instance_profile" "kube-worker" {
   name = "kube_${var.aws_cluster_name}_node_profile"
-  role = "${aws_iam_role.kube-worker.name}"
+  role = aws_iam_role.kube-worker.name
 }

@@ -1,7 +1,7 @@
 output "kube-master-profile" {
-  value = "${aws_iam_instance_profile.kube-master.name}"
+  value = aws_iam_instance_profile.kube-master.name
 }
 
 output "kube-worker-profile" {
-  value = "${aws_iam_instance_profile.kube-worker.name}"
+  value = aws_iam_instance_profile.kube-worker.name
 }

@@ -1,55 +1,55 @@
 resource "aws_vpc" "cluster-vpc" {
-  cidr_block = "${var.aws_vpc_cidr_block}"
+  cidr_block = var.aws_vpc_cidr_block
 
   #DNS Related Entries
   enable_dns_support   = true
   enable_dns_hostnames = true
 
-  tags = "${merge(var.default_tags, map(
+  tags = merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-vpc"
-  ))}"
+  ))
 }
 
 resource "aws_eip" "cluster-nat-eip" {
-  count = "${length(var.aws_cidr_subnets_public)}"
+  count = length(var.aws_cidr_subnets_public)
   vpc   = true
 }
 
 resource "aws_internet_gateway" "cluster-vpc-internetgw" {
-  vpc_id = "${aws_vpc.cluster-vpc.id}"
+  vpc_id = aws_vpc.cluster-vpc.id
 
-  tags = "${merge(var.default_tags, map(
+  tags = merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-internetgw"
-  ))}"
+  ))
 }
 
 resource "aws_subnet" "cluster-vpc-subnets-public" {
-  vpc_id            = "${aws_vpc.cluster-vpc.id}"
-  count             = "${length(var.aws_avail_zones)}"
-  availability_zone = "${element(var.aws_avail_zones, count.index)}"
-  cidr_block        = "${element(var.aws_cidr_subnets_public, count.index)}"
+  vpc_id            = aws_vpc.cluster-vpc.id
+  count             = length(var.aws_avail_zones)
+  availability_zone = element(var.aws_avail_zones, count.index)
+  cidr_block        = element(var.aws_cidr_subnets_public, count.index)
 
-  tags = "${merge(var.default_tags, map(
+  tags = merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public",
     "kubernetes.io/cluster/${var.aws_cluster_name}", "member"
-  ))}"
+  ))
 }
 
 resource "aws_nat_gateway" "cluster-nat-gateway" {
-  count         = "${length(var.aws_cidr_subnets_public)}"
-  allocation_id = "${element(aws_eip.cluster-nat-eip.*.id, count.index)}"
-  subnet_id     = "${element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)}"
+  count         = length(var.aws_cidr_subnets_public)
+  allocation_id = element(aws_eip.cluster-nat-eip.*.id, count.index)
+  subnet_id     = element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)
 }
 
 resource "aws_subnet" "cluster-vpc-subnets-private" {
-  vpc_id            = "${aws_vpc.cluster-vpc.id}"
-  count             = "${length(var.aws_avail_zones)}"
-  availability_zone = "${element(var.aws_avail_zones, count.index)}"
-  cidr_block        = "${element(var.aws_cidr_subnets_private, count.index)}"
+  vpc_id            = aws_vpc.cluster-vpc.id
+  count             = length(var.aws_avail_zones)
+  availability_zone = element(var.aws_avail_zones, count.index)
+  cidr_block        = element(var.aws_cidr_subnets_private, count.index)
 
-  tags = "${merge(var.default_tags, map(
+  tags = merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
-  ))}"
+  ))
 }
 
 #Routing in VPC
@@ -57,53 +57,53 @@ resource "aws_subnet" "cluster-vpc-subnets-private" {
 #TODO: Do we need two routing tables for each subnet for redundancy or is one enough?
 
 resource "aws_route_table" "kubernetes-public" {
-  vpc_id = "${aws_vpc.cluster-vpc.id}"
+  vpc_id = aws_vpc.cluster-vpc.id
 
   route {
     cidr_block = "0.0.0.0/0"
-    gateway_id = "${aws_internet_gateway.cluster-vpc-internetgw.id}"
+    gateway_id = aws_internet_gateway.cluster-vpc-internetgw.id
   }
 
-  tags = "${merge(var.default_tags, map(
+  tags = merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-routetable-public"
-  ))}"
+  ))
 }
 
 resource "aws_route_table" "kubernetes-private" {
-  count  = "${length(var.aws_cidr_subnets_private)}"
-  vpc_id = "${aws_vpc.cluster-vpc.id}"
+  count  = length(var.aws_cidr_subnets_private)
+  vpc_id = aws_vpc.cluster-vpc.id
 
   route {
     cidr_block     = "0.0.0.0/0"
-    nat_gateway_id = "${element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)}"
+    nat_gateway_id = element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)
   }
 
-  tags = "${merge(var.default_tags, map(
+  tags = merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
-  ))}"
+  ))
 }
 
 resource "aws_route_table_association" "kubernetes-public" {
-  count          = "${length(var.aws_cidr_subnets_public)}"
-  subnet_id      = "${element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)}"
-  route_table_id = "${aws_route_table.kubernetes-public.id}"
+  count          = length(var.aws_cidr_subnets_public)
+  subnet_id      = element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)
+  route_table_id = aws_route_table.kubernetes-public.id
 }
 
 resource "aws_route_table_association" "kubernetes-private" {
-  count          = "${length(var.aws_cidr_subnets_private)}"
-  subnet_id      = "${element(aws_subnet.cluster-vpc-subnets-private.*.id, count.index)}"
-  route_table_id = "${element(aws_route_table.kubernetes-private.*.id, count.index)}"
+  count          = length(var.aws_cidr_subnets_private)
+  subnet_id      = element(aws_subnet.cluster-vpc-subnets-private.*.id, count.index)
+  route_table_id = element(aws_route_table.kubernetes-private.*.id, count.index)
 }
 
 #Kubernetes Security Groups
 
 resource "aws_security_group" "kubernetes" {
   name   = "kubernetes-${var.aws_cluster_name}-securitygroup"
-  vpc_id = "${aws_vpc.cluster-vpc.id}"
+  vpc_id = aws_vpc.cluster-vpc.id
 
-  tags = "${merge(var.default_tags, map(
+  tags = merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-securitygroup"
-  ))}"
+  ))
 }
 
 resource "aws_security_group_rule" "allow-all-ingress" {
@@ -111,8 +111,8 @@ resource "aws_security_group_rule" "allow-all-ingress" {
   from_port         = 0
   to_port           = 65535
   protocol          = "-1"
-  cidr_blocks       = ["${var.aws_vpc_cidr_block}"]
-  security_group_id = "${aws_security_group.kubernetes.id}"
+  cidr_blocks       = [var.aws_vpc_cidr_block]
+  security_group_id = aws_security_group.kubernetes.id
 }
 
 resource "aws_security_group_rule" "allow-all-egress" {
@@ -121,7 +121,7 @@ resource "aws_security_group_rule" "allow-all-egress" {
   to_port           = 65535
   protocol          = "-1"
   cidr_blocks       = ["0.0.0.0/0"]
-  security_group_id = "${aws_security_group.kubernetes.id}"
+  security_group_id = aws_security_group.kubernetes.id
 }
 
 resource "aws_security_group_rule" "allow-ssh-connections" {
@@ -130,5 +130,5 @@ resource "aws_security_group_rule" "allow-ssh-connections" {
   to_port           = 22
   protocol          = "TCP"
   cidr_blocks       = ["0.0.0.0/0"]
-  security_group_id = "${aws_security_group.kubernetes.id}"
+  security_group_id = aws_security_group.kubernetes.id
 }

@@ -1,5 +1,5 @@
 output "aws_vpc_id" {
-  value = "${aws_vpc.cluster-vpc.id}"
+  value = aws_vpc.cluster-vpc.id
 }
 
 output "aws_subnet_ids_private" {
@@ -15,5 +15,5 @@ output "aws_security_group" {
 }
 
 output "default_tags" {
-  value = "${var.default_tags}"
+  value = var.default_tags
 }

@@ -1,17 +1,17 @@
 output "bastion_ip" {
-  value = "${join("\n", aws_instance.bastion-server.*.public_ip)}"
+  value = join("\n", aws_instance.bastion-server.*.public_ip)
 }
 
 output "masters" {
-  value = "${join("\n", aws_instance.k8s-master.*.private_ip)}"
+  value = join("\n", aws_instance.k8s-master.*.private_ip)
 }
 
 output "workers" {
-  value = "${join("\n", aws_instance.k8s-worker.*.private_ip)}"
+  value = join("\n", aws_instance.k8s-worker.*.private_ip)
 }
 
 output "etcd" {
-  value = "${join("\n", aws_instance.k8s-etcd.*.private_ip)}"
+  value = join("\n", aws_instance.k8s-etcd.*.private_ip)
 }
 
 output "aws_elb_api_fqdn" {
@@ -19,9 +19,9 @@ output "aws_elb_api_fqdn" {
 }
 
 output "inventory" {
-  value = "${data.template_file.inventory.rendered}"
+  value = data.template_file.inventory.rendered
 }
 
 output "default_tags" {
-  value = "${var.default_tags}"
+  value = var.default_tags
 }

@@ -5,102 +5,102 @@ provider "openstack" {
 module "network" {
   source = "./modules/network"
 
-  external_net       = "${var.external_net}"
-  network_name       = "${var.network_name}"
-  subnet_cidr        = "${var.subnet_cidr}"
-  cluster_name       = "${var.cluster_name}"
-  dns_nameservers    = "${var.dns_nameservers}"
-  network_dns_domain = "${var.network_dns_domain}"
-  use_neutron        = "${var.use_neutron}"
-  router_id          = "${var.router_id}"
+  external_net       = var.external_net
+  network_name       = var.network_name
+  subnet_cidr        = var.subnet_cidr
+  cluster_name       = var.cluster_name
+  dns_nameservers    = var.dns_nameservers
+  network_dns_domain = var.network_dns_domain
+  use_neutron        = var.use_neutron
+  router_id          = var.router_id
 }
 
 module "ips" {
   source = "./modules/ips"
 
-  number_of_k8s_masters         = "${var.number_of_k8s_masters}"
-  number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
-  number_of_k8s_nodes           = "${var.number_of_k8s_nodes}"
-  floatingip_pool               = "${var.floatingip_pool}"
-  number_of_bastions            = "${var.number_of_bastions}"
-  external_net                  = "${var.external_net}"
-  network_name                  = "${var.network_name}"
-  router_id                     = "${module.network.router_id}"
-  k8s_nodes                     = "${var.k8s_nodes}"
+  number_of_k8s_masters         = var.number_of_k8s_masters
+  number_of_k8s_masters_no_etcd = var.number_of_k8s_masters_no_etcd
+  number_of_k8s_nodes           = var.number_of_k8s_nodes
+  floatingip_pool               = var.floatingip_pool
+  number_of_bastions            = var.number_of_bastions
+  external_net                  = var.external_net
+  network_name                  = var.network_name
+  router_id                     = module.network.router_id
+  k8s_nodes                     = var.k8s_nodes
 }
 
 module "compute" {
   source = "./modules/compute"
 
-  cluster_name                                 = "${var.cluster_name}"
-  az_list                                      = "${var.az_list}"
-  az_list_node                                 = "${var.az_list_node}"
-  number_of_k8s_masters                        = "${var.number_of_k8s_masters}"
-  number_of_k8s_masters_no_etcd                = "${var.number_of_k8s_masters_no_etcd}"
-  number_of_etcd                               = "${var.number_of_etcd}"
-  number_of_k8s_masters_no_floating_ip         = "${var.number_of_k8s_masters_no_floating_ip}"
-  number_of_k8s_masters_no_floating_ip_no_etcd = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
-  number_of_k8s_nodes                          = "${var.number_of_k8s_nodes}"
-  number_of_bastions                           = "${var.number_of_bastions}"
-  number_of_k8s_nodes_no_floating_ip           = "${var.number_of_k8s_nodes_no_floating_ip}"
-  number_of_gfs_nodes_no_floating_ip           = "${var.number_of_gfs_nodes_no_floating_ip}"
-  k8s_nodes                                    = "${var.k8s_nodes}"
-  bastion_root_volume_size_in_gb               = "${var.bastion_root_volume_size_in_gb}"
-  etcd_root_volume_size_in_gb                  = "${var.etcd_root_volume_size_in_gb}"
-  master_root_volume_size_in_gb                = "${var.master_root_volume_size_in_gb}"
-  node_root_volume_size_in_gb                  = "${var.node_root_volume_size_in_gb}"
-  gfs_root_volume_size_in_gb                   = "${var.gfs_root_volume_size_in_gb}"
-  gfs_volume_size_in_gb                        = "${var.gfs_volume_size_in_gb}"
-  public_key_path                              = "${var.public_key_path}"
-  image                                        = "${var.image}"
-  image_gfs                                    = "${var.image_gfs}"
-  ssh_user                                     = "${var.ssh_user}"
-  ssh_user_gfs                                 = "${var.ssh_user_gfs}"
-  flavor_k8s_master                            = "${var.flavor_k8s_master}"
-  flavor_k8s_node                              = "${var.flavor_k8s_node}"
-  flavor_etcd                                  = "${var.flavor_etcd}"
-  flavor_gfs_node                              = "${var.flavor_gfs_node}"
-  network_name                                 = "${var.network_name}"
-  flavor_bastion                               = "${var.flavor_bastion}"
-  k8s_master_fips                              = "${module.ips.k8s_master_fips}"
-  k8s_master_no_etcd_fips                      = "${module.ips.k8s_master_no_etcd_fips}"
-  k8s_node_fips                                = "${module.ips.k8s_node_fips}"
-  k8s_nodes_fips                               = "${module.ips.k8s_nodes_fips}"
-  bastion_fips                                 = "${module.ips.bastion_fips}"
-  bastion_allowed_remote_ips                   = "${var.bastion_allowed_remote_ips}"
-  master_allowed_remote_ips                    = "${var.master_allowed_remote_ips}"
-  k8s_allowed_remote_ips                       = "${var.k8s_allowed_remote_ips}"
-  k8s_allowed_egress_ips                       = "${var.k8s_allowed_egress_ips}"
-  supplementary_master_groups                  = "${var.supplementary_master_groups}"
-  supplementary_node_groups                    = "${var.supplementary_node_groups}"
-  worker_allowed_ports                         = "${var.worker_allowed_ports}"
-  wait_for_floatingip                          = "${var.wait_for_floatingip}"
-  use_access_ip                                = "${var.use_access_ip}"
-  use_server_groups                            = "${var.use_server_groups}"
+  cluster_name                                 = var.cluster_name
+  az_list                                      = var.az_list
+  az_list_node                                 = var.az_list_node
+  number_of_k8s_masters                        = var.number_of_k8s_masters
+  number_of_k8s_masters_no_etcd                = var.number_of_k8s_masters_no_etcd
+  number_of_etcd                               = var.number_of_etcd
+  number_of_k8s_masters_no_floating_ip         = var.number_of_k8s_masters_no_floating_ip
+  number_of_k8s_masters_no_floating_ip_no_etcd = var.number_of_k8s_masters_no_floating_ip_no_etcd
+  number_of_k8s_nodes                          = var.number_of_k8s_nodes
+  number_of_bastions                           = var.number_of_bastions
+  number_of_k8s_nodes_no_floating_ip           = var.number_of_k8s_nodes_no_floating_ip
+  number_of_gfs_nodes_no_floating_ip           = var.number_of_gfs_nodes_no_floating_ip
+  k8s_nodes                                    = var.k8s_nodes
+  bastion_root_volume_size_in_gb               = var.bastion_root_volume_size_in_gb
+  etcd_root_volume_size_in_gb                  = var.etcd_root_volume_size_in_gb
+  master_root_volume_size_in_gb                = var.master_root_volume_size_in_gb
+  node_root_volume_size_in_gb                  = var.node_root_volume_size_in_gb
+  gfs_root_volume_size_in_gb                   = var.gfs_root_volume_size_in_gb
+  gfs_volume_size_in_gb                        = var.gfs_volume_size_in_gb
+  public_key_path                              = var.public_key_path
+  image                                        = var.image
+  image_gfs                                    = var.image_gfs
+  ssh_user                                     = var.ssh_user
+  ssh_user_gfs                                 = var.ssh_user_gfs
+  flavor_k8s_master                            = var.flavor_k8s_master
+  flavor_k8s_node                              = var.flavor_k8s_node
+  flavor_etcd                                  = var.flavor_etcd
+  flavor_gfs_node                              = var.flavor_gfs_node
+  network_name                                 = var.network_name
+  flavor_bastion                               = var.flavor_bastion
+  k8s_master_fips                              = module.ips.k8s_master_fips
+  k8s_master_no_etcd_fips                      = module.ips.k8s_master_no_etcd_fips
+  k8s_node_fips                                = module.ips.k8s_node_fips
+  k8s_nodes_fips                               = module.ips.k8s_nodes_fips
+  bastion_fips                                 = module.ips.bastion_fips
+  bastion_allowed_remote_ips                   = var.bastion_allowed_remote_ips
+  master_allowed_remote_ips                    = var.master_allowed_remote_ips
+  k8s_allowed_remote_ips                       = var.k8s_allowed_remote_ips
+  k8s_allowed_egress_ips                       = var.k8s_allowed_egress_ips
+  supplementary_master_groups                  = var.supplementary_master_groups
+  supplementary_node_groups                    = var.supplementary_node_groups
+  worker_allowed_ports                         = var.worker_allowed_ports
+  wait_for_floatingip                          = var.wait_for_floatingip
+  use_access_ip                                = var.use_access_ip
+  use_server_groups                            = var.use_server_groups
 
-  network_id = "${module.network.router_id}"
+  network_id = module.network.router_id
 }
 
 output "private_subnet_id" {
-  value = "${module.network.subnet_id}"
+  value = module.network.subnet_id
 }
 
 output "floating_network_id" {
-  value = "${var.external_net}"
+  value = var.external_net
 }
 
 output "router_id" {
-  value = "${module.network.router_id}"
+  value = module.network.router_id
 }
 
 output "k8s_master_fips" {
-  value = "${concat(module.ips.k8s_master_fips, module.ips.k8s_master_no_etcd_fips)}"
+  value = concat(module.ips.k8s_master_fips, module.ips.k8s_master_no_etcd_fips)
 }
 
 output "k8s_node_fips" {
-  value = "${var.number_of_k8s_nodes > 0 ? module.ips.k8s_node_fips : [for key, value in module.ips.k8s_nodes_fips : value.address]}"
+  value = var.number_of_k8s_nodes > 0 ? module.ips.k8s_node_fips : [for key, value in module.ips.k8s_nodes_fips : value.address]
 }
 
 output "bastion_fips" {
-  value = "${module.ips.bastion_fips}"
+  value = module.ips.bastion_fips
 }

@@ -1,14 +1,14 @@
 data "openstack_images_image_v2" "vm_image" {
-  name = "${var.image}"
+  name = var.image
 }
 
 data "openstack_images_image_v2" "gfs_image" {
-  name = "${var.image_gfs == "" ? var.image : var.image_gfs}"
+  name = var.image_gfs == "" ? var.image : var.image_gfs
 }
 
 resource "openstack_compute_keypair_v2" "k8s" {
   name       = "kubernetes-${var.cluster_name}"
-  public_key = "${chomp(file(var.public_key_path))}"
+  public_key = chomp(file(var.public_key_path))
 }
 
 resource "openstack_networking_secgroup_v2" "k8s_master" {
@@ -18,32 +18,32 @@ resource "openstack_networking_secgroup_v2" "k8s_master" {
 }
 
 resource "openstack_networking_secgroup_rule_v2" "k8s_master" {
-  count             = "${length(var.master_allowed_remote_ips)}"
+  count             = length(var.master_allowed_remote_ips)
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = "6443"
   port_range_max    = "6443"
-  remote_ip_prefix  = "${var.master_allowed_remote_ips[count.index]}"
-  security_group_id = "${openstack_networking_secgroup_v2.k8s_master.id}"
+  remote_ip_prefix  = var.master_allowed_remote_ips[count.index]
+  security_group_id = openstack_networking_secgroup_v2.k8s_master.id
 }
 
 resource "openstack_networking_secgroup_v2" "bastion" {
   name                 = "${var.cluster_name}-bastion"
-  count                = "${var.number_of_bastions != "" ? 1 : 0}"
+  count                = var.number_of_bastions != "" ? 1 : 0
   description          = "${var.cluster_name} - Bastion Server"
   delete_default_rules = true
 }
 
 resource "openstack_networking_secgroup_rule_v2" "bastion" {
-  count             = "${var.number_of_bastions != "" ? length(var.bastion_allowed_remote_ips) : 0}"
+  count             = var.number_of_bastions != "" ? length(var.bastion_allowed_remote_ips) : 0
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = "22"
   port_range_max    = "22"
-  remote_ip_prefix  = "${var.bastion_allowed_remote_ips[count.index]}"
-  security_group_id = "${openstack_networking_secgroup_v2.bastion[0].id}"
+  remote_ip_prefix  = var.bastion_allowed_remote_ips[count.index]
+  security_group_id = openstack_networking_secgroup_v2.bastion[0].id
 }
 
 resource "openstack_networking_secgroup_v2" "k8s" {
@@ -55,27 +55,27 @@ resource "openstack_networking_secgroup_v2" "k8s" {
 resource "openstack_networking_secgroup_rule_v2" "k8s" {
   direction         = "ingress"
   ethertype         = "IPv4"
-  remote_group_id   = "${openstack_networking_secgroup_v2.k8s.id}"
-  security_group_id = "${openstack_networking_secgroup_v2.k8s.id}"
+  remote_group_id   = openstack_networking_secgroup_v2.k8s.id
+  security_group_id = openstack_networking_secgroup_v2.k8s.id
 }
 
 resource "openstack_networking_secgroup_rule_v2" "k8s_allowed_remote_ips" {
-  count             = "${length(var.k8s_allowed_remote_ips)}"
+  count             = length(var.k8s_allowed_remote_ips)
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = "22"
   port_range_max    = "22"
-  remote_ip_prefix  = "${var.k8s_allowed_remote_ips[count.index]}"
-  security_group_id = "${openstack_networking_secgroup_v2.k8s.id}"
+  remote_ip_prefix  = var.k8s_allowed_remote_ips[count.index]
+  security_group_id = openstack_networking_secgroup_v2.k8s.id
 }
 
 resource "openstack_networking_secgroup_rule_v2" "egress" {
-  count             = "${length(var.k8s_allowed_egress_ips)}"
+  count             = length(var.k8s_allowed_egress_ips)
   direction         = "egress"
   ethertype         = "IPv4"
-  remote_ip_prefix  = "${var.k8s_allowed_egress_ips[count.index]}"
-  security_group_id = "${openstack_networking_secgroup_v2.k8s.id}"
+  remote_ip_prefix  = var.k8s_allowed_egress_ips[count.index]
+  security_group_id = openstack_networking_secgroup_v2.k8s.id
 }
 
 resource "openstack_networking_secgroup_v2" "worker" {
@@ -85,14 +85,14 @@ resource "openstack_networking_secgroup_v2" "worker" {
 }
 
 resource "openstack_networking_secgroup_rule_v2" "worker" {
-  count             = "${length(var.worker_allowed_ports)}"
+  count             = length(var.worker_allowed_ports)
   direction         = "ingress"
   ethertype         = "IPv4"
-  protocol          = "${lookup(var.worker_allowed_ports[count.index], "protocol", "tcp")}"
-  port_range_min    = "${lookup(var.worker_allowed_ports[count.index], "port_range_min")}"
-  port_range_max    = "${lookup(var.worker_allowed_ports[count.index], "port_range_max")}"
-  remote_ip_prefix  = "${lookup(var.worker_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0")}"
-  security_group_id = "${openstack_networking_secgroup_v2.worker.id}"
+  protocol          = lookup(var.worker_allowed_ports[count.index], "protocol", "tcp")
+  port_range_min    = lookup(var.worker_allowed_ports[count.index], "port_range_min")
+  port_range_max    = lookup(var.worker_allowed_ports[count.index], "port_range_max")
+  remote_ip_prefix  = lookup(var.worker_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0")
+  security_group_id = openstack_networking_secgroup_v2.worker.id
 }
 
 resource "openstack_compute_servergroup_v2" "k8s_master" {
@@ -115,17 +115,17 @@ resource "openstack_compute_servergroup_v2" "k8s_etcd" {
 
 resource "openstack_compute_instance_v2" "bastion" {
   name       = "${var.cluster_name}-bastion-${count.index + 1}"
-  count      = "${var.number_of_bastions}"
-  image_name = "${var.image}"
-  flavor_id  = "${var.flavor_bastion}"
-  key_pair   = "${openstack_compute_keypair_v2.k8s.name}"
+  count      = var.number_of_bastions
+  image_name = var.image
+  flavor_id  = var.flavor_bastion
+  key_pair   = openstack_compute_keypair_v2.k8s.name
 
   dynamic "block_device" {
     for_each = var.bastion_root_volume_size_in_gb > 0 ? [var.image] : []
     content {
-      uuid                  = "${data.openstack_images_image_v2.vm_image.id}"
+      uuid                  = data.openstack_images_image_v2.vm_image.id
       source_type           = "image"
-      volume_size           = "${var.bastion_root_volume_size_in_gb}"
+      volume_size           = var.bastion_root_volume_size_in_gb
       boot_index            = 0
       destination_type      = "volume"
       delete_on_termination = true
@@ -133,18 +133,18 @@ resource "openstack_compute_instance_v2" "bastion" {
   }
 
   network {
-    name = "${var.network_name}"
+    name = var.network_name
   }
 
-  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
-    "${element(openstack_networking_secgroup_v2.bastion.*.name, count.index)}",
+  security_groups = [openstack_networking_secgroup_v2.k8s.name,
+    element(openstack_networking_secgroup_v2.bastion.*.name, count.index),
   ]
 
   metadata = {
-    ssh_user         = "${var.ssh_user}"
+    ssh_user         = var.ssh_user
     kubespray_groups = "bastion"
-    depends_on       = "${var.network_id}"
-    use_access_ip    = "${var.use_access_ip}"
+    depends_on       = var.network_id
+    use_access_ip    = var.use_access_ip
   }
 
   provisioner "local-exec" {
@@ -154,19 +154,19 @@ resource "openstack_compute_instance_v2" "bastion" {
 
 resource "openstack_compute_instance_v2" "k8s_master" {
   name              = "${var.cluster_name}-k8s-master-${count.index + 1}"
-  count             = "${var.number_of_k8s_masters}"
-  availability_zone = "${element(var.az_list, count.index)}"
-  image_name        = "${var.image}"
-  flavor_id         = "${var.flavor_k8s_master}"
-  key_pair          = "${openstack_compute_keypair_v2.k8s.name}"
+  count             = var.number_of_k8s_masters
+  availability_zone = element(var.az_list, count.index)
+  image_name        = var.image
+  flavor_id         = var.flavor_k8s_master
+  key_pair          = openstack_compute_keypair_v2.k8s.name
 
 
   dynamic "block_device" {
     for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
     content {
-      uuid                  = "${data.openstack_images_image_v2.vm_image.id}"
+      uuid                  = data.openstack_images_image_v2.vm_image.id
       source_type           = "image"
-      volume_size           = "${var.master_root_volume_size_in_gb}"
+      volume_size           = var.master_root_volume_size_in_gb
       boot_index            = 0
       destination_type      = "volume"
       delete_on_termination = true
@@ -174,25 +174,25 @@ resource "openstack_compute_instance_v2" "k8s_master" {
   }
 
   network {
-    name = "${var.network_name}"
+    name = var.network_name
   }
 
-  security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
-    "${openstack_networking_secgroup_v2.k8s.name}",
+  security_groups = [openstack_networking_secgroup_v2.k8s_master.name,
+    openstack_networking_secgroup_v2.k8s.name,
   ]
 
   dynamic "scheduler_hints" {
     for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
     content {
-      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+      group = openstack_compute_servergroup_v2.k8s_master[0].id
     }
   }
 
   metadata = {
-    ssh_user         = "${var.ssh_user}"
+    ssh_user         = var.ssh_user
     kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
-    depends_on       = "${var.network_id}"
-    use_access_ip    = "${var.use_access_ip}"
+    depends_on       = var.network_id
+    use_access_ip    = var.use_access_ip
   }
 
   provisioner "local-exec" {
@@ -202,19 +202,19 @@ resource "openstack_compute_instance_v2" "k8s_master" {
 
 resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
   name              = "${var.cluster_name}-k8s-master-ne-${count.index + 1}"
-  count             = "${var.number_of_k8s_masters_no_etcd}"
-  availability_zone = "${element(var.az_list, count.index)}"
-  image_name        = "${var.image}"
-  flavor_id         = "${var.flavor_k8s_master}"
-  key_pair          = "${openstack_compute_keypair_v2.k8s.name}"
+  count             = var.number_of_k8s_masters_no_etcd
+  availability_zone = element(var.az_list, count.index)
+  image_name        = var.image
+  flavor_id         = var.flavor_k8s_master
+  key_pair          = openstack_compute_keypair_v2.k8s.name
 
 
   dynamic "block_device" {
     for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
     content {
-      uuid                  = "${data.openstack_images_image_v2.vm_image.id}"
+      uuid                  = data.openstack_images_image_v2.vm_image.id
       source_type           = "image"
-      volume_size           = "${var.master_root_volume_size_in_gb}"
+      volume_size           = var.master_root_volume_size_in_gb
       boot_index            = 0
       destination_type      = "volume"
       delete_on_termination = true
@@ -222,25 +222,25 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
   }
 
   network {
-    name = "${var.network_name}"
+    name = var.network_name
   }
 
-  security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
-    "${openstack_networking_secgroup_v2.k8s.name}",
+  security_groups = [openstack_networking_secgroup_v2.k8s_master.name,
+    openstack_networking_secgroup_v2.k8s.name,
   ]
 
   dynamic "scheduler_hints" {
     for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
     content {
-      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+      group = openstack_compute_servergroup_v2.k8s_master[0].id
     }
   }
 
   metadata = {
-    ssh_user         = "${var.ssh_user}"
+    ssh_user         = var.ssh_user
     kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
-    depends_on       = "${var.network_id}"
-    use_access_ip    = "${var.use_access_ip}"
+    depends_on       = var.network_id
+    use_access_ip    = var.use_access_ip
   }
 
   provisioner "local-exec" {
@@ -250,18 +250,18 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
 
 resource "openstack_compute_instance_v2" "etcd" {
   name              = "${var.cluster_name}-etcd-${count.index + 1}"
-  count             = "${var.number_of_etcd}"
-  availability_zone = "${element(var.az_list, count.index)}"
-  image_name        = "${var.image}"
-  flavor_id         = "${var.flavor_etcd}"
-  key_pair          = "${openstack_compute_keypair_v2.k8s.name}"
+  count             = var.number_of_etcd
+  availability_zone = element(var.az_list, count.index)
+  image_name        = var.image
+  flavor_id         = var.flavor_etcd
+  key_pair          = openstack_compute_keypair_v2.k8s.name
 
   dynamic "block_device" {
     for_each = var.etcd_root_volume_size_in_gb > 0 ? [var.image] : []
     content {
-      uuid                  = "${data.openstack_images_image_v2.vm_image.id}"
+      uuid                  = data.openstack_images_image_v2.vm_image.id
       source_type           = "image"
-      volume_size           = "${var.etcd_root_volume_size_in_gb}"
+      volume_size           = var.etcd_root_volume_size_in_gb
       boot_index            = 0
       destination_type      = "volume"
       delete_on_termination = true
@@ -269,40 +269,40 @@ resource "openstack_compute_instance_v2" "etcd" {
   }
 
   network {
-    name = "${var.network_name}"
+    name = var.network_name
   }
 
-  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
+  security_groups = [openstack_networking_secgroup_v2.k8s.name]
 
   dynamic "scheduler_hints" {
     for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
     content {
-      group = "${openstack_compute_servergroup_v2.k8s_etcd[0].id}"
+      group = openstack_compute_servergroup_v2.k8s_etcd[0].id
     }
   }
 
   metadata = {
-    ssh_user         = "${var.ssh_user}"
+    ssh_user         = var.ssh_user
     kubespray_groups = "etcd,vault,no-floating"
-    depends_on       = "${var.network_id}"
-    use_access_ip    = "${var.use_access_ip}"
+    depends_on       = var.network_id
+    use_access_ip    = var.use_access_ip
   }
 }
 
 resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
   name              = "${var.cluster_name}-k8s-master-nf-${count.index + 1}"
-  count             = "${var.number_of_k8s_masters_no_floating_ip}"
-  availability_zone = "${element(var.az_list, count.index)}"
-  image_name        = "${var.image}"
-  flavor_id         = "${var.flavor_k8s_master}"
-  key_pair          = "${openstack_compute_keypair_v2.k8s.name}"
+  count             = var.number_of_k8s_masters_no_floating_ip
+  availability_zone = element(var.az_list, count.index)
+  image_name        = var.image
+  flavor_id         = var.flavor_k8s_master
+  key_pair          = openstack_compute_keypair_v2.k8s.name
 
   dynamic "block_device" {
     for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
     content {
-      uuid                  = "${data.openstack_images_image_v2.vm_image.id}"
+      uuid                  = data.openstack_images_image_v2.vm_image.id
       source_type           = "image"
-      volume_size           = "${var.master_root_volume_size_in_gb}"
+      volume_size           = var.master_root_volume_size_in_gb
       boot_index            = 0
       destination_type      = "volume"
       delete_on_termination = true
@@ -310,42 +310,42 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
   }
 
   network {
-    name = "${var.network_name}"
+    name = var.network_name
   }
 
-  security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
-    "${openstack_networking_secgroup_v2.k8s.name}",
+  security_groups = [openstack_networking_secgroup_v2.k8s_master.name,
+    openstack_networking_secgroup_v2.k8s.name,
   ]
 
   dynamic "scheduler_hints" {
     for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
     content {
-      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+      group = openstack_compute_servergroup_v2.k8s_master[0].id
     }
   }
 
   metadata = {
-    ssh_user         = "${var.ssh_user}"
+    ssh_user         = var.ssh_user
     kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
-    depends_on       = "${var.network_id}"
-    use_access_ip    = "${var.use_access_ip}"
+    depends_on       = var.network_id
+    use_access_ip    = var.use_access_ip
   }
 }
 
 resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
   name              = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}"
-  count             = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
-  availability_zone = "${element(var.az_list, count.index)}"
-  image_name        = "${var.image}"
-  flavor_id         = "${var.flavor_k8s_master}"
-  key_pair          = "${openstack_compute_keypair_v2.k8s.name}"
+  count             = var.number_of_k8s_masters_no_floating_ip_no_etcd
+  availability_zone = element(var.az_list, count.index)
+  image_name        = var.image
+  flavor_id         = var.flavor_k8s_master
+  key_pair          = openstack_compute_keypair_v2.k8s.name
 
   dynamic "block_device" {
     for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
     content {
-      uuid                  = "${data.openstack_images_image_v2.vm_image.id}"
+      uuid                  = data.openstack_images_image_v2.vm_image.id
       source_type           = "image"
-      volume_size           = "${var.master_root_volume_size_in_gb}"
+      volume_size           = var.master_root_volume_size_in_gb
       boot_index            = 0
       destination_type      = "volume"
       delete_on_termination = true
@@ -353,42 +353,42 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
   }
 
   network {
-    name = "${var.network_name}"
+    name = var.network_name
   }
 
-  security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
-    "${openstack_networking_secgroup_v2.k8s.name}",
+  security_groups = [openstack_networking_secgroup_v2.k8s_master.name,
+    openstack_networking_secgroup_v2.k8s.name,
   ]
 
   dynamic "scheduler_hints" {
     for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
     content {
-      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+      group = openstack_compute_servergroup_v2.k8s_master[0].id
     }
   }
 
   metadata = {
-    ssh_user         = "${var.ssh_user}"
+    ssh_user         = var.ssh_user
     kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
-    depends_on       = "${var.network_id}"
-    use_access_ip    = "${var.use_access_ip}"
+    depends_on       = var.network_id
+    use_access_ip    = var.use_access_ip
   }
 }
 
 resource "openstack_compute_instance_v2" "k8s_node" {
   name              = "${var.cluster_name}-k8s-node-${count.index + 1}"
-  count             = "${var.number_of_k8s_nodes}"
-  availability_zone = "${element(var.az_list_node, count.index)}"
-  image_name        = "${var.image}"
-  flavor_id         = "${var.flavor_k8s_node}"
-  key_pair          = "${openstack_compute_keypair_v2.k8s.name}"
+  count             = var.number_of_k8s_nodes
+  availability_zone = element(var.az_list_node, count.index)
+  image_name        = var.image
+  flavor_id         = var.flavor_k8s_node
+  key_pair          = openstack_compute_keypair_v2.k8s.name
 
   dynamic "block_device" {
     for_each = var.node_root_volume_size_in_gb > 0 ? [var.image] : []
     content {
-      uuid                  = "${data.openstack_images_image_v2.vm_image.id}"
+      uuid                  = data.openstack_images_image_v2.vm_image.id
       source_type           = "image"
-      volume_size           = "${var.node_root_volume_size_in_gb}"
+      volume_size           = var.node_root_volume_size_in_gb
       boot_index            = 0
       destination_type      = "volume"
       delete_on_termination = true
@@ -396,25 +396,25 @@ resource "openstack_compute_instance_v2" "k8s_node" {
   }
 
   network {
-    name = "${var.network_name}"
+    name = var.network_name
   }
 
-  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
-    "${openstack_networking_secgroup_v2.worker.name}",
+  security_groups = [openstack_networking_secgroup_v2.k8s.name,
+    openstack_networking_secgroup_v2.worker.name,
   ]
 
   dynamic "scheduler_hints" {
     for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
     content {
-      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
+      group = openstack_compute_servergroup_v2.k8s_node[0].id
     }
   }
 
   metadata = {
-    ssh_user         = "${var.ssh_user}"
+    ssh_user         = var.ssh_user
     kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
-    depends_on       = "${var.network_id}"
-    use_access_ip    = "${var.use_access_ip}"
+    depends_on       = var.network_id
+    use_access_ip    = var.use_access_ip
   }
 
   provisioner "local-exec" {
@@ -424,18 +424,18 @@ resource "openstack_compute_instance_v2" "k8s_node" {
 
 resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
   name              = "${var.cluster_name}-k8s-node-nf-${count.index + 1}"
-  count             = "${var.number_of_k8s_nodes_no_floating_ip}"
-  availability_zone = "${element(var.az_list_node, count.index)}"
-  image_name        = "${var.image}"
-  flavor_id         = "${var.flavor_k8s_node}"
-  key_pair          = "${openstack_compute_keypair_v2.k8s.name}"
+  count             = var.number_of_k8s_nodes_no_floating_ip
+  availability_zone = element(var.az_list_node, count.index)
+  image_name        = var.image
+  flavor_id         = var.flavor_k8s_node
+  key_pair          = openstack_compute_keypair_v2.k8s.name
 
   dynamic "block_device" {
     for_each = var.node_root_volume_size_in_gb > 0 ? [var.image] : []
     content {
-      uuid                  = "${data.openstack_images_image_v2.vm_image.id}"
+      uuid                  = data.openstack_images_image_v2.vm_image.id
       source_type           = "image"
-      volume_size           = "${var.node_root_volume_size_in_gb}"
+      volume_size           = var.node_root_volume_size_in_gb
       boot_index            = 0
       destination_type      = "volume"
       delete_on_termination = true
@@ -443,42 +443,42 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
   }
 
   network {
-    name = "${var.network_name}"
+    name = var.network_name
   }
 
-  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
-    "${openstack_networking_secgroup_v2.worker.name}",
+  security_groups = [openstack_networking_secgroup_v2.k8s.name,
+    openstack_networking_secgroup_v2.worker.name,
   ]
 
   dynamic "scheduler_hints" {
     for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
     content {
-      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
+      group = openstack_compute_servergroup_v2.k8s_node[0].id
     }
   }
 
   metadata = {
-    ssh_user         = "${var.ssh_user}"
+    ssh_user         = var.ssh_user
     kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
-    depends_on       = "${var.network_id}"
-    use_access_ip    = "${var.use_access_ip}"
+    depends_on       = var.network_id
+    use_access_ip    = var.use_access_ip
   }
 }
 
 resource "openstack_compute_instance_v2" "k8s_nodes" {
   for_each          = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
   name              = "${var.cluster_name}-k8s-node-${each.key}"
-  availability_zone = "${each.value.az}"
-  image_name        = "${var.image}"
-  flavor_id         = "${each.value.flavor}"
-  key_pair          = "${openstack_compute_keypair_v2.k8s.name}"
+  availability_zone = each.value.az
+  image_name        = var.image
+  flavor_id         = each.value.flavor
+  key_pair          = openstack_compute_keypair_v2.k8s.name
 
   dynamic "block_device" {
     for_each = var.node_root_volume_size_in_gb > 0 ? [var.image] : []
     content {
-      uuid                  = "${data.openstack_images_image_v2.vm_image.id}"
+      uuid                  = data.openstack_images_image_v2.vm_image.id
       source_type           = "image"
-      volume_size           = "${var.node_root_volume_size_in_gb}"
+      volume_size           = var.node_root_volume_size_in_gb
       boot_index            = 0
       destination_type      = "volume"
       delete_on_termination = true
@@ -486,25 +486,25 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
   }

   network {
-    name = "${var.network_name}"
+    name = var.network_name
   }

-  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
-    "${openstack_networking_secgroup_v2.worker.name}",
+  security_groups = [openstack_networking_secgroup_v2.k8s.name,
+    openstack_networking_secgroup_v2.worker.name,
   ]

   dynamic "scheduler_hints" {
     for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
     content {
-      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
+      group = openstack_compute_servergroup_v2.k8s_node[0].id
     }
   }

   metadata = {
-    ssh_user         = "${var.ssh_user}"
+    ssh_user         = var.ssh_user
     kubespray_groups = "kube-node,k8s-cluster,%{if each.value.floating_ip == false}no-floating,%{endif}${var.supplementary_node_groups}"
-    depends_on       = "${var.network_id}"
-    use_access_ip    = "${var.use_access_ip}"
+    depends_on       = var.network_id
+    use_access_ip    = var.use_access_ip
   }

   provisioner "local-exec" {
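The `kubespray_groups` value in this hunk also uses a 0.12 template directive: `%{if}`/`%{endif}` emit a fragment conditionally inside a quoted template, here adding `no-floating,` only for nodes without a floating IP:

```hcl
# %{if}/%{endif} is a string template directive (Terraform 0.12+), distinct
# from ${...} interpolation; it conditionally includes literal text.
kubespray_groups = "kube-node,k8s-cluster,%{if each.value.floating_ip == false}no-floating,%{endif}${var.supplementary_node_groups}"
```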
@@ -514,18 +514,18 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {

 resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
   name              = "${var.cluster_name}-gfs-node-nf-${count.index + 1}"
-  count             = "${var.number_of_gfs_nodes_no_floating_ip}"
-  availability_zone = "${element(var.az_list, count.index)}"
-  image_name        = "${var.image_gfs}"
-  flavor_id         = "${var.flavor_gfs_node}"
-  key_pair          = "${openstack_compute_keypair_v2.k8s.name}"
+  count             = var.number_of_gfs_nodes_no_floating_ip
+  availability_zone = element(var.az_list, count.index)
+  image_name        = var.image_gfs
+  flavor_id         = var.flavor_gfs_node
+  key_pair          = openstack_compute_keypair_v2.k8s.name

   dynamic "block_device" {
     for_each = var.gfs_root_volume_size_in_gb > 0 ? [var.image] : []
     content {
-      uuid                  = "${data.openstack_images_image_v2.vm_image.id}"
+      uuid                  = data.openstack_images_image_v2.vm_image.id
       source_type           = "image"
-      volume_size           = "${var.gfs_root_volume_size_in_gb}"
+      volume_size           = var.gfs_root_volume_size_in_gb
       boot_index            = 0
       destination_type      = "volume"
       delete_on_termination = true

@@ -533,70 +533,70 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
   }

   network {
-    name = "${var.network_name}"
+    name = var.network_name
   }

-  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
+  security_groups = [openstack_networking_secgroup_v2.k8s.name]

   dynamic "scheduler_hints" {
     for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
     content {
-      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
+      group = openstack_compute_servergroup_v2.k8s_node[0].id
     }
   }

   metadata = {
-    ssh_user         = "${var.ssh_user_gfs}"
+    ssh_user         = var.ssh_user_gfs
     kubespray_groups = "gfs-cluster,network-storage,no-floating"
-    depends_on       = "${var.network_id}"
-    use_access_ip    = "${var.use_access_ip}"
+    depends_on       = var.network_id
+    use_access_ip    = var.use_access_ip
   }
 }
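The `dynamic "block_device"` blocks in these resources are 0.12 syntax as well: they generate one nested block per element of `for_each`, so an empty list generates none. Here that attaches a bootable root volume only when a size is configured. Condensed from the hunk above:

```hcl
# With a non-zero size, for_each is a one-element list and exactly one
# block_device block is emitted; with zero it is empty and none is.
dynamic "block_device" {
  for_each = var.gfs_root_volume_size_in_gb > 0 ? [var.image] : []
  content {
    uuid             = data.openstack_images_image_v2.vm_image.id
    source_type      = "image"
    volume_size      = var.gfs_root_volume_size_in_gb
    destination_type = "volume"
  }
}
```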

 resource "openstack_compute_floatingip_associate_v2" "bastion" {
-  count                 = "${var.number_of_bastions}"
-  floating_ip           = "${var.bastion_fips[count.index]}"
-  instance_id           = "${element(openstack_compute_instance_v2.bastion.*.id, count.index)}"
-  wait_until_associated = "${var.wait_for_floatingip}"
+  count                 = var.number_of_bastions
+  floating_ip           = var.bastion_fips[count.index]
+  instance_id           = element(openstack_compute_instance_v2.bastion.*.id, count.index)
+  wait_until_associated = var.wait_for_floatingip
 }

 resource "openstack_compute_floatingip_associate_v2" "k8s_master" {
-  count                 = "${var.number_of_k8s_masters}"
-  instance_id           = "${element(openstack_compute_instance_v2.k8s_master.*.id, count.index)}"
-  floating_ip           = "${var.k8s_master_fips[count.index]}"
-  wait_until_associated = "${var.wait_for_floatingip}"
+  count                 = var.number_of_k8s_masters
+  instance_id           = element(openstack_compute_instance_v2.k8s_master.*.id, count.index)
+  floating_ip           = var.k8s_master_fips[count.index]
+  wait_until_associated = var.wait_for_floatingip
 }

 resource "openstack_compute_floatingip_associate_v2" "k8s_master_no_etcd" {
-  count       = "${var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0}"
-  instance_id = "${element(openstack_compute_instance_v2.k8s_master_no_etcd.*.id, count.index)}"
-  floating_ip = "${var.k8s_master_no_etcd_fips[count.index]}"
+  count       = var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0
+  instance_id = element(openstack_compute_instance_v2.k8s_master_no_etcd.*.id, count.index)
+  floating_ip = var.k8s_master_no_etcd_fips[count.index]
 }

 resource "openstack_compute_floatingip_associate_v2" "k8s_node" {
-  count                 = "${var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes : 0}"
-  floating_ip           = "${var.k8s_node_fips[count.index]}"
-  instance_id           = "${element(openstack_compute_instance_v2.k8s_node[*].id, count.index)}"
-  wait_until_associated = "${var.wait_for_floatingip}"
+  count                 = var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes : 0
+  floating_ip           = var.k8s_node_fips[count.index]
+  instance_id           = element(openstack_compute_instance_v2.k8s_node[*].id, count.index)
+  wait_until_associated = var.wait_for_floatingip
 }
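Both splat spellings appear above and both now stand unquoted: the legacy attribute splat `.*.id` and the 0.12 full splat `[*].id` are equivalent for plain attribute access on a counted resource:

```hcl
# Legacy splat, still accepted:
instance_id = element(openstack_compute_instance_v2.bastion.*.id, count.index)

# Equivalent 0.12 full splat:
instance_id = element(openstack_compute_instance_v2.k8s_node[*].id, count.index)
```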

 resource "openstack_compute_floatingip_associate_v2" "k8s_nodes" {
   for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? { for key, value in var.k8s_nodes : key => value if value.floating_ip } : {}
-  floating_ip           = "${var.k8s_nodes_fips[each.key].address}"
-  instance_id           = "${openstack_compute_instance_v2.k8s_nodes[each.key].id}"
-  wait_until_associated = "${var.wait_for_floatingip}"
+  floating_ip           = var.k8s_nodes_fips[each.key].address
+  instance_id           = openstack_compute_instance_v2.k8s_nodes[each.key].id
+  wait_until_associated = var.wait_for_floatingip
 }
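The `for_each` above leans on another 0.12 construct, a `for` expression with an `if` clause, to keep only the nodes that requested a floating IP:

```hcl
# Filters the k8s_nodes map down to entries whose floating_ip flag is set;
# the result is a map, as for_each requires.
for_each = { for key, value in var.k8s_nodes : key => value if value.floating_ip }
```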

 resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
   name        = "${var.cluster_name}-glusterfs_volume-${count.index + 1}"
-  count       = "${var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
+  count       = var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0
   description = "Non-ephemeral volume for GlusterFS"
-  size        = "${var.gfs_volume_size_in_gb}"
+  size        = var.gfs_volume_size_in_gb
 }

 resource "openstack_compute_volume_attach_v2" "glusterfs_volume" {
-  count       = "${var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
-  instance_id = "${element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index)}"
-  volume_id   = "${element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)}"
+  count       = var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0
+  instance_id = element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index)
+  volume_id   = element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)
 }
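The `count` conditionals here toggle entire resources: when GlusterFS nodes boot from root volumes, no separate data volume is created or attached. Pre-0.12 the whole conditional had to sit inside a quoted interpolation; now it is a bare expression:

```hcl
# 0.11: count = "${var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
# 0.12:
count = var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0
```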
|
||||
|
|
|
@ -1,36 +1,36 @@
|
|||
resource "null_resource" "dummy_dependency" {
|
||||
triggers = {
|
||||
dependency_id = "${var.router_id}"
|
||||
dependency_id = var.router_id
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "k8s_master" {
|
||||
count = "${var.number_of_k8s_masters}"
|
||||
pool = "${var.floatingip_pool}"
|
||||
depends_on = ["null_resource.dummy_dependency"]
|
||||
count = var.number_of_k8s_masters
|
||||
pool = var.floatingip_pool
|
||||
depends_on = [null_resource.dummy_dependency]
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "k8s_master_no_etcd" {
|
||||
count = "${var.number_of_k8s_masters_no_etcd}"
|
||||
pool = "${var.floatingip_pool}"
|
||||
depends_on = ["null_resource.dummy_dependency"]
|
||||
count = var.number_of_k8s_masters_no_etcd
|
||||
pool = var.floatingip_pool
|
||||
depends_on = [null_resource.dummy_dependency]
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "k8s_node" {
|
||||
count = "${var.number_of_k8s_nodes}"
|
||||
pool = "${var.floatingip_pool}"
|
||||
depends_on = ["null_resource.dummy_dependency"]
|
||||
count = var.number_of_k8s_nodes
|
||||
pool = var.floatingip_pool
|
||||
depends_on = [null_resource.dummy_dependency]
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "bastion" {
|
||||
count = "${var.number_of_bastions}"
|
||||
pool = "${var.floatingip_pool}"
|
||||
depends_on = ["null_resource.dummy_dependency"]
|
||||
count = var.number_of_bastions
|
||||
pool = var.floatingip_pool
|
||||
depends_on = [null_resource.dummy_dependency]
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "k8s_nodes" {
|
||||
for_each = var.number_of_k8s_nodes == 0 ? { for key, value in var.k8s_nodes : key => value if value.floating_ip } : {}
|
||||
pool = "${var.floatingip_pool}"
|
||||
depends_on = ["null_resource.dummy_dependency"]
|
||||
pool = var.floatingip_pool
|
||||
depends_on = [null_resource.dummy_dependency]
|
||||
}
|
||||
|
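`depends_on` changes kind in 0.12 rather than merely losing quotes: it now takes bare resource references instead of strings naming them, so the addresses are validated against resources that actually exist in the configuration:

```hcl
# 0.11: depends_on = ["null_resource.dummy_dependency"]
# 0.12: a first-class reference to the resource itself.
depends_on = [null_resource.dummy_dependency]
```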

@@ -1,19 +1,19 @@
output "k8s_master_fips" {
|
||||
value = "${openstack_networking_floatingip_v2.k8s_master[*].address}"
|
||||
value = openstack_networking_floatingip_v2.k8s_master[*].address
|
||||
}
|
||||
|
||||
output "k8s_master_no_etcd_fips" {
|
||||
value = "${openstack_networking_floatingip_v2.k8s_master_no_etcd[*].address}"
|
||||
value = openstack_networking_floatingip_v2.k8s_master_no_etcd[*].address
|
||||
}
|
||||
|
||||
output "k8s_node_fips" {
|
||||
value = "${openstack_networking_floatingip_v2.k8s_node[*].address}"
|
||||
value = openstack_networking_floatingip_v2.k8s_node[*].address
|
||||
}
|
||||
|
||||
output "k8s_nodes_fips" {
|
||||
value = "${openstack_networking_floatingip_v2.k8s_nodes}"
|
||||
value = openstack_networking_floatingip_v2.k8s_nodes
|
||||
}
|
||||
|
||||
output "bastion_fips" {
|
||||
value = "${openstack_networking_floatingip_v2.bastion[*].address}"
|
||||
value = openstack_networking_floatingip_v2.bastion[*].address
|
||||
}
|
||||
|
|
|
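Unlike its siblings, `k8s_nodes_fips` returns the whole resource rather than an address splat: `openstack_networking_floatingip_v2.k8s_nodes` uses `for_each`, so it is a map of objects keyed by node name, and splat expressions only apply to lists. A consumer could project out the addresses with a `for` expression; a hypothetical sketch (the `module.ips` name is illustrative, not from this commit):

```hcl
# Hypothetical caller: reduce the k8s_nodes_fips output (a map of
# floating-IP objects) to just the addresses, keyed by node name.
locals {
  k8s_node_addresses = { for name, fip in module.ips.k8s_nodes_fips : name => fip.address }
}
```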
@@ -1,33 +1,33 @@
 resource "openstack_networking_router_v2" "k8s" {
   name                = "${var.cluster_name}-router"
-  count               = "${var.use_neutron}" == 1 && "${var.router_id}" == null ? 1 : 0
+  count               = var.use_neutron == 1 && var.router_id == null ? 1 : 0
   admin_state_up      = "true"
-  external_network_id = "${var.external_net}"
+  external_network_id = var.external_net
 }

 data "openstack_networking_router_v2" "k8s" {
-  router_id = "${var.router_id}"
-  count     = "${var.use_neutron}" == 1 && "${var.router_id}" != null ? 1 : 0
+  router_id = var.router_id
+  count     = var.use_neutron == 1 && var.router_id != null ? 1 : 0
 }

 resource "openstack_networking_network_v2" "k8s" {
-  name           = "${var.network_name}"
-  count          = "${var.use_neutron}"
-  dns_domain     = var.network_dns_domain != null ? "${var.network_dns_domain}" : null
+  name           = var.network_name
+  count          = var.use_neutron
+  dns_domain     = var.network_dns_domain != null ? var.network_dns_domain : null
   admin_state_up = "true"
 }

 resource "openstack_networking_subnet_v2" "k8s" {
   name            = "${var.cluster_name}-internal-network"
-  count           = "${var.use_neutron}"
-  network_id      = "${openstack_networking_network_v2.k8s[count.index].id}"
-  cidr            = "${var.subnet_cidr}"
+  count           = var.use_neutron
+  network_id      = openstack_networking_network_v2.k8s[count.index].id
+  cidr            = var.subnet_cidr
   ip_version      = 4
-  dns_nameservers = "${var.dns_nameservers}"
+  dns_nameservers = var.dns_nameservers
 }

 resource "openstack_networking_router_interface_v2" "k8s" {
-  count     = "${var.use_neutron}"
+  count     = var.use_neutron
   router_id = "%{if openstack_networking_router_v2.k8s != []}${openstack_networking_router_v2.k8s[count.index].id}%{else}${var.router_id}%{endif}"
-  subnet_id = "${openstack_networking_subnet_v2.k8s[count.index].id}"
+  subnet_id = openstack_networking_subnet_v2.k8s[count.index].id
 }
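`null` is likewise a 0.12 addition, which is what makes comparisons such as `var.router_id == null` and the `dns_domain` fallback above possible; assigning `null` leaves the argument effectively unset:

```hcl
# A null result means "argument omitted", so dns_domain is only set
# when the variable actually carries a value.
dns_domain = var.network_dns_domain != null ? var.network_dns_domain : null
```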

@@ -3,9 +3,9 @@ output "router_id" {
 }

 output "router_internal_port_id" {
-  value = "${element(concat(openstack_networking_router_interface_v2.k8s.*.id, [""]), 0)}"
+  value = element(concat(openstack_networking_router_interface_v2.k8s.*.id, [""]), 0)
 }

 output "subnet_id" {
-  value = "${element(concat(openstack_networking_subnet_v2.k8s.*.id, [""]), 0)}"
+  value = element(concat(openstack_networking_subnet_v2.k8s.*.id, [""]), 0)
 }
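The `element(concat(..., [""]), 0)` idiom in these outputs guards against the empty list produced when `use_neutron` disables the resources: `concat` pads the splat with an empty string, so `element(..., 0)` returns `""` instead of failing on an empty list:

```hcl
# With use_neutron = 0 the splat is empty; the padding element keeps
# the output evaluable and yields "".
value = element(concat(openstack_networking_subnet_v2.k8s.*.id, [""]), 0)
```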