convert volumes to dynamic blocks, openstack (#5673)

parent 82efd95901
commit a901b1f0d7
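This commit collapses each paired `<role>` / `<role>_custom_volume_size` resource in the OpenStack Terraform layout into a single resource whose optional boot-from-volume root disk is expressed as a `dynamic "block_device"` block (Terraform 0.12 syntax). The sketch below illustrates the pattern on a hypothetical `example` instance; the resource name and `var.example_root_volume_size_in_gb` are placeholders, while the `block_device` arguments mirror the ones used in the diff.

# Sketch only: illustrates the dynamic-block pattern adopted in this commit.
# "example" and var.example_root_volume_size_in_gb are hypothetical placeholders.
resource "openstack_compute_instance_v2" "example" {
  name       = "example"
  image_name = var.image
  flavor_id  = var.flavor_bastion
  key_pair   = openstack_compute_keypair_v2.k8s.name

  # Rendered once when a root volume size is requested, otherwise omitted,
  # so one resource covers both the ephemeral-disk and boot-from-volume cases.
  dynamic "block_device" {
    for_each = var.example_root_volume_size_in_gb > 0 ? [var.image] : []
    content {
      uuid                  = data.openstack_images_image_v2.vm_image.id
      source_type           = "image"
      volume_size           = var.example_root_volume_size_in_gb
      boot_index            = 0
      destination_type      = "volume"
      delete_on_termination = true
    }
  }

  network {
    name = var.network_name
  }
}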
@@ -115,39 +115,14 @@ resource "openstack_compute_servergroup_v2" "k8s_etcd" {
resource "openstack_compute_instance_v2" "bastion" {
  name = "${var.cluster_name}-bastion-${count.index+1}"
  count = "${var.bastion_root_volume_size_in_gb == 0 ? var.number_of_bastions : 0}"
  count = "${var.number_of_bastions}"
  image_name = "${var.image}"
  flavor_id = "${var.flavor_bastion}"
  key_pair = "${openstack_compute_keypair_v2.k8s.name}"

  network {
    name = "${var.network_name}"
  }

  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
    "${element(openstack_networking_secgroup_v2.bastion.*.name, count.index)}",
  ]

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "bastion"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }

  provisioner "local-exec" {
    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > group_vars/no-floating.yml"
  }
}

resource "openstack_compute_instance_v2" "bastion_custom_volume_size" {
  name = "${var.cluster_name}-bastion-${count.index+1}"
  count = "${var.bastion_root_volume_size_in_gb > 0 ? var.number_of_bastions : 0}"
  image_name = "${var.image}"
  flavor_id = "${var.flavor_bastion}"
  key_pair = "${openstack_compute_keypair_v2.k8s.name}"

  block_device {
  dynamic "block_device" {
    for_each = var.bastion_root_volume_size_in_gb > 0 ? [var.image] : []
    content {
      uuid = "${data.openstack_images_image_v2.vm_image.id}"
      source_type = "image"
      volume_size = "${var.bastion_root_volume_size_in_gb}"
@@ -155,6 +130,7 @@ resource "openstack_compute_instance_v2" "bastion_custom_volume_size" {
      destination_type = "volume"
      delete_on_termination = true
    }
  }

  network {
    name = "${var.network_name}"
@@ -178,48 +154,16 @@ resource "openstack_compute_instance_v2" "bastion_custom_volume_size" {
resource "openstack_compute_instance_v2" "k8s_master" {
  name = "${var.cluster_name}-k8s-master-${count.index+1}"
  count = "${var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters : 0}"
  count = "${var.number_of_k8s_masters }"
  availability_zone = "${element(var.az_list, count.index)}"
  image_name = "${var.image}"
  flavor_id = "${var.flavor_k8s_master}"
  key_pair = "${openstack_compute_keypair_v2.k8s.name}"

  network {
    name = "${var.network_name}"
  }

  security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
    "${openstack_networking_secgroup_v2.k8s.name}",
  ]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
  dynamic "block_device" {
    for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }

  provisioner "local-exec" {
    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
  }
}

resource "openstack_compute_instance_v2" "k8s_master_custom_volume_size" {
  name = "${var.cluster_name}-k8s-master-${count.index+1}"
  count = "${var.master_root_volume_size_in_gb > 0 ? var.number_of_k8s_masters : 0}"
  availability_zone = "${element(var.az_list, count.index)}"
  image_name = "${var.image}"
  flavor_id = "${var.flavor_k8s_master}"
  key_pair = "${openstack_compute_keypair_v2.k8s.name}"

  block_device {
    uuid = "${data.openstack_images_image_v2.vm_image.id}"
    source_type = "image"
    volume_size = "${var.master_root_volume_size_in_gb}"
@@ -227,6 +171,7 @@ resource "openstack_compute_instance_v2" "k8s_master_custom_volume_size" {
    destination_type = "volume"
    delete_on_termination = true
  }
  }

  network {
    name = "${var.network_name}"
@@ -257,48 +202,16 @@ resource "openstack_compute_instance_v2" "k8s_master_custom_volume_size" {
resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
  name = "${var.cluster_name}-k8s-master-ne-${count.index+1}"
  count = "${var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0}"
  count = "${var.number_of_k8s_masters_no_etcd }"
  availability_zone = "${element(var.az_list, count.index)}"
  image_name = "${var.image}"
  flavor_id = "${var.flavor_k8s_master}"
  key_pair = "${openstack_compute_keypair_v2.k8s.name}"

  network {
    name = "${var.network_name}"
  }

  security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
    "${openstack_networking_secgroup_v2.k8s.name}",
  ]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
  dynamic "block_device" {
    for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }

  provisioner "local-exec" {
    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
  }
}

resource "openstack_compute_instance_v2" "k8s_master_no_etcd_custom_volume_size" {
  name = "${var.cluster_name}-k8s-master-ne-${count.index+1}"
  count = "${var.master_root_volume_size_in_gb > 0 ? var.number_of_k8s_masters_no_etcd : 0}"
  availability_zone = "${element(var.az_list, count.index)}"
  image_name = "${var.image}"
  flavor_id = "${var.flavor_k8s_master}"
  key_pair = "${openstack_compute_keypair_v2.k8s.name}"

  block_device {
    uuid = "${data.openstack_images_image_v2.vm_image.id}"
    source_type = "image"
    volume_size = "${var.master_root_volume_size_in_gb}"
@@ -306,6 +219,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd_custom_volume_size"
    destination_type = "volume"
    delete_on_termination = true
  }
  }

  network {
    name = "${var.network_name}"
@@ -336,42 +250,15 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd_custom_volume_size"
resource "openstack_compute_instance_v2" "etcd" {
  name = "${var.cluster_name}-etcd-${count.index+1}"
  count = "${var.etcd_root_volume_size_in_gb == 0 ? var.number_of_etcd : 0}"
  count = "${var.number_of_etcd }"
  availability_zone = "${element(var.az_list, count.index)}"
  image_name = "${var.image}"
  flavor_id = "${var.flavor_etcd}"
  key_pair = "${openstack_compute_keypair_v2.k8s.name}"

  network {
    name = "${var.network_name}"
  }

  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
  dynamic "block_device" {
    for_each = var.etcd_root_volume_size_in_gb > 0 ? [var.image] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_etcd[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "etcd,vault,no-floating"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
}

resource "openstack_compute_instance_v2" "etcd_custom_volume_size" {
  name = "${var.cluster_name}-etcd-${count.index+1}"
  count = "${var.etcd_root_volume_size_in_gb > 0 ? var.number_of_etcd : 0}"
  availability_zone = "${element(var.az_list, count.index)}"
  image_name = "${var.image}"
  flavor_id = "${var.flavor_etcd}"
  key_pair = "${openstack_compute_keypair_v2.k8s.name}"

  block_device {
    uuid = "${data.openstack_images_image_v2.vm_image.id}"
    source_type = "image"
    volume_size = "${var.etcd_root_volume_size_in_gb}"
@@ -379,6 +266,7 @@ resource "openstack_compute_instance_v2" "etcd_custom_volume_size" {
    destination_type = "volume"
    delete_on_termination = true
  }
  }

  network {
    name = "${var.network_name}"
@@ -403,44 +291,15 @@ resource "openstack_compute_instance_v2" "etcd_custom_volume_size" {
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
  name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
  count = "${var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_floating_ip : 0}"
  count = "${var.number_of_k8s_masters_no_floating_ip}"
  availability_zone = "${element(var.az_list, count.index)}"
  image_name = "${var.image}"
  flavor_id = "${var.flavor_k8s_master}"
  key_pair = "${openstack_compute_keypair_v2.k8s.name}"

  network {
    name = "${var.network_name}"
  }

  security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
    "${openstack_networking_secgroup_v2.k8s.name}",
  ]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
  dynamic "block_device" {
    for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
}

resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_custom_volume_size" {
  name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
  count = "${var.master_root_volume_size_in_gb > 0 ? var.number_of_k8s_masters_no_floating_ip : 0}"
  availability_zone = "${element(var.az_list, count.index)}"
  image_name = "${var.image}"
  flavor_id = "${var.flavor_k8s_master}"
  key_pair = "${openstack_compute_keypair_v2.k8s.name}"

  block_device {
    uuid = "${data.openstack_images_image_v2.vm_image.id}"
    source_type = "image"
    volume_size = "${var.master_root_volume_size_in_gb}"
@@ -448,6 +307,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_custom_volum
    destination_type = "volume"
    delete_on_termination = true
  }
  }

  network {
    name = "${var.network_name}"
@@ -474,44 +334,15 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_custom_volum
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
  name = "${var.cluster_name}-k8s-master-ne-nf-${count.index+1}"
  count = "${var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_floating_ip_no_etcd : 0}"
  count = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
  availability_zone = "${element(var.az_list, count.index)}"
  image_name = "${var.image}"
  flavor_id = "${var.flavor_k8s_master}"
  key_pair = "${openstack_compute_keypair_v2.k8s.name}"

  network {
    name = "${var.network_name}"
  }

  security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
    "${openstack_networking_secgroup_v2.k8s.name}",
  ]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
  dynamic "block_device" {
    for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
}

resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd_custom_volume_size" {
  name = "${var.cluster_name}-k8s-master-ne-nf-${count.index+1}"
  count = "${var.master_root_volume_size_in_gb > 0 ? var.number_of_k8s_masters_no_floating_ip_no_etcd : 0}"
  availability_zone = "${element(var.az_list, count.index)}"
  image_name = "${var.image}"
  flavor_id = "${var.flavor_k8s_master}"
  key_pair = "${openstack_compute_keypair_v2.k8s.name}"

  block_device {
    uuid = "${data.openstack_images_image_v2.vm_image.id}"
    source_type = "image"
    volume_size = "${var.master_root_volume_size_in_gb}"
@@ -519,6 +350,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd_cust
    destination_type = "volume"
    delete_on_termination = true
  }
  }

  network {
    name = "${var.network_name}"
@@ -545,48 +377,15 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd_cust
resource "openstack_compute_instance_v2" "k8s_node" {
  name = "${var.cluster_name}-k8s-node-${count.index+1}"
  count = "${var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes : 0}"
  count = "${var.number_of_k8s_nodes}"
  availability_zone = "${element(var.az_list_node, count.index)}"
  image_name = "${var.image}"
  flavor_id = "${var.flavor_k8s_node}"
  key_pair = "${openstack_compute_keypair_v2.k8s.name}"

  network {
    name = "${var.network_name}"
  }

  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
    "${openstack_networking_secgroup_v2.worker.name}",
  ]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
  dynamic "block_device" {
    for_each = var.node_root_volume_size_in_gb > 0 ? [var.image] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }

  provisioner "local-exec" {
    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no-floating.yml"
  }
}

resource "openstack_compute_instance_v2" "k8s_node_custom_volume_size" {
  name = "${var.cluster_name}-k8s-node-${count.index+1}"
  count = "${var.node_root_volume_size_in_gb > 0 ? var.number_of_k8s_nodes : 0}"
  availability_zone = "${element(var.az_list_node, count.index)}"
  image_name = "${var.image}"
  flavor_id = "${var.flavor_k8s_node}"
  key_pair = "${openstack_compute_keypair_v2.k8s.name}"

  block_device {
    uuid = "${data.openstack_images_image_v2.vm_image.id}"
    source_type = "image"
    volume_size = "${var.node_root_volume_size_in_gb}"
@@ -594,6 +393,7 @@ resource "openstack_compute_instance_v2" "k8s_node_custom_volume_size" {
    destination_type = "volume"
    delete_on_termination = true
  }
  }

  network {
    name = "${var.network_name}"
@@ -624,44 +424,15 @@ resource "openstack_compute_instance_v2" "k8s_node_custom_volume_size" {
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
  name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
  count = "${var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes_no_floating_ip : 0}"
  count = "${var.number_of_k8s_nodes_no_floating_ip}"
  availability_zone = "${element(var.az_list_node, count.index)}"
  image_name = "${var.image}"
  flavor_id = "${var.flavor_k8s_node}"
  key_pair = "${openstack_compute_keypair_v2.k8s.name}"

  network {
    name = "${var.network_name}"
  }

  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
    "${openstack_networking_secgroup_v2.worker.name}",
  ]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
  dynamic "block_device" {
    for_each = var.node_root_volume_size_in_gb > 0 ? [var.image] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
}

resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip_custom_volume_size" {
  name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
  count = "${var.node_root_volume_size_in_gb > 0 ? var.number_of_k8s_nodes_no_floating_ip : 0}"
  availability_zone = "${element(var.az_list_node, count.index)}"
  image_name = "${var.image}"
  flavor_id = "${var.flavor_k8s_node}"
  key_pair = "${openstack_compute_keypair_v2.k8s.name}"

  block_device {
    uuid = "${data.openstack_images_image_v2.vm_image.id}"
    source_type = "image"
    volume_size = "${var.node_root_volume_size_in_gb}"
@@ -669,6 +440,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip_custom_volume_
    destination_type = "volume"
    delete_on_termination = true
  }
  }

  network {
    name = "${var.network_name}"
@@ -693,46 +465,68 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip_custom_volume_
  }
}

resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
  name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
  count = "${var.number_of_gfs_nodes_no_floating_ip}"
  availability_zone = "${element(var.az_list, count.index)}"
  image_name = "${var.image_gfs}"
  flavor_id = "${var.flavor_gfs_node}"
  key_pair = "${openstack_compute_keypair_v2.k8s.name}"

  dynamic "block_device" {
    for_each = var.gfs_root_volume_size_in_gb > 0 ? [var.image] : []
    content {
      uuid = "${data.openstack_images_image_v2.vm_image.id}"
      source_type = "image"
      volume_size = "${var.gfs_root_volume_size_in_gb}"
      boot_index = 0
      destination_type = "volume"
      delete_on_termination = true
    }
  }

  network {
    name = "${var.network_name}"
  }

  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user_gfs}"
    kubespray_groups = "gfs-cluster,network-storage,no-floating"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
}

resource "openstack_compute_floatingip_associate_v2" "bastion" {
  count = "${var.bastion_root_volume_size_in_gb == 0 ? var.number_of_bastions : 0}"
  count = "${var.number_of_bastions}"
  floating_ip = "${var.bastion_fips[count.index]}"
  instance_id = "${element(openstack_compute_instance_v2.bastion.*.id, count.index)}"
  wait_until_associated = "${var.wait_for_floatingip}"
}

resource "openstack_compute_floatingip_associate_v2" "bastion_custom_volume_size" {
  count = "${var.bastion_root_volume_size_in_gb > 0 ? var.number_of_bastions : 0}"
  floating_ip = "${var.bastion_fips[count.index]}"
  instance_id = "${element(openstack_compute_instance_v2.bastion_custom_volume_size.*.id, count.index)}"
  wait_until_associated = "${var.wait_for_floatingip}"
}

resource "openstack_compute_floatingip_associate_v2" "k8s_master" {
  count = "${var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters : 0}"
  count = "${var.number_of_k8s_masters}"
  instance_id = "${element(openstack_compute_instance_v2.k8s_master.*.id, count.index)}"
  floating_ip = "${var.k8s_master_fips[count.index]}"
  wait_until_associated = "${var.wait_for_floatingip}"
}

resource "openstack_compute_floatingip_associate_v2" "k8s_master_custom_volume_size" {
  count = "${var.master_root_volume_size_in_gb > 0 ? var.number_of_k8s_masters : 0}"
  instance_id = "${element(openstack_compute_instance_v2.k8s_master_custom_volume_size.*.id, count.index)}"
  floating_ip = "${var.k8s_master_fips[count.index]}"
  wait_until_associated = "${var.wait_for_floatingip}"
}

resource "openstack_compute_floatingip_associate_v2" "k8s_master_no_etcd" {
  count = "${var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0}"
  instance_id = "${element(openstack_compute_instance_v2.k8s_master_no_etcd.*.id, count.index)}"
  floating_ip = "${var.k8s_master_no_etcd_fips[count.index]}"
}

resource "openstack_compute_floatingip_associate_v2" "k8s_master_no_etcd_custom_volume_size" {
  count = "${var.master_root_volume_size_in_gb > 0 ? var.number_of_k8s_masters_no_etcd : 0}"
  instance_id = "${element(openstack_compute_instance_v2.k8s_master_no_etcd_custom_volume_size.*.id, count.index)}"
  floating_ip = "${var.k8s_master_no_etcd_fips[count.index]}"
}

resource "openstack_compute_floatingip_associate_v2" "k8s_node" {
  count = "${var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes : 0}"
  floating_ip = "${var.k8s_node_fips[count.index]}"
@@ -740,13 +534,6 @@ resource "openstack_compute_floatingip_associate_v2" "k8s_node" {
  wait_until_associated = "${var.wait_for_floatingip}"
}

resource "openstack_compute_floatingip_associate_v2" "k8s_node_custom_volume_size" {
  count = "${var.node_root_volume_size_in_gb > 0 ? var.number_of_k8s_nodes : 0}"
  floating_ip = "${var.k8s_node_fips[count.index]}"
  instance_id = "${element(openstack_compute_instance_v2.k8s_node_custom_volume_size.*.id, count.index)}"
  wait_until_associated = "${var.wait_for_floatingip}"
}

resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
  name = "${var.cluster_name}-glusterfs_volume-${count.index+1}"
  count = "${var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
@@ -754,88 +541,8 @@ resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
  size = "${var.gfs_volume_size_in_gb}"
}

resource "openstack_blockstorage_volume_v2" "glusterfs_volume_custom_volume_size" {
  name = "${var.cluster_name}-glusterfs_volume-${count.index+1}"
  count = "${var.gfs_root_volume_size_in_gb > 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
  description = "Non-ephemeral volume for GlusterFS"
  size = "${var.gfs_volume_size_in_gb}"
}

resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
  name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
  count = "${var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
  availability_zone = "${element(var.az_list, count.index)}"
  image_name = "${var.image_gfs}"
  flavor_id = "${var.flavor_gfs_node}"
  key_pair = "${openstack_compute_keypair_v2.k8s.name}"

  network {
    name = "${var.network_name}"
  }

  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user_gfs}"
    kubespray_groups = "gfs-cluster,network-storage,no-floating"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
}

resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip_custom_volume_size" {
  name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
  count = "${var.gfs_root_volume_size_in_gb > 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
  availability_zone = "${element(var.az_list, count.index)}"
  image_name = "${var.image}"
  flavor_id = "${var.flavor_gfs_node}"
  key_pair = "${openstack_compute_keypair_v2.k8s.name}"

  block_device {
    uuid = "${data.openstack_images_image_v2.gfs_image.id}"
    source_type = "image"
    volume_size = "${var.gfs_root_volume_size_in_gb}"
    boot_index = 0
    destination_type = "volume"
    delete_on_termination = true
  }

  network {
    name = "${var.network_name}"
  }

  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user_gfs}"
    kubespray_groups = "gfs-cluster,network-storage,no-floating"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
}

resource "openstack_compute_volume_attach_v2" "glusterfs_volume" {
  count = "${var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
  instance_id = "${element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index)}"
  volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)}"
}

resource "openstack_compute_volume_attach_v2" "glusterfs_volume_custom_root_volume_size" {
  count = "${var.gfs_root_volume_size_in_gb > 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
  instance_id = "${element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip_custom_volume_size.*.id, count.index)}"
  volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume_custom_volume_size.*.id, count.index)}"
}
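With the dynamic blocks in place, one variable per role decides at plan time whether the instances boot from a new volume, which is what lets the separate `*_custom_volume_size` resources (and their floating-IP and volume-attach counterparts) be removed above. A hypothetical tfvars fragment using the module's existing variable names:

# Hypothetical terraform.tfvars fragment: masters and nodes boot from 50 GB
# root volumes, while bastions keep the flavor's ephemeral disk (a size of 0
# means no block_device block is rendered for that role).
master_root_volume_size_in_gb  = 50
node_root_volume_size_in_gb    = 50
bastion_root_volume_size_in_gb = 0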