@@ -95,6 +95,24 @@ resource "openstack_networking_secgroup_rule_v2" "worker" {
  security_group_id = "${openstack_networking_secgroup_v2.worker.id}"
}

resource "openstack_compute_servergroup_v2" "k8s_master" {
  count = "%{ if var.use_server_groups }1%{else}0%{endif}"
  name = "k8s-master-srvgrp"
  policies = ["anti-affinity"]
}

resource "openstack_compute_servergroup_v2" "k8s_node" {
  count = "%{ if var.use_server_groups }1%{else}0%{endif}"
  name = "k8s-node-srvgrp"
  policies = ["anti-affinity"]
}

resource "openstack_compute_servergroup_v2" "k8s_etcd" {
  count = "%{ if var.use_server_groups }1%{else}0%{endif}"
  name = "k8s-etcd-srvgrp"
  policies = ["anti-affinity"]
}

resource "openstack_compute_instance_v2" "bastion" {
  name = "${var.cluster_name}-bastion-${count.index+1}"
  count = "${var.bastion_root_volume_size_in_gb == 0 ? var.number_of_bastions : 0}"
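
# The three server-group resources above and every scheduler_hints block below
# key off a single boolean input. A minimal sketch of its declaration, assuming
# it sits with the module's other variables (the name comes from this diff; the
# type and default shown here are assumptions):
variable "use_server_groups" {
  type    = bool
  default = false
}
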
@@ -174,6 +192,13 @@ resource "openstack_compute_instance_v2" "k8s_master" {
    "${openstack_networking_secgroup_v2.k8s.name}",
  ]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
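
# The same conditional pattern repeats in every instance resource that follows:
# a dynamic "scheduler_hints" block whose for_each is a one-element list when
# var.use_server_groups is true and an empty list otherwise, so the hint is
# rendered at most once and references the matching anti-affinity group.
# A stripped-down sketch of the idiom; the "example" names and the
# flavor/image/network values are placeholders, not part of this module:
resource "openstack_compute_servergroup_v2" "example" {
  count    = "%{ if var.use_server_groups }1%{else}0%{endif}"
  name     = "example-srvgrp"
  policies = ["anti-affinity"]
}

resource "openstack_compute_instance_v2" "example" {
  name        = "example-instance"
  flavor_name = "m1.small"
  image_name  = "ubuntu-18.04"

  network {
    name = "example-network"
  }

  # Emitted only when var.use_server_groups is true, pinning the instance to
  # the anti-affinity group declared above.
  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.example[0]] : []
    content {
      group = "${openstack_compute_servergroup_v2.example[0].id}"
    }
  }
}
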
@@ -211,6 +236,13 @@ resource "openstack_compute_instance_v2" "k8s_master_custom_volume_size" {
    "${openstack_networking_secgroup_v2.k8s.name}",
  ]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
@@ -239,6 +271,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
    "${openstack_networking_secgroup_v2.k8s.name}",
  ]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
@@ -276,6 +315,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd_custom_volume_size"
    "${openstack_networking_secgroup_v2.k8s.name}",
  ]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
@@ -302,6 +348,13 @@ resource "openstack_compute_instance_v2" "etcd" {
  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_etcd[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "etcd,vault,no-floating"
@@ -333,6 +386,13 @@ resource "openstack_compute_instance_v2" "etcd_custom_volume_size" {
  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_etcd[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "etcd,vault,no-floating"
@@ -357,6 +417,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
    "${openstack_networking_secgroup_v2.k8s.name}",
  ]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
@@ -390,6 +457,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_custom_volum
    "${openstack_networking_secgroup_v2.k8s.name}",
  ]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
@@ -414,6 +488,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
    "${openstack_networking_secgroup_v2.k8s.name}",
  ]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
@@ -447,6 +528,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd_cust
    "${openstack_networking_secgroup_v2.k8s.name}",
  ]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
@@ -471,6 +559,13 @@ resource "openstack_compute_instance_v2" "k8s_node" {
    "${openstack_networking_secgroup_v2.worker.name}",
  ]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
@@ -508,6 +603,13 @@ resource "openstack_compute_instance_v2" "k8s_node_custom_volume_size" {
    "${openstack_networking_secgroup_v2.worker.name}",
  ]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
@@ -536,6 +638,13 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
    "${openstack_networking_secgroup_v2.worker.name}",
  ]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
@@ -569,6 +678,13 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip_custom_volume_
    "${openstack_networking_secgroup_v2.worker.name}",
  ]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
@@ -659,6 +775,13 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user_gfs}"
    kubespray_groups = "gfs-cluster,network-storage,no-floating"
@@ -690,6 +813,13 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip_custom_v
  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
    content {
      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
    }
  }

  metadata = {
    ssh_user = "${var.ssh_user_gfs}"
    kubespray_groups = "gfs-cluster,network-storage,no-floating"