diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md
index 8aebbf52b..5d4f2e4dc 100644
--- a/contrib/terraform/openstack/README.md
+++ b/contrib/terraform/openstack/README.md
@@ -253,6 +253,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
 |`gfs_root_volume_size_in_gb` | Size of the root volume for gluster, 0 to use ephemeral storage |
 |`etcd_root_volume_size_in_gb` | Size of the root volume for etcd nodes, 0 to use ephemeral storage |
 |`bastion_root_volume_size_in_gb` | Size of the root volume for bastions, 0 to use ephemeral storage |
+|`use_server_groups` | Create and use OpenStack Nova server groups (anti-affinity), default: false |
 
 #### Terraform state files
 
diff --git a/contrib/terraform/openstack/kubespray.tf b/contrib/terraform/openstack/kubespray.tf
index b26961d6f..95c95043e 100644
--- a/contrib/terraform/openstack/kubespray.tf
+++ b/contrib/terraform/openstack/kubespray.tf
@@ -71,6 +71,7 @@ module "compute" {
   worker_allowed_ports = "${var.worker_allowed_ports}"
   wait_for_floatingip = "${var.wait_for_floatingip}"
   use_access_ip = "${var.use_access_ip}"
+  use_server_groups = "${var.use_server_groups}"
 
   network_id = "${module.network.router_id}"
 }
diff --git a/contrib/terraform/openstack/modules/compute/main.tf b/contrib/terraform/openstack/modules/compute/main.tf
index c181ccfe6..9adc22e89 100644
--- a/contrib/terraform/openstack/modules/compute/main.tf
+++ b/contrib/terraform/openstack/modules/compute/main.tf
@@ -95,6 +95,24 @@ resource "openstack_networking_secgroup_rule_v2" "worker" {
   security_group_id = "${openstack_networking_secgroup_v2.worker.id}"
 }
 
+resource "openstack_compute_servergroup_v2" "k8s_master" {
+  count = "%{ if var.use_server_groups }1%{else}0%{endif}"
+  name = "k8s-master-srvgrp"
+  policies = ["anti-affinity"]
+}
+
+resource "openstack_compute_servergroup_v2" "k8s_node" {
+  count = "%{ if var.use_server_groups }1%{else}0%{endif}"
+  name = "k8s-node-srvgrp"
+  policies = ["anti-affinity"]
+}
+
+resource "openstack_compute_servergroup_v2" "k8s_etcd" {
+  count = "%{ if var.use_server_groups }1%{else}0%{endif}"
+  name = "k8s-etcd-srvgrp"
+  policies = ["anti-affinity"]
+}
+
 resource "openstack_compute_instance_v2" "bastion" {
   name = "${var.cluster_name}-bastion-${count.index+1}"
   count = "${var.bastion_root_volume_size_in_gb == 0 ? var.number_of_bastions : 0}"
@@ -174,6 +192,13 @@ resource "openstack_compute_instance_v2" "k8s_master" {
     "${openstack_networking_secgroup_v2.k8s.name}",
   ]
 
+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user = "${var.ssh_user}"
     kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
@@ -210,6 +235,13 @@ resource "openstack_compute_instance_v2" "k8s_master_custom_volume_size" {
   security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
     "${openstack_networking_secgroup_v2.k8s.name}",
   ]
+
+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+    }
+  }
 
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -238,6 +270,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
   security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
     "${openstack_networking_secgroup_v2.k8s.name}",
   ]
+
+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+    }
+  }
 
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -276,6 +315,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd_custom_volume_size"
     "${openstack_networking_secgroup_v2.k8s.name}",
   ]
 
+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user = "${var.ssh_user}"
     kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
@@ -302,6 +348,13 @@ resource "openstack_compute_instance_v2" "etcd" {
 
   security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
 
+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_etcd[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user = "${var.ssh_user}"
     kubespray_groups = "etcd,vault,no-floating"
@@ -332,6 +385,13 @@ resource "openstack_compute_instance_v2" "etcd_custom_volume_size" {
   }
 
   security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
+
+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_etcd[0].id}"
+    }
+  }
 
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -356,6 +416,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
   security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
     "${openstack_networking_secgroup_v2.k8s.name}",
   ]
+
+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+    }
+  }
 
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -389,6 +456,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_custom_volum
   security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
     "${openstack_networking_secgroup_v2.k8s.name}",
   ]
+
+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+    }
+  }
 
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -413,6 +487,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
   security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
     "${openstack_networking_secgroup_v2.k8s.name}",
   ]
+
+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+    }
+  }
 
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -446,6 +527,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd_cust
   security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
     "${openstack_networking_secgroup_v2.k8s.name}",
   ]
+
+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+    }
+  }
 
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -471,6 +559,13 @@ resource "openstack_compute_instance_v2" "k8s_node" {
     "${openstack_networking_secgroup_v2.worker.name}",
   ]
 
+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user = "${var.ssh_user}"
     kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
@@ -507,6 +602,13 @@ resource "openstack_compute_instance_v2" "k8s_node_custom_volume_size" {
   security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
     "${openstack_networking_secgroup_v2.worker.name}",
   ]
+
+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
+    }
+  }
 
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -535,6 +637,13 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
   security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
     "${openstack_networking_secgroup_v2.worker.name}",
   ]
+
+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
+    }
+  }
 
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -568,6 +677,13 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip_custom_volume_
   security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
     "${openstack_networking_secgroup_v2.worker.name}",
   ]
+
+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
+    }
+  }
 
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -658,6 +774,13 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
   }
 
   security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
+
+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
+    }
+  }
 
   metadata = {
     ssh_user = "${var.ssh_user_gfs}"
@@ -689,6 +812,13 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip_custom_v
   }
 
   security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
+
+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
+    }
+  }
 
   metadata = {
     ssh_user = "${var.ssh_user_gfs}"
diff --git a/contrib/terraform/openstack/modules/compute/variables.tf b/contrib/terraform/openstack/modules/compute/variables.tf
index 4a9680e6f..61108059f 100644
--- a/contrib/terraform/openstack/modules/compute/variables.tf
+++ b/contrib/terraform/openstack/modules/compute/variables.tf
@@ -106,4 +106,8 @@ variable "worker_allowed_ports" {
   type = "list"
 }
 
-variable "use_access_ip" {}
\ No newline at end of file
+variable "use_access_ip" {}
+
+variable "use_server_groups" {
+  type = bool
+}
\ No newline at end of file
diff --git a/contrib/terraform/openstack/variables.tf b/contrib/terraform/openstack/variables.tf
index 218c82add..1fd294aad 100644
--- a/contrib/terraform/openstack/variables.tf
+++ b/contrib/terraform/openstack/variables.tf
@@ -210,3 +210,7 @@ variable "worker_allowed_ports" {
 variable "use_access_ip" {
   default = 1
 }
+
+variable "use_server_groups" {
+  default = false
+}
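Example usage (illustrative snippet, not part of this patch): with the defaults introduced above, anti-affinity server groups are opt-in, and a cluster enables them by setting the new variable in its `inventory/$CLUSTER/cluster.tfvars`:

    # Create the k8s-master, k8s-node and k8s-etcd Nova server groups and
    # attach the matching scheduler_hints to the instances in this module
    use_server_groups = true

With the default of `false`, the servergroup resources get a count of 0 and every dynamic "scheduler_hints" block renders nothing, so existing clusters are unaffected.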