[Openstack] master foreach and fixes (#8709)
* [openstack] fix for new network modules
* [openstack] for-each master nodes

parent 9605bbaa67
commit fe66121287
@@ -248,6 +248,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.

|`cluster_name` | All OpenStack resources will use the Terraform variable `cluster_name` (default `example`) in their name to make it easier to track. For example, the first compute resource will be named `example-kubernetes-1`. |
|`az_list` | List of Availability Zones available in your OpenStack cluster. |
|`network_name` | The name to be given to the internal network that will be generated |
|`use_existing_network` | Use an existing network with the name of `network_name`. `false` by default |
|`network_dns_domain` | (Optional) The dns_domain for the internal network that will be generated |
|`dns_nameservers` | An array of DNS name server names to be used by hosts in the internal subnet. |
|`floatingip_pool` | Name of the pool from which floating IPs will be allocated |

@@ -284,7 +285,9 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.

|`etcd_server_group_policy` | Enable and use openstack nova servergroups for etcd with set policy, default: "" (disabled) |
|`use_access_ip` | If 1, nodes with floating IPs will transmit internal cluster traffic via floating IPs; if 0, private IPs will be used instead. Default value is 1. |
|`port_security_enabled` | Allows disabling port security by setting this to `false`. `true` by default |
|`force_null_port_security` | Set `null` instead of `true` or `false` for `port_security`. `false` by default |
|`k8s_nodes` | Map containing worker node definitions, see explanation below |
|`k8s_masters` | Map containing master node definitions, see the explanation for `k8s_nodes` and `sample-inventory/cluster.tfvars`; a sketch of such a map follows below |

##### k8s_nodes
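Both `k8s_nodes` and `k8s_masters` take a map keyed by a node suffix. As a minimal sketch of the shape `k8s_masters` expects (mirroring the commented example added to `sample-inventory/cluster.tfvars` in this change; the availability zone and flavor UUIDs are placeholders):

```hcl
# Minimal k8s_masters sketch; "nova" and "<UUID>" are placeholders.
k8s_masters = {
  "master-1" = {
    "az"          = "nova"
    "flavor"      = "<UUID>"
    "floating_ip" = true
    "etcd"        = true
  }
  "master-2" = {
    "az"          = "nova"
    "flavor"      = "<UUID>"
    "floating_ip" = false
    "etcd"        = true
  }
}
```

Each key is appended to `"${cluster_name}-k8s-"` to form the instance name (for example `example-k8s-master-1`), and the `etcd` and `floating_ip` flags decide which Kubespray groups the node joins and whether a floating IP is associated; `k8s_nodes` entries typically use the same shape without the `etcd` flag.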
@@ -24,6 +24,7 @@ module "ips" {
  network_name = var.network_name
  router_id = module.network.router_id
  k8s_nodes = var.k8s_nodes
  k8s_masters = var.k8s_masters
  k8s_master_fips = var.k8s_master_fips
  bastion_fips = var.bastion_fips
  router_internal_port_id = module.network.router_internal_port_id

@@ -44,6 +45,7 @@ module "compute" {
  number_of_bastions = var.number_of_bastions
  number_of_k8s_nodes_no_floating_ip = var.number_of_k8s_nodes_no_floating_ip
  number_of_gfs_nodes_no_floating_ip = var.number_of_gfs_nodes_no_floating_ip
  k8s_masters = var.k8s_masters
  k8s_nodes = var.k8s_nodes
  bastion_root_volume_size_in_gb = var.bastion_root_volume_size_in_gb
  etcd_root_volume_size_in_gb = var.etcd_root_volume_size_in_gb

@@ -70,6 +72,7 @@ module "compute" {
  flavor_bastion = var.flavor_bastion
  k8s_master_fips = module.ips.k8s_master_fips
  k8s_master_no_etcd_fips = module.ips.k8s_master_no_etcd_fips
  k8s_masters_fips = module.ips.k8s_masters_fips
  k8s_node_fips = module.ips.k8s_node_fips
  k8s_nodes_fips = module.ips.k8s_nodes_fips
  bastion_fips = module.ips.bastion_fips

@@ -89,8 +92,10 @@ module "compute" {
  extra_sec_groups_name = var.extra_sec_groups_name
  group_vars_path = var.group_vars_path
  port_security_enabled = var.port_security_enabled

  network_id = module.network.router_id
  force_null_port_security = var.force_null_port_security
  network_router_id = module.network.router_id
  network_id = module.network.network_id
  use_existing_network = var.use_existing_network
}

output "private_subnet_id" {
@@ -20,7 +20,8 @@ data "template_file" "cloudinit" {
}

data "openstack_networking_network_v2" "k8s_network" {
  name = var.network_name
  count = var.use_existing_network ? 1 : 0
  name = var.network_name
}

resource "openstack_compute_keypair_v2" "k8s" {

@@ -158,25 +159,25 @@ resource "openstack_compute_servergroup_v2" "k8s_etcd" {
locals {
  # master groups
  master_sec_groups = compact([
    openstack_networking_secgroup_v2.k8s_master.name,
    openstack_networking_secgroup_v2.k8s.name,
    var.extra_sec_groups ? openstack_networking_secgroup_v2.k8s_master_extra[0].name : "",
    openstack_networking_secgroup_v2.k8s_master.id,
    openstack_networking_secgroup_v2.k8s.id,
    var.extra_sec_groups ? openstack_networking_secgroup_v2.k8s_master_extra[0].id : "",
  ])
  # worker groups
  worker_sec_groups = compact([
    openstack_networking_secgroup_v2.k8s.name,
    openstack_networking_secgroup_v2.worker.name,
    var.extra_sec_groups ? openstack_networking_secgroup_v2.worker_extra[0].name : "",
    openstack_networking_secgroup_v2.k8s.id,
    openstack_networking_secgroup_v2.worker.id,
    var.extra_sec_groups ? openstack_networking_secgroup_v2.worker_extra[0].id : "",
  ])
  # bastion groups
  bastion_sec_groups = compact(concat([
    openstack_networking_secgroup_v2.k8s.name,
    openstack_networking_secgroup_v2.bastion[0].name,
    openstack_networking_secgroup_v2.k8s.id,
    openstack_networking_secgroup_v2.bastion[0].id,
  ]))
  # etcd groups
  etcd_sec_groups = compact([openstack_networking_secgroup_v2.k8s.name])
  etcd_sec_groups = compact([openstack_networking_secgroup_v2.k8s.id])
  # glusterfs groups
  gfs_sec_groups = compact([openstack_networking_secgroup_v2.k8s.name])
  gfs_sec_groups = compact([openstack_networking_secgroup_v2.k8s.id])

  # Image uuid
  image_to_use_node = var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.vm_image[0].id
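The locals above are where this change switches the security-group lists from resource names to resource IDs; `compact()` still drops the empty string produced when `extra_sec_groups` is disabled. A small, hypothetical illustration (the IDs below are placeholders, not real resources):

```hcl
# Placeholder IDs only; in the module they come from the
# openstack_networking_secgroup_v2 resources referenced above.
locals {
  extra_sec_groups = false

  example_master_sec_groups = compact([
    "11111111-2222-3333-4444-555555555555",                  # k8s_master.id
    "66666666-7777-8888-9999-000000000000",                  # k8s.id
    local.extra_sec_groups ? "aaaaaaaa-extra-group-id" : "", # "" when disabled
  ])
  # => only the two real IDs remain; compact() removes the empty string
}
```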
@@ -189,11 +190,15 @@ locals {
resource "openstack_networking_port_v2" "bastion_port" {
  count = var.number_of_bastions
  name = "${var.cluster_name}-bastion-${count.index + 1}"
  network_id = "${data.openstack_networking_network_v2.k8s_network.id}"
  network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
  admin_state_up = "true"
  port_security_enabled = var.port_security_enabled
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids = var.port_security_enabled ? local.bastion_sec_groups : null
  no_security_groups = var.port_security_enabled ? null : false

  depends_on = [
    var.network_router_id
  ]
}

resource "openstack_compute_instance_v2" "bastion" {

@@ -223,7 +228,7 @@ resource "openstack_compute_instance_v2" "bastion" {
  metadata = {
    ssh_user = var.ssh_user
    kubespray_groups = "bastion"
    depends_on = var.network_id
    depends_on = var.network_router_id
    use_access_ip = var.use_access_ip
  }

@@ -235,11 +240,15 @@ resource "openstack_compute_instance_v2" "bastion" {
resource "openstack_networking_port_v2" "k8s_master_port" {
  count = var.number_of_k8s_masters
  name = "${var.cluster_name}-k8s-master-${count.index + 1}"
  network_id = "${data.openstack_networking_network_v2.k8s_network.id}"
  network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
  admin_state_up = "true"
  port_security_enabled = var.port_security_enabled
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
  no_security_groups = var.port_security_enabled ? null : false

  depends_on = [
    var.network_router_id
  ]
}

resource "openstack_compute_instance_v2" "k8s_master" {

@@ -279,7 +288,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
  metadata = {
    ssh_user = var.ssh_user
    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster"
    depends_on = var.network_id
    depends_on = var.network_router_id
    use_access_ip = var.use_access_ip
  }

@@ -288,14 +297,76 @@ resource "openstack_compute_instance_v2" "k8s_master" {
  }
}

resource "openstack_networking_port_v2" "k8s_masters_port" {
  for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {}
  name = "${var.cluster_name}-k8s-${each.key}"
  network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
  admin_state_up = "true"
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
  no_security_groups = var.port_security_enabled ? null : false

  depends_on = [
    var.network_router_id
  ]
}

resource "openstack_compute_instance_v2" "k8s_masters" {
  for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {}
  name = "${var.cluster_name}-k8s-${each.key}"
  availability_zone = each.value.az
  image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
  flavor_id = each.value.flavor
  key_pair = openstack_compute_keypair_v2.k8s.name

  dynamic "block_device" {
    for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
    content {
      uuid = local.image_to_use_master
      source_type = "image"
      volume_size = var.master_root_volume_size_in_gb
      volume_type = var.master_volume_type
      boot_index = 0
      destination_type = "volume"
      delete_on_termination = true
    }
  }

  network {
    port = openstack_networking_port_v2.k8s_masters_port[each.key].id
  }

  dynamic "scheduler_hints" {
    for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
    content {
      group = openstack_compute_servergroup_v2.k8s_master[0].id
    }
  }

  metadata = {
    ssh_user = var.ssh_user
    kubespray_groups = "%{if each.value.etcd == true}etcd,%{endif}kube_control_plane,${var.supplementary_master_groups},k8s_cluster%{if each.value.floating_ip == false},no_floating%{endif}"
    depends_on = var.network_router_id
    use_access_ip = var.use_access_ip
  }

  provisioner "local-exec" {
    command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ${path.root}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_masters_fips : value.address]), 0)}/ > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}"
  }
}
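To make the directive syntax in `kubespray_groups` above concrete, here is a sketch of how it would resolve for a hypothetical master entry with `etcd = true` and `floating_ip = false`, assuming `supplementary_master_groups` is left empty:

```hcl
# Sketch only; example_master stands in for one each.value entry.
locals {
  example_master = { etcd = true, floating_ip = false }
  example_supplementary_master_groups = ""

  example_kubespray_groups = "%{if local.example_master.etcd == true}etcd,%{endif}kube_control_plane,${local.example_supplementary_master_groups},k8s_cluster%{if local.example_master.floating_ip == false},no_floating%{endif}"
  # => "etcd,kube_control_plane,,k8s_cluster,no_floating"
}
```

The double comma comes from the empty `supplementary_master_groups`; the same pattern already appears in the fixed-count master resources above.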
resource "openstack_networking_port_v2" "k8s_master_no_etcd_port" {
|
||||
count = var.number_of_k8s_masters_no_etcd
|
||||
name = "${var.cluster_name}-k8s-master-ne-${count.index + 1}"
|
||||
network_id = "${data.openstack_networking_network_v2.k8s_network.id}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.port_security_enabled
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
||||
|
@ -335,7 +406,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
|||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
|
||||
|
@ -347,11 +418,15 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
|||
resource "openstack_networking_port_v2" "etcd_port" {
|
||||
count = var.number_of_etcd
|
||||
name = "${var.cluster_name}-etcd-${count.index + 1}"
|
||||
network_id = "${data.openstack_networking_network_v2.k8s_network.id}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.port_security_enabled
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.etcd_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "etcd" {
|
||||
|
@ -389,7 +464,7 @@ resource "openstack_compute_instance_v2" "etcd" {
|
|||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "etcd,no_floating"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
}
|
||||
|
@ -397,11 +472,15 @@ resource "openstack_compute_instance_v2" "etcd" {
|
|||
resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_port" {
|
||||
count = var.number_of_k8s_masters_no_floating_ip
|
||||
name = "${var.cluster_name}-k8s-master-nf-${count.index + 1}"
|
||||
network_id = "${data.openstack_networking_network_v2.k8s_network.id}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.port_security_enabled
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
||||
|
@ -439,7 +518,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
|||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
}
|
||||
|
@ -447,11 +526,15 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
|||
resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_no_etcd_port" {
|
||||
count = var.number_of_k8s_masters_no_floating_ip_no_etcd
|
||||
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}"
|
||||
network_id = "${data.openstack_networking_network_v2.k8s_network.id}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.port_security_enabled
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
||||
|
@ -490,7 +573,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
|||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
}
|
||||
|
@ -498,11 +581,15 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
|||
resource "openstack_networking_port_v2" "k8s_node_port" {
|
||||
count = var.number_of_k8s_nodes
|
||||
name = "${var.cluster_name}-k8s-node-${count.index + 1}"
|
||||
network_id = "${data.openstack_networking_network_v2.k8s_network.id}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.port_security_enabled
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
|
@ -542,7 +629,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
|
|||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube_node,k8s_cluster,${var.supplementary_node_groups}"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
|
||||
|
@ -554,11 +641,15 @@ resource "openstack_compute_instance_v2" "k8s_node" {
|
|||
resource "openstack_networking_port_v2" "k8s_node_no_floating_ip_port" {
|
||||
count = var.number_of_k8s_nodes_no_floating_ip
|
||||
name = "${var.cluster_name}-k8s-node-nf-${count.index + 1}"
|
||||
network_id = "${data.openstack_networking_network_v2.k8s_network.id}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.port_security_enabled
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
|
@ -597,7 +688,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
|||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube_node,k8s_cluster,no_floating,${var.supplementary_node_groups}"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
}
|
||||
|
@ -605,11 +696,15 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
|||
resource "openstack_networking_port_v2" "k8s_nodes_port" {
|
||||
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
|
||||
name = "${var.cluster_name}-k8s-node-${each.key}"
|
||||
network_id = "${data.openstack_networking_network_v2.k8s_network.id}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.port_security_enabled
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||
|
@ -648,7 +743,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
|
|||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
|
||||
|
@ -660,11 +755,15 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
|
|||
resource "openstack_networking_port_v2" "glusterfs_node_no_floating_ip_port" {
|
||||
count = var.number_of_gfs_nodes_no_floating_ip
|
||||
name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}"
|
||||
network_id = "${data.openstack_networking_network_v2.k8s_network.id}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.gfs_sec_groups : null
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.gfs_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
|
@ -701,7 +800,7 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
|||
metadata = {
|
||||
ssh_user = var.ssh_user_gfs
|
||||
kubespray_groups = "gfs-cluster,network-storage,no_floating"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
}
|
||||
|
@ -719,6 +818,12 @@ resource "openstack_networking_floatingip_associate_v2" "k8s_master" {
|
|||
port_id = element(openstack_networking_port_v2.k8s_master_port.*.id, count.index)
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_associate_v2" "k8s_masters" {
|
||||
for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip } : {}
|
||||
floating_ip = var.k8s_masters_fips[each.key].address
|
||||
port_id = openstack_networking_port_v2.k8s_masters_port[each.key].id
|
||||
}
|
||||
|
||||
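The `for` expression above only keeps masters whose `floating_ip` flag is set, so an association is created only for those entries. A hypothetical evaluation, with placeholder values:

```hcl
# Sketch: filtering a k8s_masters-style map the same way the for_each above does.
locals {
  example_masters = {
    "master-1" = { az = "nova", flavor = "<UUID>", floating_ip = true, etcd = true }
    "master-2" = { az = "nova", flavor = "<UUID>", floating_ip = false, etcd = true }
  }

  masters_with_fip = { for key, value in local.example_masters : key => value if value.floating_ip }
  # => only "master-1" remains; "master-2" gets no floating IP association
}
```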
resource "openstack_networking_floatingip_associate_v2" "k8s_master_no_etcd" {
|
||||
count = var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0
|
||||
floating_ip = var.k8s_master_no_etcd_fips[count.index]
|
||||
|
|
|
@@ -68,6 +68,14 @@ variable "network_id" {
  default = ""
}

variable "use_existing_network" {
  type = bool
}

variable "network_router_id" {
  default = ""
}

variable "k8s_master_fips" {
  type = list
}

@@ -80,6 +88,10 @@ variable "k8s_node_fips" {
  type = list
}

variable "k8s_masters_fips" {
  type = map
}

variable "k8s_nodes_fips" {
  type = map
}

@@ -104,6 +116,8 @@ variable "k8s_allowed_egress_ips" {
  type = list
}

variable "k8s_masters" {}

variable "k8s_nodes" {}

variable "supplementary_master_groups" {

@@ -167,3 +181,7 @@ variable "group_vars_path" {
variable "port_security_enabled" {
  type = bool
}

variable "force_null_port_security" {
  type = bool
}
@@ -14,6 +14,12 @@ resource "openstack_networking_floatingip_v2" "k8s_master" {
  depends_on = [null_resource.dummy_dependency]
}

resource "openstack_networking_floatingip_v2" "k8s_masters" {
  for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip } : {}
  pool = var.floatingip_pool
  depends_on = [null_resource.dummy_dependency]
}

# If user specifies pre-existing IPs to use in k8s_master_fips, do not create new ones.
resource "openstack_networking_floatingip_v2" "k8s_master_no_etcd" {
  count = length(var.k8s_master_fips) > 0 ? 0 : var.number_of_k8s_masters_no_etcd
@@ -3,6 +3,10 @@ output "k8s_master_fips" {
  value = length(var.k8s_master_fips) > 0 ? var.k8s_master_fips : openstack_networking_floatingip_v2.k8s_master[*].address
}

output "k8s_masters_fips" {
  value = openstack_networking_floatingip_v2.k8s_masters
}

# If k8s_master_fips is already defined as input, keep the same value since new FIPs have not been created.
output "k8s_master_no_etcd_fips" {
  value = length(var.k8s_master_fips) > 0 ? var.k8s_master_fips : openstack_networking_floatingip_v2.k8s_master_no_etcd[*].address
@@ -16,6 +16,8 @@ variable "router_id" {
  default = ""
}

variable "k8s_masters" {}

variable "k8s_nodes" {}

variable "k8s_master_fips" {}
@@ -2,6 +2,10 @@ output "router_id" {
  value = "%{if var.use_neutron == 1} ${var.router_id == null ? element(concat(openstack_networking_router_v2.k8s.*.id, [""]), 0) : var.router_id} %{else} %{endif}"
}

output "network_id" {
  value = element(concat(openstack_networking_network_v2.k8s.*.id, [""]), 0)
}

output "router_internal_port_id" {
  value = element(concat(openstack_networking_router_interface_v2.k8s.*.id, [""]), 0)
}
@@ -32,6 +32,28 @@ number_of_k8s_masters_no_floating_ip_no_etcd = 0

flavor_k8s_master = "<UUID>"

k8s_masters = {
  # "master-1" = {
  #   "az" = "nova"
  #   "flavor" = "<UUID>"
  #   "floating_ip" = true
  #   "etcd" = true
  # },
  # "master-2" = {
  #   "az" = "nova"
  #   "flavor" = "<UUID>"
  #   "floating_ip" = false
  #   "etcd" = true
  # },
  # "master-3" = {
  #   "az" = "nova"
  #   "flavor" = "<UUID>"
  #   "floating_ip" = true
  #   "etcd" = true
  # },
}

# nodes
number_of_k8s_nodes = 2

@@ -52,6 +74,9 @@ number_of_k8s_nodes_no_floating_ip = 4
# networking
network_name = "<network>"

# Use an existing network with the name of network_name. Set to false to create a network with the name of network_name.
# use_existing_network = true

external_net = "<UUID>"

subnet_cidr = "<cidr>"

@@ -59,3 +84,6 @@ subnet_cidr = "<cidr>"
floatingip_pool = "<pool>"

bastion_allowed_remote_ips = ["0.0.0.0/0"]

# Force port security to be null. Some cloud providers do not allow setting port security.
# force_null_port_security = false
@@ -137,6 +137,12 @@ variable "network_name" {
  default = "internal"
}

variable "use_existing_network" {
  description = "Use an existing network"
  type = bool
  default = "false"
}

variable "network_dns_domain" {
  description = "dns_domain for the internal network"
  type = string

@@ -154,6 +160,12 @@ variable "port_security_enabled" {
  default = "true"
}

variable "force_null_port_security" {
  description = "Force port security to be null. Some providers do not allow setting port security"
  type = bool
  default = "false"
}

variable "subnet_cidr" {
  description = "Subnet CIDR block."
  type = string

@@ -274,6 +286,10 @@ variable "router_internal_port_id" {
  default = null
}

variable "k8s_masters" {
  default = {}
}

variable "k8s_nodes" {
  default = {}
}
@@ -114,10 +114,10 @@ def iterhosts(resources):


def iterips(resources):
    '''yield ip tuples of (instance_id, ip)'''
    '''yield ip tuples of (port_id, ip)'''
    for module_name, key, resource in resources:
        resource_type, name = key.split('.', 1)
        if resource_type == 'openstack_compute_floatingip_associate_v2':
        if resource_type == 'openstack_networking_floatingip_associate_v2':
            yield openstack_floating_ips(resource)


@@ -243,13 +243,13 @@ def openstack_floating_ips(resource):
    raw_attrs = resource['primary']['attributes']
    attrs = {
        'ip': raw_attrs['floating_ip'],
        'instance_id': raw_attrs['instance_id'],
        'port_id': raw_attrs['port_id'],
    }
    return attrs


def openstack_floating_ips(resource):
    raw_attrs = resource['primary']['attributes']
    return raw_attrs['instance_id'], raw_attrs['floating_ip']
    return raw_attrs['port_id'], raw_attrs['floating_ip']


@parses('openstack_compute_instance_v2')
@calculate_mantl_vars

@@ -282,6 +282,7 @@ def openstack_host(resource, module_name):
        # generic
        'public_ipv4': raw_attrs['access_ip_v4'],
        'private_ipv4': raw_attrs['access_ip_v4'],
        'port_id' : raw_attrs['network.0.port'],
        'provider': 'openstack',
    }

@@ -339,10 +340,10 @@ def openstack_host(resource, module_name):
def iter_host_ips(hosts, ips):
    '''Update hosts that have an entry in the floating IP list'''
    for host in hosts:
        host_id = host[1]['id']
        port_id = host[1]['port_id']

        if host_id in ips:
            ip = ips[host_id]
        if port_id in ips:
            ip = ips[port_id]

        host[1].update({
            'access_ip_v4': ip,