Convert exoscale tf provider to new version (#10646)

This is untested; it passes terraform validate, which is enough to unbreak the CI.
Max Gautier 2023-11-24 17:22:55 +01:00 committed by GitHub
parent b321ca3e64
commit d583d331b5
4 changed files with 117 additions and 119 deletions
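
The conversion maps each removed resource onto its successor in the newer exoscale provider: exoscale_compute becomes exoscale_compute_instance, exoscale_network becomes exoscale_private_network, exoscale_ipaddress becomes exoscale_elastic_ip, and the monolithic exoscale_security_group_rules is split into per-rule exoscale_security_group_rule resources. As a minimal sketch (not part of this commit; the exact minimum version is an assumption), the renamed resources presuppose a provider pin along these lines:

terraform {
  required_providers {
    exoscale = {
      source = "exoscale/exoscale"
      # Assumed constraint: exoscale_compute_instance, exoscale_private_network
      # and exoscale_elastic_ip only exist in recent provider releases
      # (roughly the 0.31 line onwards).
      version = ">= 0.31"
    }
  }
}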

@@ -12,7 +12,7 @@ ssh_public_keys = [
 machines = {
   "master-0" : {
     "node_type" : "master",
-    "size" : "Medium",
+    "size" : "standard.medium",
     "boot_disk" : {
       "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
       "root_partition_size" : 50,
@@ -22,7 +22,7 @@ machines = {
   },
   "worker-0" : {
     "node_type" : "worker",
-    "size" : "Large",
+    "size" : "standard.large",
     "boot_disk" : {
       "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
       "root_partition_size" : 50,
@@ -32,7 +32,7 @@ machines = {
   },
   "worker-1" : {
     "node_type" : "worker",
-    "size" : "Large",
+    "size" : "standard.large",
     "boot_disk" : {
       "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
       "root_partition_size" : 50,
@@ -42,7 +42,7 @@ machines = {
   },
   "worker-2" : {
     "node_type" : "worker",
-    "size" : "Large",
+    "size" : "standard.large",
     "boot_disk" : {
       "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
       "root_partition_size" : 50,

@@ -1,29 +1,25 @@
-data "exoscale_compute_template" "os_image" {
+data "exoscale_template" "os_image" {
   for_each = var.machines

   zone = var.zone
   name = each.value.boot_disk.image_name
 }

-data "exoscale_compute" "master_nodes" {
-  for_each = exoscale_compute.master
+data "exoscale_compute_instance" "master_nodes" {
+  for_each = exoscale_compute_instance.master

   id = each.value.id
-
-  # Since private IP address is not assigned until the nics are created we need this
-  depends_on = [exoscale_nic.master_private_network_nic]
+  zone = var.zone
 }

-data "exoscale_compute" "worker_nodes" {
-  for_each = exoscale_compute.worker
+data "exoscale_compute_instance" "worker_nodes" {
+  for_each = exoscale_compute_instance.worker

   id = each.value.id
-
-  # Since private IP address is not assigned until the nics are created we need this
-  depends_on = [exoscale_nic.worker_private_network_nic]
+  zone = var.zone
 }

-resource "exoscale_network" "private_network" {
+resource "exoscale_private_network" "private_network" {
   zone = var.zone
   name = "${var.prefix}-network"
@@ -34,25 +30,29 @@ resource "exoscale_network" "private_network" {
   netmask = cidrnetmask(var.private_network_cidr)
 }

-resource "exoscale_compute" "master" {
+resource "exoscale_compute_instance" "master" {
   for_each = {
     for name, machine in var.machines :
     name => machine
     if machine.node_type == "master"
   }

-  display_name = "${var.prefix}-${each.key}"
-  template_id = data.exoscale_compute_template.os_image[each.key].id
-  size = each.value.size
+  name = "${var.prefix}-${each.key}"
+  template_id = data.exoscale_template.os_image[each.key].id
+  type = each.value.size
   disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
   state = "Running"
   zone = var.zone
-  security_groups = [exoscale_security_group.master_sg.name]
+  security_group_ids = [exoscale_security_group.master_sg.id]
+  network_interface {
+    network_id = exoscale_private_network.private_network.id
+  }
+  elastic_ip_ids = [exoscale_elastic_ip.control_plane_lb.id]

   user_data = templatefile(
     "${path.module}/templates/cloud-init.tmpl",
     {
-      eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
+      eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address
       node_local_partition_size = each.value.boot_disk.node_local_partition_size
       ceph_partition_size = each.value.boot_disk.ceph_partition_size
       root_partition_size = each.value.boot_disk.root_partition_size
@@ -62,25 +62,29 @@ resource "exoscale_compute" "master" {
   )
 }

-resource "exoscale_compute" "worker" {
+resource "exoscale_compute_instance" "worker" {
   for_each = {
     for name, machine in var.machines :
     name => machine
     if machine.node_type == "worker"
   }

-  display_name = "${var.prefix}-${each.key}"
-  template_id = data.exoscale_compute_template.os_image[each.key].id
-  size = each.value.size
+  name = "${var.prefix}-${each.key}"
+  template_id = data.exoscale_template.os_image[each.key].id
+  type = each.value.size
   disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
   state = "Running"
   zone = var.zone
-  security_groups = [exoscale_security_group.worker_sg.name]
+  security_group_ids = [exoscale_security_group.worker_sg.id]
+  network_interface {
+    network_id = exoscale_private_network.private_network.id
+  }
+  elastic_ip_ids = [exoscale_elastic_ip.ingress_controller_lb.id]

   user_data = templatefile(
     "${path.module}/templates/cloud-init.tmpl",
     {
-      eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
+      eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address
       node_local_partition_size = each.value.boot_disk.node_local_partition_size
       ceph_partition_size = each.value.boot_disk.ceph_partition_size
       root_partition_size = each.value.boot_disk.root_partition_size
@@ -90,41 +94,33 @@ resource "exoscale_compute" "worker" {
   )
 }

-resource "exoscale_nic" "master_private_network_nic" {
-  for_each = exoscale_compute.master
-
-  compute_id = each.value.id
-  network_id = exoscale_network.private_network.id
-}
-
-resource "exoscale_nic" "worker_private_network_nic" {
-  for_each = exoscale_compute.worker
-
-  compute_id = each.value.id
-  network_id = exoscale_network.private_network.id
-}
-
 resource "exoscale_security_group" "master_sg" {
   name = "${var.prefix}-master-sg"
   description = "Security group for Kubernetes masters"
 }

-resource "exoscale_security_group_rules" "master_sg_rules" {
+resource "exoscale_security_group_rule" "master_sg_rule_ssh" {
   security_group_id = exoscale_security_group.master_sg.id

+  for_each = toset(var.ssh_whitelist)
+
   # SSH
-  ingress {
+  type = "INGRESS"
+  start_port = 22
+  end_port = 22
   protocol = "TCP"
-  cidr_list = var.ssh_whitelist
-  ports = ["22"]
+  cidr = each.value
 }

+resource "exoscale_security_group_rule" "master_sg_rule_k8s_api" {
+  security_group_id = exoscale_security_group.master_sg.id
+
+  for_each = toset(var.api_server_whitelist)
+
   # Kubernetes API
-  ingress {
+  type = "INGRESS"
+  start_port = 6443
+  end_port = 6443
   protocol = "TCP"
-  cidr_list = var.api_server_whitelist
-  ports = ["6443"]
-  }
+  cidr = each.value
 }

 resource "exoscale_security_group" "worker_sg" {
@@ -132,62 +128,64 @@ resource "exoscale_security_group" "worker_sg" {
   description = "security group for kubernetes worker nodes"
 }
resource "exoscale_security_group_rules" "worker_sg_rules" {
resource "exoscale_security_group_rule" "worker_sg_rule_ssh" {
security_group_id = exoscale_security_group.worker_sg.id
# SSH
ingress {
for_each = toset(var.ssh_whitelist)
type = "INGRESS"
start_port = 22
end_port = 22
protocol = "TCP"
cidr_list = var.ssh_whitelist
ports = ["22"]
cidr = each.value
}
resource "exoscale_security_group_rule" "worker_sg_rule_http" {
security_group_id = exoscale_security_group.worker_sg.id
# HTTP(S)
ingress {
for_each = toset(["80", "443"])
type = "INGRESS"
start_port = each.value
end_port = each.value
protocol = "TCP"
cidr_list = ["0.0.0.0/0"]
ports = ["80", "443"]
cidr = "0.0.0.0/0"
}
# Kubernetes Nodeport
ingress {
resource "exoscale_security_group_rule" "worker_sg_rule_nodeport" {
security_group_id = exoscale_security_group.worker_sg.id
# HTTP(S)
for_each = toset(var.nodeport_whitelist)
type = "INGRESS"
start_port = 30000
end_port = 32767
protocol = "TCP"
cidr_list = var.nodeport_whitelist
ports = ["30000-32767"]
}
cidr = each.value
}
resource "exoscale_ipaddress" "ingress_controller_lb" {
resource "exoscale_elastic_ip" "ingress_controller_lb" {
zone = var.zone
healthcheck_mode = "http"
healthcheck_port = 80
healthcheck_path = "/healthz"
healthcheck_interval = 10
healthcheck_timeout = 2
healthcheck_strikes_ok = 2
healthcheck_strikes_fail = 3
healthcheck {
mode = "http"
port = 80
uri = "/healthz"
interval = 10
timeout = 2
strikes_ok = 2
strikes_fail = 3
}
}
resource "exoscale_secondary_ipaddress" "ingress_controller_lb" {
for_each = exoscale_compute.worker
compute_id = each.value.id
ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
}
resource "exoscale_ipaddress" "control_plane_lb" {
resource "exoscale_elastic_ip" "control_plane_lb" {
zone = var.zone
healthcheck_mode = "tcp"
healthcheck_port = 6443
healthcheck_interval = 10
healthcheck_timeout = 2
healthcheck_strikes_ok = 2
healthcheck_strikes_fail = 3
healthcheck {
mode = "tcp"
port = 6443
interval = 10
timeout = 2
strikes_ok = 2
strikes_fail = 3
}
resource "exoscale_secondary_ipaddress" "control_plane_lb" {
for_each = exoscale_compute.master
compute_id = each.value.id
ip_address = exoscale_ipaddress.control_plane_lb.ip_address
}
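
Note that the deleted exoscale_nic and exoscale_secondary_ipaddress glue resources have no standalone successors: their roles fold into the instance itself via the network_interface block and the elastic_ip_ids attribute. Condensed from the resources in this diff, with the other required arguments elided:

resource "exoscale_compute_instance" "worker" {
  # ... name, template_id, type, disk_size, state, zone as above ...

  # Replaces resource "exoscale_nic" "worker_private_network_nic".
  network_interface {
    network_id = exoscale_private_network.private_network.id
  }

  # Replaces resource "exoscale_secondary_ipaddress" "ingress_controller_lb".
  elastic_ip_ids = [exoscale_elastic_ip.ingress_controller_lb.id]
}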

@@ -1,19 +1,19 @@
 output "master_ip_addresses" {
   value = {
-    for key, instance in exoscale_compute.master :
+    for key, instance in exoscale_compute_instance.master :
     instance.name => {
-      "private_ip" = contains(keys(data.exoscale_compute.master_nodes), key) ? data.exoscale_compute.master_nodes[key].private_network_ip_addresses[0] : ""
-      "public_ip" = exoscale_compute.master[key].ip_address
+      "private_ip" = contains(keys(data.exoscale_compute_instance.master_nodes), key) ? data.exoscale_compute_instance.master_nodes[key].private_network_ip_addresses[0] : ""
+      "public_ip" = exoscale_compute_instance.master[key].ip_address
     }
   }
 }

 output "worker_ip_addresses" {
   value = {
-    for key, instance in exoscale_compute.worker :
+    for key, instance in exoscale_compute_instance.worker :
     instance.name => {
-      "private_ip" = contains(keys(data.exoscale_compute.worker_nodes), key) ? data.exoscale_compute.worker_nodes[key].private_network_ip_addresses[0] : ""
-      "public_ip" = exoscale_compute.worker[key].ip_address
+      "private_ip" = contains(keys(data.exoscale_compute_instance.worker_nodes), key) ? data.exoscale_compute_instance.worker_nodes[key].private_network_ip_addresses[0] : ""
+      "public_ip" = exoscale_compute_instance.worker[key].ip_address
     }
   }
 }
@@ -23,9 +23,9 @@ output "cluster_private_network_cidr" {
 }

 output "ingress_controller_lb_ip_address" {
-  value = exoscale_ipaddress.ingress_controller_lb.ip_address
+  value = exoscale_elastic_ip.ingress_controller_lb.ip_address
 }

 output "control_plane_lb_ip_address" {
-  value = exoscale_ipaddress.control_plane_lb.ip_address
+  value = exoscale_elastic_ip.control_plane_lb.ip_address
 }
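
For context, a minimal sketch of how a wrapping configuration would consume these outputs; the module path and name are assumptions, not part of this commit:

module "kubernetes" {
  source = "./modules/kubernetes-cluster" # assumed path

  # zone, prefix, machines, ssh_whitelist, api_server_whitelist,
  # nodeport_whitelist, ... as declared by the module's variables
}

# Re-export the load balancer addresses from the root module.
output "control_plane_lb_ip_address" {
  value = module.kubernetes.control_plane_lb_ip_address
}

output "ingress_controller_lb_ip_address" {
  value = module.kubernetes.ingress_controller_lb_ip_address
}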