[2.23] kubernetes: hashes for 1.27.8, 1.26.11, default to 1.27.8 (#10706)

* kubernetes: add hashes for 1.27.8, 1.26.11

Make 1.27.8 the default.

* Convert exoscale tf provider to new version (#10646)

This is untested, but it passes terraform validate, which un-breaks the CI (a sketch of that check follows the Terraform provider diff below).

* Update 0040-verify-settings.yml (#10699)

remove embedded template

---------

Co-authored-by: piwinkler <9642809+piwinkler@users.noreply.github.com>
Max Gautier 2023-12-11 17:26:26 +01:00 committed by GitHub
parent 72da838519
commit 0107dbc29c
8 changed files with 144 additions and 122 deletions

View File

@ -161,7 +161,7 @@ Note: Upstart/SysV init based OS types are not supported.
## Supported Components
- Core
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.27.7
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.27.8
- [etcd](https://github.com/etcd-io/etcd) v3.5.9
- [docker](https://www.docker.com/) v20.10 (see note)
- [containerd](https://containerd.io/) v1.7.5

View File

@ -12,7 +12,7 @@ ssh_public_keys = [
machines = {
"master-0" : {
"node_type" : "master",
"size" : "Medium",
"size" : "standard.medium",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
@ -22,7 +22,7 @@ machines = {
},
"worker-0" : {
"node_type" : "worker",
"size" : "Large",
"size" : "standard.large",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
@ -32,7 +32,7 @@ machines = {
},
"worker-1" : {
"node_type" : "worker",
"size" : "Large",
"size" : "standard.large",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
@ -42,7 +42,7 @@ machines = {
},
"worker-2" : {
"node_type" : "worker",
"size" : "Large",
"size" : "standard.large",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,

View File

@ -1,29 +1,25 @@
data "exoscale_compute_template" "os_image" {
data "exoscale_template" "os_image" {
for_each = var.machines
zone = var.zone
name = each.value.boot_disk.image_name
}
data "exoscale_compute" "master_nodes" {
for_each = exoscale_compute.master
data "exoscale_compute_instance" "master_nodes" {
for_each = exoscale_compute_instance.master
id = each.value.id
# Since private IP address is not assigned until the nics are created we need this
depends_on = [exoscale_nic.master_private_network_nic]
id = each.value.id
zone = var.zone
}
data "exoscale_compute" "worker_nodes" {
for_each = exoscale_compute.worker
data "exoscale_compute_instance" "worker_nodes" {
for_each = exoscale_compute_instance.worker
id = each.value.id
# Since private IP address is not assigned until the nics are created we need this
depends_on = [exoscale_nic.worker_private_network_nic]
id = each.value.id
zone = var.zone
}
resource "exoscale_network" "private_network" {
resource "exoscale_private_network" "private_network" {
zone = var.zone
name = "${var.prefix}-network"
@ -34,25 +30,29 @@ resource "exoscale_network" "private_network" {
netmask = cidrnetmask(var.private_network_cidr)
}
resource "exoscale_compute" "master" {
resource "exoscale_compute_instance" "master" {
for_each = {
for name, machine in var.machines :
name => machine
if machine.node_type == "master"
}
display_name = "${var.prefix}-${each.key}"
template_id = data.exoscale_compute_template.os_image[each.key].id
size = each.value.size
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
state = "Running"
zone = var.zone
security_groups = [exoscale_security_group.master_sg.name]
name = "${var.prefix}-${each.key}"
template_id = data.exoscale_template.os_image[each.key].id
type = each.value.size
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
state = "Running"
zone = var.zone
security_group_ids = [exoscale_security_group.master_sg.id]
network_interface {
network_id = exoscale_private_network.private_network.id
}
elastic_ip_ids = [exoscale_elastic_ip.control_plane_lb.id]
user_data = templatefile(
"${path.module}/templates/cloud-init.tmpl",
{
eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address
node_local_partition_size = each.value.boot_disk.node_local_partition_size
ceph_partition_size = each.value.boot_disk.ceph_partition_size
root_partition_size = each.value.boot_disk.root_partition_size
@ -62,25 +62,29 @@ resource "exoscale_compute" "master" {
)
}
resource "exoscale_compute" "worker" {
resource "exoscale_compute_instance" "worker" {
for_each = {
for name, machine in var.machines :
name => machine
if machine.node_type == "worker"
}
display_name = "${var.prefix}-${each.key}"
template_id = data.exoscale_compute_template.os_image[each.key].id
size = each.value.size
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
state = "Running"
zone = var.zone
security_groups = [exoscale_security_group.worker_sg.name]
name = "${var.prefix}-${each.key}"
template_id = data.exoscale_template.os_image[each.key].id
type = each.value.size
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
state = "Running"
zone = var.zone
security_group_ids = [exoscale_security_group.worker_sg.id]
network_interface {
network_id = exoscale_private_network.private_network.id
}
elastic_ip_ids = [exoscale_elastic_ip.ingress_controller_lb.id]
user_data = templatefile(
"${path.module}/templates/cloud-init.tmpl",
{
eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address
node_local_partition_size = each.value.boot_disk.node_local_partition_size
ceph_partition_size = each.value.boot_disk.ceph_partition_size
root_partition_size = each.value.boot_disk.root_partition_size
@ -90,41 +94,33 @@ resource "exoscale_compute" "worker" {
)
}
resource "exoscale_nic" "master_private_network_nic" {
for_each = exoscale_compute.master
compute_id = each.value.id
network_id = exoscale_network.private_network.id
}
resource "exoscale_nic" "worker_private_network_nic" {
for_each = exoscale_compute.worker
compute_id = each.value.id
network_id = exoscale_network.private_network.id
}
resource "exoscale_security_group" "master_sg" {
name = "${var.prefix}-master-sg"
description = "Security group for Kubernetes masters"
}
resource "exoscale_security_group_rules" "master_sg_rules" {
resource "exoscale_security_group_rule" "master_sg_rule_ssh" {
security_group_id = exoscale_security_group.master_sg.id
for_each = toset(var.ssh_whitelist)
# SSH
ingress {
protocol = "TCP"
cidr_list = var.ssh_whitelist
ports = ["22"]
}
type = "INGRESS"
start_port = 22
end_port = 22
protocol = "TCP"
cidr = each.value
}
resource "exoscale_security_group_rule" "master_sg_rule_k8s_api" {
security_group_id = exoscale_security_group.master_sg.id
for_each = toset(var.api_server_whitelist)
# Kubernetes API
ingress {
protocol = "TCP"
cidr_list = var.api_server_whitelist
ports = ["6443"]
}
type = "INGRESS"
start_port = 6443
end_port = 6443
protocol = "TCP"
cidr = each.value
}
resource "exoscale_security_group" "worker_sg" {
@ -132,62 +128,64 @@ resource "exoscale_security_group" "worker_sg" {
description = "security group for kubernetes worker nodes"
}
resource "exoscale_security_group_rules" "worker_sg_rules" {
resource "exoscale_security_group_rule" "worker_sg_rule_ssh" {
security_group_id = exoscale_security_group.worker_sg.id
# SSH
ingress {
protocol = "TCP"
cidr_list = var.ssh_whitelist
ports = ["22"]
}
for_each = toset(var.ssh_whitelist)
type = "INGRESS"
start_port = 22
end_port = 22
protocol = "TCP"
cidr = each.value
}
resource "exoscale_security_group_rule" "worker_sg_rule_http" {
security_group_id = exoscale_security_group.worker_sg.id
# HTTP(S)
ingress {
protocol = "TCP"
cidr_list = ["0.0.0.0/0"]
ports = ["80", "443"]
}
for_each = toset(["80", "443"])
type = "INGRESS"
start_port = each.value
end_port = each.value
protocol = "TCP"
cidr = "0.0.0.0/0"
}
# Kubernetes Nodeport
ingress {
protocol = "TCP"
cidr_list = var.nodeport_whitelist
ports = ["30000-32767"]
resource "exoscale_security_group_rule" "worker_sg_rule_nodeport" {
security_group_id = exoscale_security_group.worker_sg.id
# Kubernetes Nodeport
for_each = toset(var.nodeport_whitelist)
type = "INGRESS"
start_port = 30000
end_port = 32767
protocol = "TCP"
cidr = each.value
}
resource "exoscale_elastic_ip" "ingress_controller_lb" {
zone = var.zone
healthcheck {
mode = "http"
port = 80
uri = "/healthz"
interval = 10
timeout = 2
strikes_ok = 2
strikes_fail = 3
}
}
resource "exoscale_ipaddress" "ingress_controller_lb" {
zone = var.zone
healthcheck_mode = "http"
healthcheck_port = 80
healthcheck_path = "/healthz"
healthcheck_interval = 10
healthcheck_timeout = 2
healthcheck_strikes_ok = 2
healthcheck_strikes_fail = 3
}
resource "exoscale_secondary_ipaddress" "ingress_controller_lb" {
for_each = exoscale_compute.worker
compute_id = each.value.id
ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
}
resource "exoscale_ipaddress" "control_plane_lb" {
zone = var.zone
healthcheck_mode = "tcp"
healthcheck_port = 6443
healthcheck_interval = 10
healthcheck_timeout = 2
healthcheck_strikes_ok = 2
healthcheck_strikes_fail = 3
}
resource "exoscale_secondary_ipaddress" "control_plane_lb" {
for_each = exoscale_compute.master
compute_id = each.value.id
ip_address = exoscale_ipaddress.control_plane_lb.ip_address
resource "exoscale_elastic_ip" "control_plane_lb" {
zone = var.zone
healthcheck {
mode = "tcp"
port = 6443
interval = 10
timeout = 2
strikes_ok = 2
strikes_fail = 3
}
}
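
The rule conversion above follows one pattern throughout: the deprecated exoscale_security_group_rules resource with inline ingress {} blocks becomes one exoscale_security_group_rule resource per whitelist entry, driven by for_each. A minimal standalone sketch of that pattern (the variable default and resource names are illustrative; the attribute names are taken from the diff):

    variable "ssh_whitelist" {
      type    = list(string)
      default = ["192.0.2.0/24"] # illustrative CIDR (RFC 5737 example range)
    }

    resource "exoscale_security_group" "example_sg" {
      name = "example-sg"
    }

    # One rule resource per whitelisted CIDR, instead of a single
    # exoscale_security_group_rules resource with inline ingress {} blocks.
    resource "exoscale_security_group_rule" "example_ssh" {
      for_each          = toset(var.ssh_whitelist)
      security_group_id = exoscale_security_group.example_sg.id
      type              = "INGRESS"
      protocol          = "TCP"
      start_port        = 22
      end_port          = 22
      cidr              = each.value
    }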

View File

@ -1,19 +1,19 @@
output "master_ip_addresses" {
value = {
for key, instance in exoscale_compute.master :
for key, instance in exoscale_compute_instance.master :
instance.name => {
"private_ip" = contains(keys(data.exoscale_compute.master_nodes), key) ? data.exoscale_compute.master_nodes[key].private_network_ip_addresses[0] : ""
"public_ip" = exoscale_compute.master[key].ip_address
"private_ip" = contains(keys(data.exoscale_compute_instance.master_nodes), key) ? data.exoscale_compute_instance.master_nodes[key].private_network_ip_addresses[0] : ""
"public_ip" = exoscale_compute_instance.master[key].ip_address
}
}
}
output "worker_ip_addresses" {
value = {
for key, instance in exoscale_compute.worker :
for key, instance in exoscale_compute_instance.worker :
instance.name => {
"private_ip" = contains(keys(data.exoscale_compute.worker_nodes), key) ? data.exoscale_compute.worker_nodes[key].private_network_ip_addresses[0] : ""
"public_ip" = exoscale_compute.worker[key].ip_address
"private_ip" = contains(keys(data.exoscale_compute_instance.worker_nodes), key) ? data.exoscale_compute_instance.worker_nodes[key].private_network_ip_addresses[0] : ""
"public_ip" = exoscale_compute_instance.worker[key].ip_address
}
}
}
@ -23,9 +23,9 @@ output "cluster_private_network_cidr" {
}
output "ingress_controller_lb_ip_address" {
value = exoscale_ipaddress.ingress_controller_lb.ip_address
value = exoscale_elastic_ip.ingress_controller_lb.ip_address
}
output "control_plane_lb_ip_address" {
value = exoscale_ipaddress.control_plane_lb.ip_address
value = exoscale_elastic_ip.control_plane_lb.ip_address
}

View File

@ -1,7 +1,7 @@
terraform {
required_providers {
exoscale = {
source = "exoscale/exoscale"
source = "exoscale/exoscale"
version = ">= 0.21"
}
}
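
As noted in the commit message, the conversion is untested beyond static validation. A minimal sketch of that check, assuming the configuration lives under contrib/terraform/exoscale in the Kubespray repo (the path is an assumption):

    # Sketch only: static validation, not a deployment test.
    cd contrib/terraform/exoscale   # assumed path
    terraform init -backend=false   # fetches the exoscale/exoscale provider (>= 0.21)
    terraform validate              # the check referenced in the commit message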

View File

@ -73,6 +73,7 @@ crio_archive_checksums:
# Kubernetes versions above Kubespray's current target version are untested and should be used with caution.
kubelet_checksums:
arm:
v1.27.8: 0
v1.27.7: 0
v1.27.6: 0
v1.27.5: 0
@ -81,6 +82,7 @@ kubelet_checksums:
v1.27.2: 0
v1.27.1: 0
v1.27.0: 0
v1.26.11: a7bbe545ab23b068d5e25262bf17a10039edb5c073ba896d036af12afb5fbf9c
v1.26.10: 3130ae14dda009f92296045d9cc2a1f566fa7d85905d0f6a8db47b05496c5d53
v1.26.9: 739c62a6801d935477121614ee3a2ef6deba78ecd088ae5477c3f18bf19d68c8
v1.26.8: 9db839028b706c005fb6db4442e7dae32c2916acf826a5666d54236399f447fa
@ -109,6 +111,7 @@ kubelet_checksums:
v1.25.1: 6fe430ad91e1ed50cf5cc396aa204fda0889c36b8a3b84619d633cd9a6a146e2
v1.25.0: ad45ac3216aa186648fd034dec30a00c1a2d2d1187cab8aae21aa441a13b4faa
arm64:
v1.27.8: 71849182ceb018dc084f499ad28b7b1afb7f23e35ccaf8421941dd5dafef0d4c
v1.27.7: ed5bfa48ee64d5e6cf23ed9fc03ea0593021839429fdc1ea7cc2ebf3f11b6491
v1.27.6: be579ef4e8fa3e1de9d40a77e4d35d99e535a293f66bf3038cbea9cf803d11e5
v1.27.5: 4e78fafdeb5d61ab6ebcd6e75e968c47001c321bec169bb9bd9f001132de5321
@ -117,6 +120,7 @@ kubelet_checksums:
v1.27.2: 810cd9a611e9f084e57c9ee466e33c324b2228d4249ff38c2588a0cc3224f10d
v1.27.1: dbb09d297d924575654db38ed2fc627e35913c2d4000c34613ac6de4995457d0
v1.27.0: 37aa2edc7c0c4b3e488518c6a4b44c8aade75a55010534ee2be291220c73d157
v1.26.11: ff8940394446028e75a2b8155e22eccf635f6a128f45dee41e293493d2743d17
v1.26.10: ddebcc1af7f203a2ee3d80dad0baaf84a4680748839f5583b39cbce4b8afa7f2
v1.26.9: f6b1dcee9960ffe6b778dc91cabef8ce4a7bd06c76378ef2784232709eace6a5
v1.26.8: 0f15e484c4a7a7c3bad9e0aa4d4334ca029b97513fbe03f053201dd937cf316e
@ -145,6 +149,7 @@ kubelet_checksums:
v1.25.1: b6baa99b99ecc1f358660208a9a27b64c65f3314ff95a84c73091b51ac98484b
v1.25.0: 69572a7b3d179d4a479aa2e0f90e2f091d8d84ef33a35422fc89975dc137a590
amd64:
v1.27.8: 2e0557b38c5b9a1263eed25a0b84d741453ed9c0c7bd916f80eadaf7edfb7784
v1.27.7: 236bc8bc22c52e914d3364c23e273628c63e193365b6a43b8cb013716c1cd2f5
v1.27.6: daa42f9b6f5e2176bbce0d24d89a05613000630bcddec1fafd2a8d42a523ce9d
v1.27.5: 66df07ab4f9d72028c97ec7e5eea23adc0ab62a209ba2285431456d7d75a5bb3
@ -153,6 +158,7 @@ kubelet_checksums:
v1.27.2: a0d12afcab3b2836de4a427558d067bebdff040e9b306b0512c93d9d2a066579
v1.27.1: cb2845fff0ce41c400489393da73925d28fbee54cfeb7834cd4d11e622cbd3a7
v1.27.0: 0b4ed4fcd75d33f5dff3ba17776e6089847fc83064d3f7a3ad59a34e94e60a29
v1.26.11: a62953f20fa9fedff50c6c5423e68981e3382d92cf04174d5bca5f4d084de0c5
v1.26.10: 4c27b3a9f332a6762f7240d0784c64775d4db5a1b881eeae05c4561d06c267ec
v1.26.9: baa2b021ab2f90c342518e2b8981a18de7e1e6b33f11c57e3ff23d40364877a8
v1.26.8: 1c68a65a6a0c2230325e29da0cc3eaaef9bbf688a7a0bb8243b4a7ebfe0e3363
@ -181,6 +187,7 @@ kubelet_checksums:
v1.25.1: 63e38bcbc4437ce10227695f8722371ec0d178067f1031d09fe1f59b6fcf214a
v1.25.0: 7f9183fce12606818612ce80b6c09757452c4fb50aefea5fc5843951c5020e24
ppc64le:
v1.27.8: 2354fdb19b5018cabe43fde1979965686afd3c95b75531e678a0064c4a30b4e9
v1.27.7: 9154a7b5d5793fb8a05cba0d309ddfd975409d0e0686be19cf2feca172f06162
v1.27.6: 1001da3586a3f868c371aefde991af94ca780ec1599c8a969390ba105aaf9dcb
v1.27.5: 3c643564bf07753c1388096aef9125811800fd28aa6a5faf3bfb1cef0e1637eb
@ -189,6 +196,7 @@ kubelet_checksums:
v1.27.2: 3af92edd687f7932e7fce877944dfe5efa437bf5f171fc8331725c631a1a86ef
v1.27.1: 7a800b9539beaba0b5d6357070a40fb3c4d216c2ad6693b15f9b1307b1c99e1f
v1.27.0: 17c061a9f7919697ac71c151c19337f65b86f59f59441687ac92e977d851c75b
v1.26.11: 408e47e33159551ebeeb6f4ef836bd7d07fc92d16ca6d662c85ea8fa0fe34a8e
v1.26.10: 5cc1046c8f9188dc6d5ec22200eeed7c8c1ae88a23ce5d79e8dfaf1ff61f2bf8
v1.26.9: e87a83c1ca74e55cea51eda53d29324de7fb7f9330c266ea1f2e270fe0f9b677
v1.26.8: 92c8deba1f6a89a6d6555c224cebab43d141d5822c252511988ad43ff1a7cc1d
@ -218,6 +226,7 @@ kubelet_checksums:
v1.25.0: 8015f88d1364cf77436c157de8a5d3ab87f1cb2dfaa9289b097c92a808845491
kubectl_checksums:
arm:
v1.27.8: 2f2936f950beb3f08ee0e45fbf80d020163829b95aa11c99ec726ee1a922329c
v1.27.7: 2a387ade64cd4cf90f002ae8c0e6f60250d26d1ee2cce6051f559430a44352ea
v1.27.6: 3a34a38908a9d0f85dc531cc1c49061ceeaa2ab742382d891d9fc7bf8dc53b8c
v1.27.5: c5e8a02102a93c84413ce8a029f194049429d27ad559061de267d84020a4594b
@ -226,6 +235,7 @@ kubectl_checksums:
v1.27.2: 7792f5630543c0af84f444521ee6113da5ae00f2b50872d57324aa725a5341c5
v1.27.1: fe704e355bf2c5f69964cd12772687535a11a5e9ec0baf4f27e0a8fb156bc615
v1.27.0: 288470e3eb89a2f55273d753ce6674dfb00e732f2971428acb964810aa726188
v1.26.11: 34cc371daef6e42aea27117aa643619ab870d1cb61995530fc9bcb992e3caf10
v1.26.10: d26ab68b4884eec4dcadd0f564ba197d48f21aebed509b9b1541d31f2aaf5890
v1.26.9: 8e020ffe72dd4c8694ee5e9f124833ca302a2341fa046650482b38ddb189d1fd
v1.26.8: 411c5c6ba9a247d7fa30f68fd37cfdb92ef14326127bed2512a0daf11a6097d4
@ -254,6 +264,7 @@ kubectl_checksums:
v1.25.1: e8c6bfd8797e42501d14c7d75201324630f15436f712c4f7e46ce8c8067d9adc
v1.25.0: 0b907cfdcabafae7d2d4ac7de55e3ef814df999acdf6b1bd0ecf6abbef7c7131
arm64:
v1.27.8: 97ed6739e2803e63fd2d9de78be22d5ba6205bb63179a16ec773063526525a8e
v1.27.7: 61fc334f2c0290270e43fb8a1d4ff07e8cec5642d5a123eb7ab66a134b04ae83
v1.27.6: 7322a6f600de6d0d06cf333bdc24cd2a340bba12920b0c2385c97884c808c810
v1.27.5: 0158955c59c775165937918f910380ed7b52fca4a26fb41a369734e83aa44874
@ -262,6 +273,7 @@ kubectl_checksums:
v1.27.2: 1b0966692e398efe71fe59f913eaec44ffd4468cc1acd00bf91c29fa8ff8f578
v1.27.1: fd3cb8f16e6ed8aee9955b76e3027ac423b6d1cc7356867310d128082e2db916
v1.27.0: f8e09630211f2b7c6a8cc38835e7dea94708d401f5c84b23a37c70c604602ddc
v1.26.11: 4a6d2b7204af3cf84cd0e2c670fbb211501050c9a288de49de3c6363d4e0a63e
v1.26.10: 5752e3908fa1d338eb1fa99a6f39c6a4c27b065cb459da84e35c4ec718879f14
v1.26.9: f945c63220b393ddf8df67d87e67ff74b7f56219a670dee38bc597a078588e90
v1.26.8: e93f836cba409b5ef5341020d9501067a51bf8210cb35649518e5f4d114244cf
@ -290,6 +302,7 @@ kubectl_checksums:
v1.25.1: 73602eabf20b877f88642fafcbe1eda439162c2c1dbcc9ed09fdd4d7ac9919ea
v1.25.0: 24db547bbae294c5c44f2b4a777e45f0e2f3d6295eace0d0c4be2b2dfa45330d
amd64:
v1.27.8: 027b3161e99fa0a7fa529e8f17f73ee2c0807c81c721ca7cf307f6b41c17bc57
v1.27.7: e5fe510ba6f421958358d3d43b3f0b04c2957d4bc3bb24cf541719af61a06d79
v1.27.6: 2b7adb71c8630904da1b94e262c8c3c477e9609b3c0ed8ae1213a1e156ae38dd
v1.27.5: 9a091fb65e4cf4e8be3ce9a21c79210177dd7ce31a2998ec638c92f37f058bcd
@ -298,6 +311,7 @@ kubectl_checksums:
v1.27.2: 4f38ee903f35b300d3b005a9c6bfb9a46a57f92e89ae602ef9c129b91dc6c5a5
v1.27.1: 7fe3a762d926fb068bae32c399880e946e8caf3d903078bea9b169dcd5c17f6d
v1.27.0: 71a78259d70da9c5540c4cf4cff121f443e863376f68f89a759d90cef3f51e87
v1.26.11: 27c34a0870230d9dd723e1e01114634e396cd2a3d25ced263b769a4bd53e4edd
v1.26.10: 93ad44b4072669237247bfbc171be816f08e7e9e4260418d2cfdd0da1704ae86
v1.26.9: 98ea4a13895e54ba24f57e0d369ff6be0d3906895305d5390197069b1da12ae2
v1.26.8: d8e0dba258d1096f95bb6746ca359db2ee8abe226e777f89dc8a5d1bb76795aa
@ -326,6 +340,7 @@ kubectl_checksums:
v1.25.1: 9cc2d6ce59740b6acf6d5d4a04d4a7d839b0a81373248ef0ce6c8d707143435b
v1.25.0: e23cc7092218c95c22d8ee36fb9499194a36ac5b5349ca476886b7edc0203885
ppc64le:
v1.27.8: e25a09dea99192ff43ee13af61bfadd7c79eb538dc8e85376b6c590b4d471204
v1.27.7: a60da56f856bc5b224f0be71b03443d5a0fb69424f31cd86f069cc79c13b2870
v1.27.6: f3ed7752a20dbae271eeff9e9d109381e3ed6772853b5c84dc8a7476bbad847c
v1.27.5: 7ab5fe6eb51bd267b3156ef6e18f9e264e6c7c26ec0dafc2f55edcf3164bac99
@ -334,6 +349,7 @@ kubectl_checksums:
v1.27.2: efee037a276f72c77cc230194d7dadf943a5778be46b7985edeb414d27894266
v1.27.1: 440bcfd9611319f3d9e5d4fa4cdee2421cdf80c01fad223934d9a9b640673d75
v1.27.0: daa9f1d4fe3f217de2546bca4ac14601f34b34a25c1f571f1e44eb313aee1385
v1.26.11: f2c577e75277415203e699ca7bc08eb640a0b8211a0366926122e3ac4f98997c
v1.26.10: 24674d7ce1ccef207d127bd952f6f40f951d7682cfa603f6989ed80e91dcaafc
v1.26.9: bcb287f24a30bd7ef27bc36dc4f896aba3f1091f947afde73576fbd81af65cc5
v1.26.8: e94748f8954f44bd5ad5be78a2906ee6a8db7c00ea2d50c9db1bfa09cfc097b9
@ -363,6 +379,7 @@ kubectl_checksums:
v1.25.0: dffe15c626d7921d77e85f390b15f13ebc3a9699785f6b210cd13fa6f4653513
kubeadm_checksums:
arm:
v1.27.8: 0
v1.27.7: 0
v1.27.6: 0
v1.27.5: 0
@ -371,6 +388,7 @@ kubeadm_checksums:
v1.27.2: 0
v1.27.1: 0
v1.27.0: 0
v1.26.11: 3683f52861f89c3cf48ea7754fd2e81ad30a51bef57d056b2663e5e9139870b6
v1.26.10: b59433ebaddf5c5d816033304989f9dd2e0b863650532cd195c1d41c0f973f14
v1.26.9: a6841e7e554407776e4d0fc83306756ad1836d1f92d6d5cce1055eee1999732a
v1.26.8: 31f37eeed5a9e23719e97055051a5efada2fb69deda958056b3d6b0b41e7eaa5
@ -399,6 +417,7 @@ kubeadm_checksums:
v1.25.1: ecb7a459ca23dfe527f4eedf33fdb0df3d55519481a8be3f04a5c3a4d41fa588
v1.25.0: 67b6b58cb6abd5a4c9024aeaca103f999077ce6ec8e2ca13ced737f5139ad2f0
arm64:
v1.27.8: 0d0f5b2781d663d314e785d14361aa5a09cfaf6e1694aa3cc731f4f06342ec13
v1.27.7: 46d7c43532233906919a53ee0e03ab04ab9e08514392d17a86f058e0364cda4b
v1.27.6: faec35315203913b835e9b789d89001a05e072943c960bcf4de1e331d08e10c8
v1.27.5: 3023ef1d2eff885af860e13c8b9fcdb857d259728f16bf992d59c2be522cec82
@ -407,6 +426,7 @@ kubeadm_checksums:
v1.27.2: 8f01f363f7c7f92de2f2276124a895503cdc5a60ff549440170880f296b087eb
v1.27.1: 024a59cd6fc76784b597c0c1cf300526e856e8c9fefa5fa7948158929b739551
v1.27.0: acd805c6783b678ee0068b9dd8165bbfd879c345fd9c25d6a978dbc965f48544
v1.26.11: a13318c1493e58a9f7c4359c79443f3c86a690ec601bcc76308c809d8d61edb8
v1.26.10: 1ddcb47ee4f7171736dbacc046a7ceae55411ee09920435c3821b530f4650428
v1.26.9: 14c87cbb9a3fa02308a9546aad192ce2d93e5d1d0296d28ba449079e6a1cb2b2
v1.26.8: f12d5d748abb8586723b78a2f0300f88faf0391f56d4d49f1ad1cef74160a1b5
@ -435,6 +455,7 @@ kubeadm_checksums:
v1.25.1: f4d57d89c53b7fb3fe347c9272ed40ec55eab120f4f09cd6b684e97cb9cbf1f0
v1.25.0: 07d9c6ffd3676502acd323c0ca92f44328a1f0e89a7d42a664099fd3016cf16b
amd64:
v1.27.8: f8864769b8b2d7a14f53eb983f23317ff14d68ab76aba71e9de17ce84c38d4eb
v1.27.7: bc589219a003b3b94c114e4bcf20549a02657a0c6e5c73f588b37817148892d2
v1.27.6: 2bcdd68957ec25d0689bb56f32b4ec86e38463d2691d5ea21cd109c7afa3aa7c
v1.27.5: 35df8efa6e1bc864ed3c48a665caed634a5c46cfd7f41cda5ad66defdfddb2aa
@ -443,6 +464,7 @@ kubeadm_checksums:
v1.27.2: 95c4bfb7929900506a42de4d92280f06efe6b47e0a32cbc1f5a1ed737592977a
v1.27.1: c7d32d698e99b90f877025104cb4a9f3f8c707e99e6817940f260135b6d1ad0a
v1.27.0: 78d0e04705a7bdb76a514d60f60c073b16334b15f57ee87f064354ca8a233e80
v1.26.11: 58f886e39e517ba1a92493f136e80f1b6ea9362966ad9d2accdf2133004161f2
v1.26.10: 27ed1d857f4a315f3d059168c6e25fdbf0559f9c8e59bab6c50e7921f74dadbf
v1.26.9: 73e128821dd1f799a75c922218d12f6c4618b8e29cc7dae2a7390fb80092d3d9
v1.26.8: 233a89277ca49dbd666b7391c6c0e43c33d2f08052d5b93e9cd0100ee69430c8
@ -471,6 +493,7 @@ kubeadm_checksums:
v1.25.1: adaa1e65c1cf9267a01e889d4c13884f883cf27948f00abb823f10486f1a8420
v1.25.0: 10b30b87af2cdc865983d742891eba467d038f94f3926bf5d0174f1abf6628f8
ppc64le:
v1.27.8: d65b972cd661cb28972f0df731f9e5b65d959920275bad5ef44ff94d3bb8331d
v1.27.7: 6a95a8fb5560a6698d895b2a809485ace0cf5b8c6fb89c843ab20cf89d8b11ad
v1.27.6: f2b53fdcd0a71390e84d16facbcd7a581f1309cb8bd0501f9508ebefe5a3498c
v1.27.5: 3df86ca5de57a6c6b4043be3c050ed9ed39a50720364b399e12e9e52e87e377b
@ -479,6 +502,7 @@ kubeadm_checksums:
v1.27.2: 412bccd310f4976201d359f0637745944944c0fb2ace315e5e07b180445530c7
v1.27.1: d4c46dcc3d210b6eae0b8c34b3ece9f24b1bb2697175615c451db717a99430fb
v1.27.0: cf2860aef800496fee0d9fd8722bd7d17c6609e32d87ca380127151f2ce02bb0
v1.26.11: bfa424bccba7c10b4a011ef1cc5b282ac5861e30e3e3897ac8ef1cf8cd95b723
v1.26.10: 5dad57d977923d80f20e1d3b9be5dba806642682a0c1bb511c23543c710761cb
v1.26.9: 1cd0e3623b93aa1786dddb73570a841323db35df4eca45004db2046550ca5d12
v1.26.8: c93248ce2c9906d16fcb7590d8f3929406b28967da79d6a01c2b2d39203a7f58
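
The values above are the upstream SHA-256 sums of the release binaries; the 0 entries appear to be placeholders for architectures without a recorded hash. A hedged sketch of how one entry can be reproduced, assuming the standard dl.k8s.io release layout:

    # Sketch: reproduce the amd64 kubelet v1.27.8 checksum added in this commit.
    curl -LO https://dl.k8s.io/release/v1.27.8/bin/linux/amd64/kubelet
    sha256sum kubelet
    # expected: 2e0557b38c5b9a1263eed25a0b84d741453ed9c0c7bd916f80eadaf7edfb7784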

View File

@ -1,7 +1,7 @@
---
- name: Stop if either kube_control_plane or kube_node group is empty
assert:
that: "groups.get('{{ item }}')"
that: "groups.get( item )"
with_items:
- kube_control_plane
- kube_node
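
The fix drops the Jinja template embedded inside the assert expression: that: is already evaluated as a Jinja expression, so '{{ item }}' was a template nested inside a template, which is the "embedded template" the commit message removes. A sketch of the resulting task with indentation restored (content matches the diff above):

    - name: Stop if either kube_control_plane or kube_node group is empty
      assert:
        that: "groups.get( item )"   # item is resolved by the expression itself, no '{{ }}' needed
      with_items:
        - kube_control_plane
        - kube_node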

View File

@ -16,7 +16,7 @@ kubelet_fail_swap_on: true
kubelet_swap_behavior: LimitedSwap
## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.27.7
kube_version: v1.27.8
## The minimum version working
kube_version_min_required: v1.25.0
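
With this commit the default moves to v1.27.8. As the comment above suggests, deployments can still pin another supported release in inventory group vars; a sketch, assuming Kubespray's sample inventory layout (the path is an assumption):

    # inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml (assumed path)
    # Pin a release instead of the new v1.27.8 default; it must be at least
    # kube_version_min_required (v1.25.0) and have checksums listed above.
    kube_version: v1.26.11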