[2.21] Ensure that CI is available (#10673)
* Convert exoscale tf provider to new version (#10646)

  This is untested. It passes terraform validate to un-break the CI.

* Specify pyyaml version to 5.3.1

  Signed-off-by: bo.jiang <bo.jiang@daocloud.io>

* Migrate CI_BUILD_ID to CI_JOB_ID and CI_BUILD_REF to CI_COMMIT_SHA (#10063)

* Use supported version of fedora in CI (#10108)

  * tests: replace fedora35 with fedora37
  * tests: replace fedora36 with fedora38
  * docs: update fedora version in docs
  * molecule: upgrade fedora version
  * tests: upgrade fedora images for vagrant and kubevirt
  * vagrant: workaround to fix private network ip address in fedora

    Fedora stopped supporting sysconfig network scripts, so we added the workaround from
    https://github.com/hashicorp/vagrant/issues/12762#issuecomment-1535957837 to fix it.

  * networkmanager: do not configure dns if using systemd-resolved

    We should not configure DNS in NetworkManager when it points to systemd-resolved.
    systemd-resolved uses NetworkManager to infer the upstream DNS server, so setting
    NetworkManager to 127.0.0.53 would prevent systemd-resolved from getting the correct
    network DNS server. In that case we simply leave the setting alone.

  * image-builder: update centos7 image
  * gitlab-ci: mark fedora packet jobs as allow_failure

    Fedora networking is still broken on Packet, so mark these jobs as allow_failure for now.

Signed-off-by: bo.jiang <bo.jiang@daocloud.io>
Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
Co-authored-by: Max Gautier <mg@max.gautier.name>
Co-authored-by: Florian Ruynat <16313165+floryut@users.noreply.github.com>
Co-authored-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

pull/10665/head
parent d498df20db
commit c3e73aabcf
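The systemd-resolved rule from the commit message, sketched as a plain shell check. This is only an illustration of the condition the role applies, not the actual Ansible task; 127.0.0.53 and the file paths are the usual systemd-resolved defaults:

    # If DNS already goes through the systemd-resolved stub, leave NetworkManager's
    # dns settings alone so resolved can keep learning the upstream servers from NM.
    if systemctl is-active --quiet systemd-resolved && grep -qs '^nameserver 127\.0\.0\.53' /etc/resolv.conf; then
        echo "systemd-resolved owns DNS; not writing servers into NetworkManager"
    else
        echo "safe to configure NetworkManager DNS here"
    fi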
@@ -14,7 +14,7 @@ variables:
   GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
   ANSIBLE_FORCE_COLOR: "true"
   MAGIC: "ci check this"
-  TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
+  TEST_ID: "$CI_PIPELINE_ID-$CI_JOB_ID"
   CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
   CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
   CI_TEST_SETTING: "./tests/common/_kubespray_test_settings.yml"

@@ -86,7 +86,7 @@ packet_ubuntu18-crio:
   stage: deploy-part2
   when: manual

-packet_fedora35-crio:
+packet_fedora37-crio:
   extends: .packet_pr
   stage: deploy-part2
   when: manual

@@ -173,10 +173,11 @@ packet_almalinux8-docker:
   extends: .packet_pr
   when: on_success

-packet_fedora36-docker-weave:
+packet_fedora38-docker-weave:
   stage: deploy-part2
   extends: .packet_pr
   when: on_success
+  allow_failure: true

 packet_opensuse-canal:
   stage: deploy-part2

@@ -236,19 +237,19 @@ packet_centos7-canal-ha:
   extends: .packet_pr
   when: manual

-packet_fedora36-docker-calico:
+packet_fedora38-docker-calico:
   stage: deploy-part2
   extends: .packet_periodic
   when: on_success
   variables:
     RESET_CHECK: "true"

-packet_fedora35-calico-selinux:
+packet_fedora37-calico-selinux:
   stage: deploy-part2
   extends: .packet_periodic
   when: on_success

-packet_fedora35-calico-swap-selinux:
+packet_fedora37-calico-swap-selinux:
   stage: deploy-part2
   extends: .packet_pr
   when: manual

@@ -263,7 +264,7 @@ packet_almalinux8-calico-nodelocaldns-secondary:
   extends: .packet_pr
   when: manual

-packet_fedora36-kube-ovn:
+packet_fedora38-kube-ovn:
   stage: deploy-part2
   extends: .packet_periodic
   when: on_success

@@ -56,7 +56,7 @@ vagrant_ubuntu16-kube-router-svc-proxy:
   extends: .vagrant
   when: manual

-vagrant_fedora35-kube-router:
+vagrant_fedora37-kube-router:
   stage: deploy-part2
   extends: .vagrant
   when: on_success

@@ -134,7 +134,7 @@ vagrant up
 - **Debian** Bullseye, Buster, Jessie, Stretch
 - **Ubuntu** 16.04, 18.04, 20.04, 22.04
 - **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
-- **Fedora** 35, 36
+- **Fedora** 37, 38
 - **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
 - **openSUSE** Leap 15.x/Tumbleweed
 - **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8)

@@ -29,8 +29,8 @@ SUPPORTED_OS = {
   "almalinux8" => {box: "almalinux/8", user: "vagrant"},
   "almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
   "rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
-  "fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
-  "fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"},
+  "fedora37" => {box: "fedora/37-cloud-base", user: "vagrant"},
+  "fedora38" => {box: "fedora/38-cloud-base", user: "vagrant"},
   "opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
   "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
   "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},

@@ -201,7 +201,8 @@ Vagrant.configure("2") do |config|
       end

       ip = "#{$subnet}.#{i+100}"
-      node.vm.network :private_network, ip: ip,
+      node.vm.network :private_network,
+        :ip => ip,
         :libvirt__guest_ipv6 => 'yes',
         :libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
         :libvirt__ipv6_prefix => "64",

@@ -216,6 +217,14 @@ Vagrant.configure("2") do |config|
         node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
         node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
       end
+      # Hack for fedora37/38 to get the IP address of the second interface
+      if ["fedora37", "fedora38"].include? $os
+        config.vm.provision "shell", inline: <<-SHELL
+          nmcli conn modify 'Wired connection 2' ipv4.addresses $(cat /etc/sysconfig/network-scripts/ifcfg-eth1 | grep IPADDR | cut -d "=" -f2)
+          nmcli conn modify 'Wired connection 2' ipv4.method manual
+          service NetworkManager restart
+        SHELL
+      end

       # Disable firewalld on oraclelinux/redhat vms
       if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os

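A quick way to confirm the Fedora second-interface workaround above actually applied inside a guest (the machine name is a placeholder; eth1 being the libvirt private network is the layout this Vagrantfile assumes):

    vagrant ssh k8s-1 -c "nmcli -g ipv4.addresses connection show 'Wired connection 2'"
    vagrant ssh k8s-1 -c "ip -4 addr show eth1"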
@@ -12,7 +12,7 @@ ssh_public_keys = [
 machines = {
   "master-0" : {
     "node_type" : "master",
-    "size" : "Medium",
+    "size" : "standard.medium",
     "boot_disk" : {
       "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
       "root_partition_size" : 50,

@@ -22,7 +22,7 @@ machines = {
   },
   "worker-0" : {
     "node_type" : "worker",
-    "size" : "Large",
+    "size" : "standard.large",
     "boot_disk" : {
       "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
       "root_partition_size" : 50,

@@ -32,7 +32,7 @@ machines = {
   },
   "worker-1" : {
     "node_type" : "worker",
-    "size" : "Large",
+    "size" : "standard.large",
     "boot_disk" : {
       "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
       "root_partition_size" : 50,

@@ -42,7 +42,7 @@ machines = {
   },
   "worker-2" : {
     "node_type" : "worker",
-    "size" : "Large",
+    "size" : "standard.large",
     "boot_disk" : {
       "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
       "root_partition_size" : 50,

@@ -1,29 +1,25 @@
-data "exoscale_compute_template" "os_image" {
+data "exoscale_template" "os_image" {
   for_each = var.machines

   zone = var.zone
   name = each.value.boot_disk.image_name
 }

-data "exoscale_compute" "master_nodes" {
-  for_each = exoscale_compute.master
+data "exoscale_compute_instance" "master_nodes" {
+  for_each = exoscale_compute_instance.master

-  id = each.value.id
-
-  # Since private IP address is not assigned until the nics are created we need this
-  depends_on = [exoscale_nic.master_private_network_nic]
+  id = each.value.id
+  zone = var.zone
 }

-data "exoscale_compute" "worker_nodes" {
-  for_each = exoscale_compute.worker
+data "exoscale_compute_instance" "worker_nodes" {
+  for_each = exoscale_compute_instance.worker

-  id = each.value.id
-
-  # Since private IP address is not assigned until the nics are created we need this
-  depends_on = [exoscale_nic.worker_private_network_nic]
+  id = each.value.id
+  zone = var.zone
 }

-resource "exoscale_network" "private_network" {
+resource "exoscale_private_network" "private_network" {
   zone = var.zone
   name = "${var.prefix}-network"

@@ -34,25 +30,29 @@ resource "exoscale_network" "private_network" {
   netmask = cidrnetmask(var.private_network_cidr)
 }

-resource "exoscale_compute" "master" {
+resource "exoscale_compute_instance" "master" {
   for_each = {
     for name, machine in var.machines :
     name => machine
     if machine.node_type == "master"
   }

-  display_name = "${var.prefix}-${each.key}"
-  template_id = data.exoscale_compute_template.os_image[each.key].id
-  size = each.value.size
-  disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
-  state = "Running"
-  zone = var.zone
-  security_groups = [exoscale_security_group.master_sg.name]
+  name = "${var.prefix}-${each.key}"
+  template_id = data.exoscale_template.os_image[each.key].id
+  type = each.value.size
+  disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
+  state = "Running"
+  zone = var.zone
+  security_group_ids = [exoscale_security_group.master_sg.id]
+  network_interface {
+    network_id = exoscale_private_network.private_network.id
+  }
+  elastic_ip_ids = [exoscale_elastic_ip.control_plane_lb.id]

   user_data = templatefile(
     "${path.module}/templates/cloud-init.tmpl",
     {
-      eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
+      eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address
       node_local_partition_size = each.value.boot_disk.node_local_partition_size
       ceph_partition_size = each.value.boot_disk.ceph_partition_size
       root_partition_size = each.value.boot_disk.root_partition_size

@@ -62,25 +62,29 @@ resource "exoscale_compute" "master" {
   )
 }

-resource "exoscale_compute" "worker" {
+resource "exoscale_compute_instance" "worker" {
   for_each = {
     for name, machine in var.machines :
     name => machine
     if machine.node_type == "worker"
   }

-  display_name = "${var.prefix}-${each.key}"
-  template_id = data.exoscale_compute_template.os_image[each.key].id
-  size = each.value.size
-  disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
-  state = "Running"
-  zone = var.zone
-  security_groups = [exoscale_security_group.worker_sg.name]
+  name = "${var.prefix}-${each.key}"
+  template_id = data.exoscale_template.os_image[each.key].id
+  type = each.value.size
+  disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
+  state = "Running"
+  zone = var.zone
+  security_group_ids = [exoscale_security_group.worker_sg.id]
+  network_interface {
+    network_id = exoscale_private_network.private_network.id
+  }
+  elastic_ip_ids = [exoscale_elastic_ip.ingress_controller_lb.id]

   user_data = templatefile(
     "${path.module}/templates/cloud-init.tmpl",
     {
-      eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
+      eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address
       node_local_partition_size = each.value.boot_disk.node_local_partition_size
       ceph_partition_size = each.value.boot_disk.ceph_partition_size
       root_partition_size = each.value.boot_disk.root_partition_size

@@ -90,41 +94,33 @@ resource "exoscale_compute" "worker" {
   )
 }

-resource "exoscale_nic" "master_private_network_nic" {
-  for_each = exoscale_compute.master
-
-  compute_id = each.value.id
-  network_id = exoscale_network.private_network.id
-}
-
-resource "exoscale_nic" "worker_private_network_nic" {
-  for_each = exoscale_compute.worker
-
-  compute_id = each.value.id
-  network_id = exoscale_network.private_network.id
-}
-
 resource "exoscale_security_group" "master_sg" {
   name = "${var.prefix}-master-sg"
   description = "Security group for Kubernetes masters"
 }

-resource "exoscale_security_group_rules" "master_sg_rules" {
+resource "exoscale_security_group_rule" "master_sg_rule_ssh" {
   security_group_id = exoscale_security_group.master_sg.id

+  for_each = toset(var.ssh_whitelist)
-  # SSH
-  ingress {
-    protocol = "TCP"
-    cidr_list = var.ssh_whitelist
-    ports = ["22"]
-  }
+  type = "INGRESS"
+  start_port = 22
+  end_port = 22
+  protocol = "TCP"
+  cidr = each.value
 }

+resource "exoscale_security_group_rule" "master_sg_rule_k8s_api" {
+  security_group_id = exoscale_security_group.master_sg.id
+
+  for_each = toset(var.api_server_whitelist)
-  # Kubernetes API
-  ingress {
-    protocol = "TCP"
-    cidr_list = var.api_server_whitelist
-    ports = ["6443"]
-  }
+  type = "INGRESS"
+  start_port = 6443
+  end_port = 6443
+  protocol = "TCP"
+  cidr = each.value
 }

 resource "exoscale_security_group" "worker_sg" {

@@ -132,62 +128,64 @@ resource "exoscale_security_group" "worker_sg" {
   description = "security group for kubernetes worker nodes"
 }

-resource "exoscale_security_group_rules" "worker_sg_rules" {
+resource "exoscale_security_group_rule" "worker_sg_rule_ssh" {
   security_group_id = exoscale_security_group.worker_sg.id

-  # SSH
-  ingress {
-    protocol = "TCP"
-    cidr_list = var.ssh_whitelist
-    ports = ["22"]
-  }
+  for_each = toset(var.ssh_whitelist)
+  type = "INGRESS"
+  start_port = 22
+  end_port = 22
+  protocol = "TCP"
+  cidr = each.value
 }

+resource "exoscale_security_group_rule" "worker_sg_rule_http" {
+  security_group_id = exoscale_security_group.worker_sg.id
+
   # HTTP(S)
-  ingress {
-    protocol = "TCP"
-    cidr_list = ["0.0.0.0/0"]
-    ports = ["80", "443"]
-  }
+  for_each = toset(["80", "443"])
+  type = "INGRESS"
+  start_port = each.value
+  end_port = each.value
+  protocol = "TCP"
+  cidr = "0.0.0.0/0"
 }

-  # Kubernetes Nodeport
-  ingress {
-    protocol = "TCP"
-    cidr_list = var.nodeport_whitelist
-    ports = ["30000-32767"]

+resource "exoscale_security_group_rule" "worker_sg_rule_nodeport" {
+  security_group_id = exoscale_security_group.worker_sg.id
+
+  # HTTP(S)
+  for_each = toset(var.nodeport_whitelist)
+  type = "INGRESS"
+  start_port = 30000
+  end_port = 32767
+  protocol = "TCP"
+  cidr = each.value
 }

+resource "exoscale_elastic_ip" "ingress_controller_lb" {
+  zone = var.zone
+  healthcheck {
+    mode = "http"
+    port = 80
+    uri = "/healthz"
+    interval = 10
+    timeout = 2
+    strikes_ok = 2
+    strikes_fail = 3
+  }
+}
+
-resource "exoscale_ipaddress" "ingress_controller_lb" {
-  zone = var.zone
-  healthcheck_mode = "http"
-  healthcheck_port = 80
-  healthcheck_path = "/healthz"
-  healthcheck_interval = 10
-  healthcheck_timeout = 2
-  healthcheck_strikes_ok = 2
-  healthcheck_strikes_fail = 3
-}
-
-resource "exoscale_secondary_ipaddress" "ingress_controller_lb" {
-  for_each = exoscale_compute.worker
-
-  compute_id = each.value.id
-  ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
-}
-
-resource "exoscale_ipaddress" "control_plane_lb" {
-  zone = var.zone
-  healthcheck_mode = "tcp"
-  healthcheck_port = 6443
-  healthcheck_interval = 10
-  healthcheck_timeout = 2
-  healthcheck_strikes_ok = 2
-  healthcheck_strikes_fail = 3
-}
-
-resource "exoscale_secondary_ipaddress" "control_plane_lb" {
-  for_each = exoscale_compute.master
-
-  compute_id = each.value.id
-  ip_address = exoscale_ipaddress.control_plane_lb.ip_address
+resource "exoscale_elastic_ip" "control_plane_lb" {
+  zone = var.zone
+  healthcheck {
+    mode = "tcp"
+    port = 6443
+    interval = 10
+    timeout = 2
+    strikes_ok = 2
+    strikes_fail = 3
+  }
 }

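Note for anyone reusing this: the ingress elastic IP above keeps an HTTP health check on /healthz, port 80, so every worker has to answer that path before Exoscale will steer traffic to it. A rough manual probe against one worker (the address below is only a placeholder):

    curl -fsS -o /dev/null -w '%{http_code}\n' http://203.0.113.10/healthz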
@@ -1,19 +1,19 @@
 output "master_ip_addresses" {
   value = {
-    for key, instance in exoscale_compute.master :
+    for key, instance in exoscale_compute_instance.master :
     instance.name => {
-      "private_ip" = contains(keys(data.exoscale_compute.master_nodes), key) ? data.exoscale_compute.master_nodes[key].private_network_ip_addresses[0] : ""
-      "public_ip" = exoscale_compute.master[key].ip_address
+      "private_ip" = contains(keys(data.exoscale_compute_instance.master_nodes), key) ? data.exoscale_compute_instance.master_nodes[key].private_network_ip_addresses[0] : ""
+      "public_ip" = exoscale_compute_instance.master[key].ip_address
     }
   }
 }

 output "worker_ip_addresses" {
   value = {
-    for key, instance in exoscale_compute.worker :
+    for key, instance in exoscale_compute_instance.worker :
     instance.name => {
-      "private_ip" = contains(keys(data.exoscale_compute.worker_nodes), key) ? data.exoscale_compute.worker_nodes[key].private_network_ip_addresses[0] : ""
-      "public_ip" = exoscale_compute.worker[key].ip_address
+      "private_ip" = contains(keys(data.exoscale_compute_instance.worker_nodes), key) ? data.exoscale_compute_instance.worker_nodes[key].private_network_ip_addresses[0] : ""
+      "public_ip" = exoscale_compute_instance.worker[key].ip_address
     }
   }
 }

@@ -23,9 +23,9 @@ output "cluster_private_network_cidr" {
 }

 output "ingress_controller_lb_ip_address" {
-  value = exoscale_ipaddress.ingress_controller_lb.ip_address
+  value = exoscale_elastic_ip.ingress_controller_lb.ip_address
 }

 output "control_plane_lb_ip_address" {
-  value = exoscale_ipaddress.control_plane_lb.ip_address
+  value = exoscale_elastic_ip.control_plane_lb.ip_address
 }

@@ -1,7 +1,7 @@
 terraform {
   required_providers {
     exoscale = {
-      source = "exoscale/exoscale"
+      source = "exoscale/exoscale"
       version = ">= 0.21"
     }
   }

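As the commit message says, the provider conversion is untested against a real Exoscale account and was only checked syntactically. That check amounts to roughly the following (the directory is assumed to be the exoscale module touched above; validation needs no credentials):

    cd contrib/terraform/exoscale
    terraform init -backend=false   # fetches the exoscale/exoscale provider >= 0.21
    terraform validate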
docs/ci.md
@@ -12,8 +12,8 @@ centos7 | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: |
 debian10 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
 debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
-fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
-fedora36 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
+fedora37 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
+fedora38 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
 opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux9 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |

@@ -32,8 +32,8 @@ centos7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian11 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-fedora36 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+fedora37 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+fedora38 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |

@@ -52,8 +52,8 @@ centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-fedora35 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-fedora36 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
+fedora37 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+fedora38 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
 opensuse | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
 rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |

@@ -24,7 +24,7 @@ platforms:
       - kube_node
       - k8s_cluster
   - name: fedora
-    box: fedora/36-cloud-base
+    box: fedora/38-cloud-base
     cpus: 2
     memory: 2048
     groups:

@@ -7,6 +7,8 @@
     value: "{{ nameserverentries }}"
     mode: '0600'
     backup: yes
+  when:
+    - nameserverentries != "127.0.0.53" or systemd_resolved_enabled.rc != 0
   notify: Preinstall | update resolvconf for networkmanager

 - name: set default dns if remove_default_searchdomains is false

@@ -2,18 +2,18 @@
 # A naive premoderation script to allow Gitlab CI pipeline on a specific PRs' comment
 # Exits with 0, if the pipeline is good to go
 # Exits with 1, if the user is not allowed to start pipeline
-# Exits with 2, if script is unable to get issue id from CI_BUILD_REF_NAME variable
+# Exits with 2, if script is unable to get issue id from CI_COMMIT_REF_NAME variable
 # Exits with 3, if missing the magic comment in the pipeline to start the pipeline

 CURL_ARGS="-fs --retry 4 --retry-delay 5"
 MAGIC="${MAGIC:-ci check this}"
 exit_code=0

-# Get PR number from CI_BUILD_REF_NAME
-issue=$(echo ${CI_BUILD_REF_NAME} | perl -ne '/^pr-(\d+)-\S+$/ && print $1')
+# Get PR number from CI_COMMIT_REF_NAME
+issue=$(echo ${CI_COMMIT_REF_NAME} | perl -ne '/^pr-(\d+)-\S+$/ && print $1')

 if [ "$issue" = "" ]; then
-  echo "Unable to get issue id from: $CI_BUILD_REF_NAME"
+  echo "Unable to get issue id from: $CI_COMMIT_REF_NAME"
   exit 2
 fi

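For reference, the branch names that regex expects look like the made-up example below; anything that does not match makes the script exit with 2:

    CI_COMMIT_REF_NAME="pr-10646-some-topic"        # hypothetical mirrored PR branch
    echo "${CI_COMMIT_REF_NAME}" | perl -ne '/^pr-(\d+)-\S+$/ && print $1'   # prints 10646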
@@ -34,17 +34,17 @@ images:
     converted: false
     tag: "latest"

-  fedora-35:
-    filename: Fedora-Cloud-Base-35-1.2.x86_64.qcow2
-    url: https://download.fedoraproject.org/pub/fedora/linux/releases/35/Cloud/x86_64/images/Fedora-Cloud-Base-35-1.2.x86_64.qcow2
-    checksum: sha256:fe84502779b3477284a8d4c86731f642ca10dd3984d2b5eccdf82630a9ca2de6
+  fedora-37:
+    filename: Fedora-Cloud-Base-37-1.7.x86_64.qcow2
+    url: https://download.fedoraproject.org/pub/fedora/linux/releases/37/Cloud/x86_64/images/Fedora-Cloud-Base-37-1.7.x86_64.qcow2
+    checksum: sha256:b5b9bec91eee65489a5745f6ee620573b23337cbb1eb4501ce200b157a01f3a0
     converted: true
     tag: "latest"

-  fedora-36:
-    filename: Fedora-Cloud-Base-36-1.5.x86_64.qcow2
-    url: https://download.fedoraproject.org/pub/fedora/linux/releases/36/Cloud/x86_64/images/Fedora-Cloud-Base-36-1.5.x86_64.qcow2
-    checksum: sha256:ca9e514cc2f4a7a0188e7c68af60eb4e573d2e6850cc65b464697223f46b4605
+  fedora-38:
+    filename: Fedora-Cloud-Base-38-1.6.x86_64.qcow2
+    url: https://download.fedoraproject.org/pub/fedora/linux/releases/38/Cloud/x86_64/images/Fedora-Cloud-Base-38-1.6.x86_64.qcow2
+    checksum: sha256:d334670401ff3d5b4129fcc662cf64f5a6e568228af59076cc449a4945318482
     converted: true
     tag: "latest"

@@ -57,8 +57,8 @@ images:

   centos-7:
     filename: CentOS-7-x86_64-GenericCloud-2009.qcow2
-    url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-2009.qcow2
-    checksum: sha256:e38bab0475cc6d004d2e17015969c659e5a308111851b0e2715e84646035bdd3
+    url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-2211.qcow2
+    checksum: sha256:284aab2b23d91318f169ff464bce4d53404a15a0618ceb34562838c59af4adea
     converted: true
     tag: "latest"

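To double-check the refreshed CentOS 7 image outside the role, the usual download-and-verify steps would be (URL and checksum copied from the entry above):

    curl -LO http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-2211.qcow2
    echo "284aab2b23d91318f169ff464bce4d53404a15a0618ceb34562838c59af4adea  CentOS-7-x86_64-GenericCloud-2211.qcow2" | sha256sum -c -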
@@ -31,8 +31,8 @@ cloud_init:
debian-9: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1"
debian-10: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1"
debian-11: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1"
fedora-35: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IGZlZG9yYQp1c2VyczoKIC0gbmFtZToga3ViZXNwcmF5CiAgIGdyb3Vwczogd2hlZWwKICAgc3VkbzogJ0FMTD0oQUxMKSBOT1BBU1NXRDpBTEwnCiAgIHNoZWxsOiAvYmluL2Jhc2gKICAgbG9ja19wYXNzd2Q6IEZhbHNlCiAgIGhvbWU6IC9ob21lL2t1YmVzcHJheQogICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgIC0gc3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDYW5UaS9lS3gwK3RIWUpBZURocStzRlMyT2JVUDEvSTY5ZjdpVjNVdGtLbFQyMEpmVzFmNkZlWHQvMDRWZjI3V1FxK05xczZ2R0JxRDlRWFNZdWYrdDAvczdFUExqVGVpOW1lMW1wcXIrdVRlK0tEdFRQMzlwZkQzL2VWQ2FlQjcyNkdQMkZrYUQwRnpwbUViNjZPM05xaHhPUTk2R3gvOVhUdXcvSzNsbGo0T1ZENkdyalIzQjdjNFh0RUJzWmNacHBNSi9vSDFtR3lHWGRoMzFtV1FTcUFSTy9QOFU4R3d0MCtIR3BVd2gvaGR5M3QrU1lvVEIyR3dWYjB6b3lWd3RWdmZEUXpzbThmcTNhdjRLdmV6OGtZdU5ESnYwNXg0bHZVWmdSMTVaRFJYc0FuZGhReXFvWGRDTEFlMCtlYUtYcTlCa1d4S0ZiOWhQZTBBVWpqYTU="
fedora-36: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IGZlZG9yYQp1c2VyczoKIC0gbmFtZToga3ViZXNwcmF5CiAgIGdyb3Vwczogd2hlZWwKICAgc3VkbzogJ0FMTD0oQUxMKSBOT1BBU1NXRDpBTEwnCiAgIHNoZWxsOiAvYmluL2Jhc2gKICAgbG9ja19wYXNzd2Q6IEZhbHNlCiAgIGhvbWU6IC9ob21lL2t1YmVzcHJheQogICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgIC0gc3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDYW5UaS9lS3gwK3RIWUpBZURocStzRlMyT2JVUDEvSTY5ZjdpVjNVdGtLbFQyMEpmVzFmNkZlWHQvMDRWZjI3V1FxK05xczZ2R0JxRDlRWFNZdWYrdDAvczdFUExqVGVpOW1lMW1wcXIrdVRlK0tEdFRQMzlwZkQzL2VWQ2FlQjcyNkdQMkZrYUQwRnpwbUViNjZPM05xaHhPUTk2R3gvOVhUdXcvSzNsbGo0T1ZENkdyalIzQjdjNFh0RUJzWmNacHBNSi9vSDFtR3lHWGRoMzFtV1FTcUFSTy9QOFU4R3d0MCtIR3BVd2gvaGR5M3QrU1lvVEIyR3dWYjB6b3lWd3RWdmZEUXpzbThmcTNhdjRLdmV6OGtZdU5ESnYwNXg0bHZVWmdSMTVaRFJYc0FuZGhReXFvWGRDTEFlMCtlYUtYcTlCa1d4S0ZiOWhQZTBBVWpqYTU="
fedora-37: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IGZlZG9yYQp1c2VyczoKIC0gbmFtZToga3ViZXNwcmF5CiAgIGdyb3Vwczogd2hlZWwKICAgc3VkbzogJ0FMTD0oQUxMKSBOT1BBU1NXRDpBTEwnCiAgIHNoZWxsOiAvYmluL2Jhc2gKICAgbG9ja19wYXNzd2Q6IEZhbHNlCiAgIGhvbWU6IC9ob21lL2t1YmVzcHJheQogICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgIC0gc3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDYW5UaS9lS3gwK3RIWUpBZURocStzRlMyT2JVUDEvSTY5ZjdpVjNVdGtLbFQyMEpmVzFmNkZlWHQvMDRWZjI3V1FxK05xczZ2R0JxRDlRWFNZdWYrdDAvczdFUExqVGVpOW1lMW1wcXIrdVRlK0tEdFRQMzlwZkQzL2VWQ2FlQjcyNkdQMkZrYUQwRnpwbUViNjZPM05xaHhPUTk2R3gvOVhUdXcvSzNsbGo0T1ZENkdyalIzQjdjNFh0RUJzWmNacHBNSi9vSDFtR3lHWGRoMzFtV1FTcUFSTy9QOFU4R3d0MCtIR3BVd2gvaGR5M3QrU1lvVEIyR3dWYjB6b3lWd3RWdmZEUXpzbThmcTNhdjRLdmV6OGtZdU5ESnYwNXg0bHZVWmdSMTVaRFJYc0FuZGhReXFvWGRDTEFlMCtlYUtYcTlCa1d4S0ZiOWhQZTBBVWpqYTU="
fedora-38: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IGZlZG9yYQp1c2VyczoKIC0gbmFtZToga3ViZXNwcmF5CiAgIGdyb3Vwczogd2hlZWwKICAgc3VkbzogJ0FMTD0oQUxMKSBOT1BBU1NXRDpBTEwnCiAgIHNoZWxsOiAvYmluL2Jhc2gKICAgbG9ja19wYXNzd2Q6IEZhbHNlCiAgIGhvbWU6IC9ob21lL2t1YmVzcHJheQogICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgIC0gc3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDYW5UaS9lS3gwK3RIWUpBZURocStzRlMyT2JVUDEvSTY5ZjdpVjNVdGtLbFQyMEpmVzFmNkZlWHQvMDRWZjI3V1FxK05xczZ2R0JxRDlRWFNZdWYrdDAvczdFUExqVGVpOW1lMW1wcXIrdVRlK0tEdFRQMzlwZkQzL2VWQ2FlQjcyNkdQMkZrYUQwRnpwbUViNjZPM05xaHhPUTk2R3gvOVhUdXcvSzNsbGo0T1ZENkdyalIzQjdjNFh0RUJzWmNacHBNSi9vSDFtR3lHWGRoMzFtV1FTcUFSTy9QOFU4R3d0MCtIR3BVd2gvaGR5M3QrU1lvVEIyR3dWYjB6b3lWd3RWdmZEUXpzbThmcTNhdjRLdmV6OGtZdU5ESnYwNXg0bHZVWmdSMTVaRFJYc0FuZGhReXFvWGRDTEFlMCtlYUtYcTlCa1d4S0ZiOWhQZTBBVWpqYTU="
opensuse-leap-15: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1"
rhel-server-7: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo="
amazon-linux-2: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo="
@@ -1,6 +1,6 @@
 ---
 # Instance settings
-cloud_image: fedora-35
+cloud_image: fedora-37
 mode: default

 # Kubespray settings

@@ -1,6 +1,6 @@
 ---
 # Instance settings
-cloud_image: fedora-35
+cloud_image: fedora-37
 mode: default

 # Kubespray settings

@@ -1,6 +1,6 @@
 ---
 # Instance settings
-cloud_image: fedora-35
+cloud_image: fedora-37
 mode: default

 # Kubespray settings

@@ -1,6 +1,6 @@
 ---
 # Instance settings
-cloud_image: fedora-36
+cloud_image: fedora-38
 mode: default

 # Kubespray settings

@@ -1,6 +1,6 @@
 ---
 # Instance settings
-cloud_image: fedora-36
+cloud_image: fedora-38
 mode: default

 # Kubespray settings

@@ -1,6 +1,6 @@
 ---
 # Instance settings
-cloud_image: fedora-36
+cloud_image: fedora-38
 mode: default

 # Kubespray settings

@@ -1,6 +1,6 @@
 $num_instances = 2
 $vm_memory ||= 2048
-$os = "fedora35"
+$os = "fedora37"

 $kube_master_instances = 1
 $etcd_instances = 1

@@ -1,6 +1,6 @@
 ---
 # Instance settings
-cloud_image: fedora-35
+cloud_image: fedora-37
 mode: default

 # Kubespray settings

@@ -1,4 +1,5 @@
 -r ../requirements-2.11.txt
+pyyaml==5.3.1
 yamllint==1.19.0
 apache-libcloud==2.2.1
 tox==3.11.1

@@ -1,4 +1,5 @@
 -r ../requirements-2.12.txt
+pyyaml==5.3.1
 yamllint==1.19.0
 apache-libcloud==2.2.1
 tox==3.11.1

@@ -57,9 +57,9 @@ fi
 # Check out latest tag if testing upgrade
 test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout "$KUBESPRAY_VERSION"
 # Checkout the CI vars file so it is available
-test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
-test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" ${CI_TEST_REGISTRY_MIRROR}
-test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" ${CI_TEST_SETTING}
+test "${UPGRADE_TEST}" != "false" && git checkout "${CI_COMMIT_SHA}" tests/files/${CI_JOB_NAME}.yml
+test "${UPGRADE_TEST}" != "false" && git checkout "${CI_COMMIT_SHA}" ${CI_TEST_REGISTRY_MIRROR}
+test "${UPGRADE_TEST}" != "false" && git checkout "${CI_COMMIT_SHA}" ${CI_TEST_SETTING}

 # Create cluster
 ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" cluster.yml

@@ -68,7 +68,7 @@ ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGIS
 if [ "${UPGRADE_TEST}" != "false" ]; then
   test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml"
   test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml"
-  git checkout "${CI_BUILD_REF}"
+  git checkout "${CI_COMMIT_SHA}"
   ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" $PLAYBOOK
 fi

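The renames in this commit follow GitLab's switch away from the CI_BUILD_* variable names. Dropping lines like these into any job's script is a simple way to see which generation a runner provides (the old/new pairs are the ones touched here; the echo commands themselves are just an illustration):

    echo "CI_JOB_ID=${CI_JOB_ID:-unset}"                     # replaces CI_BUILD_ID
    echo "CI_COMMIT_SHA=${CI_COMMIT_SHA:-unset}"             # replaces CI_BUILD_REF
    echo "CI_COMMIT_REF_NAME=${CI_COMMIT_REF_NAME:-unset}"   # replaces CI_BUILD_REF_NAME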