CI: rework pipeline: short/extended based on labels (#11324)

* CI: reduce VM resource requests to improve scheduling

* CI: Reduce default jobs; add labels (ci-full/ci-extended) to run more tests

* CI: use job dependencies instead of stages

* CI: consolidate pre-commit into a single job

* CI: Use KubeVirt VMs to run Molecule and Vagrant jobs
Antoine Legrand 2024-07-01 12:25:36 +02:00 committed by GitHub
parent ff18f65a17
commit a0587e0b8e
30 changed files with 433 additions and 335 deletions
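The label-driven split described in the commit message comes down to GitLab rules matching on the PR_LABELS variable: with no label only the short default set runs, while the ci-extended or ci-full labels promote extra jobs to run automatically. A minimal sketch of the pattern used throughout the new templates (hypothetical template name):

.pr_extended_example:
  stage: deploy-extended
  rules:
    # run automatically when the PR carries the ci-extended or ci-full label
    - if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
      when: on_success
    # otherwise stay available as a manual, non-blocking job
    - when: manual
      allow_failure: true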

View File

@ -1,12 +1,9 @@
---
stages:
- build
- unit-tests
- moderator
- test
- deploy-part1
- deploy-part2
- deploy-part3
- deploy-special
- deploy-extended
variables:
KUBESPRAY_VERSION: v2.25.0
@ -43,15 +40,26 @@ before_script:
.job: &job
tags:
- packet
- ffci
image: $PIPELINE_IMAGE
artifacts:
when: always
paths:
- cluster-dump/
needs:
- pipeline-image
.job-moderated:
extends: .job
needs:
- pipeline-image
- ci-not-authorized
- check-galaxy-version # lint
- pre-commit # lint
- vagrant-validate # lint
.testcases: &testcases
<<: *job
extends: .job-moderated
retry: 1
interruptible: true
before_script:
@ -61,12 +69,12 @@ before_script:
script:
- ./tests/scripts/testcases_run.sh
after_script:
- chronic ./tests/scripts/testcases_cleanup.sh
- ./tests/scripts/testcases_cleanup.sh
# For failfast, at least 1 job must be defined in .gitlab-ci.yml
# Premoderated with manual actions
ci-not-authorized:
stage: moderator
stage: build
before_script: []
after_script: []
rules:
@ -87,8 +95,8 @@ ci-not-authorized:
script:
- exit $CI_OK_TO_TEST
tags:
- light
- ffci
needs: []
include:
- .gitlab-ci/build.yml
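Because job ordering is now expressed with needs: rather than stage boundaries, a test case starts as soon as the specific jobs it depends on have finished. A minimal sketch (hypothetical job name) of a test case picking up the moderated dependency chain defined above:

# hypothetical test case: inherits needs (pipeline-image, ci-not-authorized
# and the three lint jobs) through .testcases -> .job-moderated
example-testcase:
  extends: .testcases
  stage: deploy-part1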

View File

@ -5,7 +5,7 @@
paths:
- image-cache
tags:
- packet
- ffci
stage: build
image:
name: gcr.io/kaniko-project/executor:debug

View File

@ -1,40 +1,35 @@
---
generate-pre-commit:
image: 'mikefarah/yq@sha256:bcb889a1f9bdb0613c8a054542d02360c2b1b35521041be3e1bd8fbd0534d411'
stage: build
before_script: []
pre-commit:
stage: test
tags:
- ffci
image: 'ghcr.io/pre-commit-ci/runner-image@sha256:aaf2c7b38b22286f2d381c11673bec571c28f61dd086d11b43a1c9444a813cef'
variables:
PRE_COMMIT_HOME: /pre-commit-cache
script:
- >
yq -r < .pre-commit-config.yaml '.repos[].hooks[].id' |
sed 's/^/ - /' |
cat .gitlab-ci/pre-commit-dynamic-stub.yml - > pre-commit-generated.yml
artifacts:
- pre-commit run --all-files
cache:
key: pre-commit-all
paths:
- pre-commit-generated.yml
run-pre-commit:
stage: unit-tests
trigger:
include:
- artifact: pre-commit-generated.yml
job: generate-pre-commit
strategy: depend
- /pre-commit-cache
needs: []
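Interleaved with the removed generate/run pair, the new consolidated job is hard to read in diff form; reassembled from the added lines above, it amounts to:

pre-commit:
  stage: test
  tags:
    - ffci
  image: 'ghcr.io/pre-commit-ci/runner-image@sha256:aaf2c7b38b22286f2d381c11673bec571c28f61dd086d11b43a1c9444a813cef'
  variables:
    PRE_COMMIT_HOME: /pre-commit-cache
  script:
    - pre-commit run --all-files
  cache:
    key: pre-commit-all
    paths:
      - /pre-commit-cache
  needs: []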
vagrant-validate:
extends: .job
stage: unit-tests
tags: [light]
stage: test
tags: [ffci]
variables:
VAGRANT_VERSION: 2.3.7
script:
- ./tests/scripts/vagrant-validate.sh
- ./tests/scripts/vagrant-validate.sh
except: ['triggers', 'master']
# TODO: convert to pre-commit hook
check-galaxy-version:
stage: unit-tests
tags: [light]
needs: []
stage: test
tags: [ffci]
image: python:3
script:
- tests/scripts/check_galaxy_version.sh
- tests/scripts/check_galaxy_version.sh

View File

@ -1,30 +1,42 @@
---
.molecule:
tags: [c3.small.x86]
tags: [ffci-vm-med]
only: [/^pr-.*$/]
except: ['triggers']
image: $PIPELINE_IMAGE
image: quay.io/kubespray/vm-kubespray-ci:v6
services: []
stage: deploy-part1
needs: []
# - ci-not-authorized
variables:
VAGRANT_DEFAULT_PROVIDER: "libvirt"
before_script:
- tests/scripts/rebase.sh
- ./tests/scripts/vagrant_clean.sh
- groups
- python3 -m venv citest
- source citest/bin/activate
- vagrant plugin expunge --reinstall --force --no-tty
- vagrant plugin install vagrant-libvirt
- pip install --no-compile --no-cache-dir pip -U
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/requirements.txt
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/tests/requirements.txt
- ./tests/scripts/rebase.sh
- ./tests/scripts/vagrant_clean.sh
script:
- ./tests/scripts/molecule_run.sh
- ./tests/scripts/molecule_run.sh
after_script:
- chronic ./tests/scripts/molecule_logs.sh
- ./tests/scripts/molecule_logs.sh
artifacts:
when: always
paths:
- molecule_logs/
- molecule_logs/
# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set
.molecule_periodic:
only:
variables:
- $PERIODIC_CI_ENABLED
- $PERIODIC_CI_ENABLED
allow_failure: true
extends: .molecule
@ -34,50 +46,50 @@ molecule_full:
molecule_no_container_engines:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -e container-engine
- ./tests/scripts/molecule_run.sh -e container-engine
when: on_success
molecule_docker:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
- ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
when: on_success
molecule_containerd:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -i container-engine/containerd
- ./tests/scripts/molecule_run.sh -i container-engine/containerd
when: on_success
molecule_cri-o:
extends: .molecule
stage: deploy-part2
stage: deploy-part1
script:
- ./tests/scripts/molecule_run.sh -i container-engine/cri-o
- ./tests/scripts/molecule_run.sh -i container-engine/cri-o
allow_failure: true
when: on_success
# Stage 3 container engines don't get as much attention so allow them to fail
molecule_kata:
extends: .molecule
stage: deploy-part3
script:
- ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
when: manual
# FIXME: this test is broken (perma-failing)
# # Stage 3 container engines don't get as much attention so allow them to fail
# molecule_kata:
# extends: .molecule
# stage: deploy-extended
# script:
# - ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
# when: manual
# # FIXME: this test is broken (perma-failing)
molecule_gvisor:
extends: .molecule
stage: deploy-part3
stage: deploy-extended
script:
- ./tests/scripts/molecule_run.sh -i container-engine/gvisor
- ./tests/scripts/molecule_run.sh -i container-engine/gvisor
when: manual
# FIXME: this test is broken (perma-failing)
molecule_youki:
extends: .molecule
stage: deploy-part3
stage: deploy-extended
script:
- ./tests/scripts/molecule_run.sh -i container-engine/youki
- ./tests/scripts/molecule_run.sh -i container-engine/youki
when: manual
# FIXME: this test is broken (perma-failing)
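New Molecule scenarios follow the same shape as the jobs above: extend .molecule and pass the scenario directory to molecule_run.sh. A sketch with a hypothetical scenario name, mirroring the gvisor/youki jobs:

molecule_example-engine:
  extends: .molecule
  stage: deploy-extended
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/example-engine
  when: manual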

View File

@ -6,14 +6,56 @@
CI_PLATFORM: packet
SSH_USER: kubespray
tags:
- packet
except: [triggers]
- ffci
needs:
- pipeline-image
- ci-not-authorized
# CI template for PRs
.packet_pr:
only: [/^pr-.*$/]
stage: deploy-part1
rules:
- if: $PR_LABELS =~ /.*ci-short.*/
when: manual
allow_failure: true
- if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
when: on_success
- when: manual
allow_failure: true
extends: .packet
## Uncomment this to have multiple stages
# needs:
# - packet_ubuntu20-calico-all-in-one
.packet_pr_short:
stage: deploy-part1
extends: .packet
rules:
- if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
when: on_success
- when: manual
allow_failure: true
.packet_pr_manual:
extends: .packet_pr
stage: deploy-extended
rules:
- if: $PR_LABELS =~ /.*ci-full.*/
when: on_success
# Else run as manual
- when: manual
allow_failure: true
.packet_pr_extended:
extends: .packet_pr
stage: deploy-extended
rules:
- if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
when: on_success
- when: manual
allow_failure: true
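In practice the tiers read as follows: with no label the .packet_pr and .packet_pr_short jobs run automatically, ci-short demotes the .packet_pr jobs to manual, ci-extended additionally runs the .packet_pr_extended jobs, and ci-full also promotes the .packet_pr_manual tier. A job opts into a tier simply by choosing which template it extends (hypothetical job name):

packet_exampleos-examplecni:
  extends: .packet_pr_extended   # runs automatically only with ci-extended or ci-full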
# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set
.packet_periodic:
@ -34,118 +76,87 @@ packet_cleanup_old:
# The ubuntu20-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
packet_ubuntu20-calico-all-in-one:
stage: deploy-part1
extends: .packet_pr
when: on_success
extends: .packet_pr_short
variables:
RESET_CHECK: "true"
# ### PR JOBS PART2
packet_ubuntu20-all-in-one-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu20-calico-all-in-one-hardening:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu22-all-in-one-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu20-crio:
extends: .packet_pr_manual
packet_ubuntu22-calico-all-in-one:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu24-all-in-one-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu24-calico-all-in-one:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu24-calico-etcd-datastore:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_centos7-flannel-addons-ha:
extends: .packet_pr
stage: deploy-part2
when: on_success
packet_almalinux8-crio:
extends: .packet_pr
stage: deploy-part2
when: on_success
allow_failure: true
packet_ubuntu20-crio:
packet_almalinux8-kube-ovn:
extends: .packet_pr
stage: deploy-part2
when: manual
packet_fedora37-crio:
extends: .packet_pr
stage: deploy-part2
when: manual
packet_ubuntu20-flannel-ha:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian10-cilium-svc-proxy:
stage: deploy-part2
extends: .packet_periodic
when: on_success
packet_debian10-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_debian10-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_debian11-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_debian11-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_debian12-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_debian12-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_debian12-cilium:
stage: deploy-part2
extends: .packet_periodic
when: on_success
extends: .packet_pr
packet_rockylinux8-calico:
extends: .packet_pr
packet_rockylinux9-cilium:
extends: .packet_pr
variables:
RESET_CHECK: "true"
packet_amazon-linux-2-all-in-one:
extends: .packet_pr
packet_fedora38-docker-weave:
extends: .packet_pr
allow_failure: true
packet_opensuse-docker-cilium:
extends: .packet_pr
packet_ubuntu20-cilium-sep:
extends: .packet_pr
## Extended
packet_debian11-docker:
extends: .packet_pr_extended
packet_debian12-docker:
extends: .packet_pr_extended
packet_debian12-calico:
extends: .packet_pr_extended
packet_almalinux8-calico-remove-node:
extends: .packet_pr_extended
variables:
REMOVE_NODE_CHECK: "true"
REMOVE_NODE_NAME: "instance-3"
packet_rockylinux9-calico:
extends: .packet_pr_extended
packet_almalinux8-calico:
extends: .packet_pr_extended
packet_almalinux8-docker:
extends: .packet_pr_extended
packet_centos7-calico-ha-once-localhost:
stage: deploy-part2
extends: .packet_pr
when: on_success
extends: .packet_pr_extended
variables:
# This will instruct Docker not to start over TLS.
DOCKER_TLS_CERTDIR: ""
@ -153,196 +164,127 @@ packet_centos7-calico-ha-once-localhost:
services:
- docker:26.1.4-dind
packet_almalinux8-kube-ovn:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_centos7-flannel-addons-ha:
extends: .packet_pr_extended
packet_almalinux8-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_debian10-calico:
extends: .packet_pr_extended
packet_rockylinux8-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu20-calico-all-in-one-hardening:
extends: .packet_pr_extended
packet_rockylinux9-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu24-calico-all-in-one:
extends: .packet_pr_extended
packet_rockylinux9-cilium:
stage: deploy-part2
extends: .packet_pr
when: on_success
variables:
RESET_CHECK: "true"
packet_ubuntu20-calico-etcd-kubeadm:
extends: .packet_pr_extended
packet_almalinux8-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu24-all-in-one-docker:
extends: .packet_pr_extended
packet_amazon-linux-2-all-in-one:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_fedora38-docker-weave:
stage: deploy-part2
extends: .packet_pr
when: on_success
allow_failure: true
packet_opensuse-docker-cilium:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu22-all-in-one-docker:
extends: .packet_pr_extended
# ### MANUAL JOBS
packet_centos7-calico-ha:
extends: .packet_pr_manual
packet_fedora37-crio:
extends: .packet_pr_manual
packet_ubuntu20-flannel-ha:
extends: .packet_pr_manual
packet_ubuntu20-all-in-one-docker:
extends: .packet_pr_manual
packet_ubuntu20-docker-weave-sep:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_ubuntu20-cilium-sep:
stage: deploy-special
extends: .packet_pr
when: manual
extends: .packet_pr_manual
packet_ubuntu20-flannel-ha-once:
stage: deploy-part2
extends: .packet_pr
when: manual
extends: .packet_pr_manual
packet_fedora37-calico-swap-selinux:
extends: .packet_pr_manual
# Calico HA eBPF
packet_almalinux8-calico-ha-ebpf:
stage: deploy-part2
extends: .packet_pr
when: manual
extends: .packet_pr_manual
packet_almalinux8-calico-nodelocaldns-secondary:
extends: .packet_pr_manual
packet_debian10-macvlan:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_centos7-calico-ha:
stage: deploy-part2
extends: .packet_pr
when: manual
extends: .packet_pr_manual
packet_centos7-multus-calico:
stage: deploy-part2
extends: .packet_pr
when: manual
extends: .packet_pr_manual
packet_debian11-custom-cni:
extends: .packet_pr_manual
packet_debian11-kubelet-csr-approver:
extends: .packet_pr_manual
packet_debian12-custom-cni-helm:
extends: .packet_pr_manual
packet_ubuntu20-calico-ha-wireguard:
extends: .packet_pr_manual
# PERIODIC
packet_fedora38-docker-calico:
stage: deploy-part2
stage: deploy-extended
extends: .packet_periodic
when: on_success
variables:
RESET_CHECK: "true"
packet_fedora37-calico-selinux:
stage: deploy-part2
stage: deploy-extended
extends: .packet_periodic
when: on_success
packet_fedora37-calico-swap-selinux:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_almalinux8-calico-nodelocaldns-secondary:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_fedora38-kube-ovn:
stage: deploy-part2
stage: deploy-extended
extends: .packet_periodic
when: on_success
packet_debian11-custom-cni:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian11-kubelet-csr-approver:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian12-custom-cni-helm:
stage: deploy-part2
extends: .packet_pr
when: manual
# ### PR JOBS PART3
# Long jobs (45min+)
packet_centos7-weave-upgrade-ha:
stage: deploy-part3
stage: deploy-extended
extends: .packet_periodic
when: on_success
variables:
UPGRADE_TEST: basic
packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha:
stage: deploy-part3
stage: deploy-extended
extends: .packet_periodic
when: on_success
variables:
UPGRADE_TEST: basic
# Calico HA Wireguard
packet_ubuntu20-calico-ha-wireguard:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian11-calico-upgrade:
stage: deploy-part3
extends: .packet_pr
when: on_success
variables:
UPGRADE_TEST: graceful
packet_almalinux8-calico-remove-node:
stage: deploy-part3
extends: .packet_pr
when: on_success
variables:
REMOVE_NODE_CHECK: "true"
REMOVE_NODE_NAME: "instance-3"
packet_ubuntu20-calico-etcd-kubeadm:
stage: deploy-part3
extends: .packet_pr
when: on_success
packet_debian11-calico-upgrade-once:
stage: deploy-part3
stage: deploy-extended
extends: .packet_periodic
when: on_success
variables:
UPGRADE_TEST: graceful
packet_ubuntu20-calico-ha-recover:
stage: deploy-part3
stage: deploy-extended
extends: .packet_periodic
when: on_success
variables:
RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
packet_ubuntu20-calico-ha-recover-noquorum:
stage: deploy-part3
stage: deploy-extended
extends: .packet_periodic
when: on_success
variables:
RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:]:kube_control_plane[1:]"
packet_debian10-cilium-svc-proxy:
stage: deploy-extended
extends: .packet_periodic
packet_debian11-calico-upgrade:
stage: deploy-extended
extends: .packet_periodic
variables:
UPGRADE_TEST: graceful

View File

@ -7,7 +7,7 @@ pre-commit:
variables:
PRE_COMMIT_HOME: /pre-commit-cache
script:
- pre-commit run -a $HOOK_ID
- pre-commit run --all-files
cache:
key: pre-commit-$HOOK_ID
paths:

View File

@ -2,6 +2,10 @@
# Tests for contrib/terraform/
.terraform_install:
extends: .job
needs:
- ci-not-authorized
- pipeline-image
stage: deploy-part1
before_script:
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
- ./tests/scripts/rebase.sh
@ -24,17 +28,19 @@
.terraform_validate:
extends: .terraform_install
stage: unit-tests
tags: [light]
tags: [ffci]
only: ['master', /^pr-.*$/]
script:
- terraform -chdir="contrib/terraform/$PROVIDER" validate
- terraform -chdir="contrib/terraform/$PROVIDER" fmt -check -diff
stage: test
needs:
- pipeline-image
.terraform_apply:
extends: .terraform_install
tags: [light]
stage: deploy-part3
tags: [ffci]
stage: deploy-extended
when: manual
only: [/^pr-.*$/]
artifacts:
@ -51,7 +57,7 @@
- tests/scripts/testcases_run.sh
after_script:
# Cleanup regardless of exit code
- chronic ./tests/scripts/testcases_cleanup.sh
- ./tests/scripts/testcases_cleanup.sh
tf-validate-openstack:
extends: .terraform_validate
@ -146,8 +152,7 @@ tf-validate-nifcloud:
TF_VAR_router_id: "ab95917c-41fb-4881-b507-3a6dfe9403df"
tf-elastx_cleanup:
stage: unit-tests
tags: [light]
tags: [ffci]
image: python
variables:
<<: *elastx_variables
@ -155,10 +160,11 @@ tf-elastx_cleanup:
- pip install -r scripts/openstack-cleanup/requirements.txt
script:
- ./scripts/openstack-cleanup/main.py
allow_failure: true
tf-elastx_ubuntu20-calico:
extends: .terraform_apply
stage: deploy-part3
stage: deploy-part1
when: on_success
allow_failure: true
variables:

View File

@ -1,64 +1,73 @@
---
.vagrant:
extends: .testcases
needs:
- ci-not-authorized
variables:
CI_PLATFORM: "vagrant"
SSH_USER: "vagrant"
VAGRANT_DEFAULT_PROVIDER: "libvirt"
KUBESPRAY_VAGRANT_CONFIG: tests/files/${CI_JOB_NAME}.rb
tags: [c3.small.x86]
only: [/^pr-.*$/]
except: ['triggers']
image: $PIPELINE_IMAGE
DOCKER_NAME: vagrant
VAGRANT_ANSIBLE_TAGS: facts
tags: [ffci-vm-large]
# only: [/^pr-.*$/]
# except: ['triggers']
image: quay.io/kubespray/vm-kubespray-ci:v6
services: []
before_script:
- echo $USER
- python3 -m venv citest
- source citest/bin/activate
- vagrant plugin expunge --reinstall --force --no-tty
- vagrant plugin install vagrant-libvirt
- pip install --no-compile --no-cache-dir pip -U
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/requirements.txt
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/tests/requirements.txt
- ./tests/scripts/vagrant_clean.sh
script:
- ./tests/scripts/testcases_run.sh
after_script:
- chronic ./tests/scripts/testcases_cleanup.sh
vagrant_ubuntu20-calico-dual-stack:
stage: deploy-part2
stage: deploy-extended
extends: .vagrant
when: manual
# FIXME: this test is broken (perma-failing)
vagrant_ubuntu20-weave-medium:
stage: deploy-part2
stage: deploy-extended
extends: .vagrant
when: manual
vagrant_ubuntu20-flannel:
stage: deploy-part2
stage: deploy-part1
extends: .vagrant
when: on_success
allow_failure: false
vagrant_ubuntu20-flannel-collection:
stage: deploy-part2
stage: deploy-extended
extends: .vagrant
when: on_success
when: manual
vagrant_ubuntu20-kube-router-sep:
stage: deploy-part2
stage: deploy-extended
extends: .vagrant
when: manual
# Service proxy test fails connectivity testing
vagrant_ubuntu20-kube-router-svc-proxy:
stage: deploy-part2
stage: deploy-extended
extends: .vagrant
when: manual
vagrant_fedora37-kube-router:
stage: deploy-part2
stage: deploy-extended
extends: .vagrant
when: manual
# FIXME: this test is broken (perma-failing)
vagrant_centos7-kube-router:
stage: deploy-part2
stage: deploy-extended
extends: .vagrant
when: manual
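Adding a Vagrant-based case follows the same pattern: the job name doubles as the test config name through KUBESPRAY_VAGRANT_CONFIG (tests/files/${CI_JOB_NAME}.rb), so a new job only needs a stage and the template. A sketch with a hypothetical job name:

vagrant_ubuntu20-example-cni:
  stage: deploy-extended
  extends: .vagrant
  when: manual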

Vagrantfile
View File

@ -278,6 +278,7 @@ Vagrant.configure("2") do |config|
"local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}",
"local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}",
"ansible_ssh_user": SUPPORTED_OS[$os][:user],
"ansible_ssh_private_key_file": File.join(Dir.home, ".vagrant.d", "insecure_private_key"),
"unsafe_show_logs": "True"
}

View File

@ -5,8 +5,8 @@
1. build: build a docker image to be used in the pipeline
2. unit-tests: fast jobs for fast feedback (linting, etc...)
3. deploy-part1: small number of jobs to test if the PR works with default settings
4. deploy-part2: slow jobs testing different platforms, OS, settings, CNI, etc...
5. deploy-part3: very slow jobs (upgrades, etc...)
4. deploy-extended: slow jobs testing different platforms, OS, settings, CNI, etc...
5. deploy-extended: very slow jobs (upgrades, etc...)
## Runners

View File

@ -4,7 +4,7 @@ FROM ubuntu:jammy-20230308
# Pip needs this as well at the moment to install ansible
# (and potentially other packages)
# See: https://github.com/pypa/pip/issues/10219
ENV VAGRANT_VERSION=2.3.7 \
ENV VAGRANT_VERSION=2.4.1 \
VAGRANT_DEFAULT_PROVIDER=libvirt \
VAGRANT_ANSIBLE_TAGS=facts \
LANG=C.UTF-8 \
@ -31,6 +31,8 @@ RUN apt update -q \
unzip \
libvirt-clients \
qemu-utils \
qemu-kvm \
dnsmasq \
&& curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
&& add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" \
&& apt update -q \
@ -46,6 +48,7 @@ ADD ./roles/kubespray-defaults/defaults/main/main.yml /kubespray/roles/kubespray
RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
&& pip install --no-compile --no-cache-dir pip -U \
&& pip install --no-compile --no-cache-dir -r tests/requirements.txt \
&& pip install --no-compile --no-cache-dir -r requirements.txt \
&& KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main/main.yml) \
&& curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
&& echo $(curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \

View File

@ -35,8 +35,7 @@ cleanup-packet:
create-vagrant:
vagrant up
find / -name vagrant_ansible_inventory
cp /builds/kargo-ci/kubernetes-sigs-kubespray/inventory/sample/vagrant_ansible_inventory $(INVENTORY)
cp $(CI_PROJECT_DIR)/inventory/sample/vagrant_ansible_inventory $(INVENTORY)
delete-vagrant:
vagrant destroy -f

View File

@ -4,14 +4,13 @@
vm_cpu_cores: 2
vm_cpu_sockets: 1
vm_cpu_threads: 2
vm_memory: 2048Mi
vm_memory: 2048
# Replace invalid characters so that we can use the branch name in kubernetes labels
branch_name_sane: "{{ branch | regex_replace('/', '-') }}"
# Request/Limit allocation settings
cpu_allocation_ratio: 0.5
cpu_allocation_ratio: 0.25
memory_allocation_ratio: 1
# Default path for inventory

View File

@ -4,6 +4,8 @@ kind: VirtualMachine
metadata:
name: "instance-{{ vm_id }}"
namespace: "{{ test_name }}"
annotations:
kubespray.com/ci.template-path: "tests/cloud_playbooks/roles/packet-ci/templates/vm.yml.j2"
labels:
kubevirt.io/os: {{ cloud_image }}
spec:
@ -34,10 +36,10 @@ spec:
threads: {{ vm_cpu_threads }}
resources:
requests:
memory: {{ vm_memory * memory_allocation_ratio }}
memory: "{{ vm_memory * memory_allocation_ratio }}Mi"
cpu: {{ vm_cpu_cores * cpu_allocation_ratio }}
limits:
memory: {{ vm_memory }}
memory: "{{ vm_memory }}Mi"
cpu: {{ vm_cpu_cores }}
networks:
- name: default
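With the new defaults (vm_cpu_cores: 2, vm_memory: 2048, cpu_allocation_ratio: 0.25, memory_allocation_ratio: 1), the template renders roughly the following resources block; a sketch of the expected output, not captured from an actual run:

resources:
  requests:
    memory: "2048Mi"   # vm_memory * memory_allocation_ratio
    cpu: 0.5           # vm_cpu_cores * cpu_allocation_ratio
  limits:
    memory: "2048Mi"   # vm_memory
    cpu: 2             # vm_cpu_cores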

View File

@ -2,7 +2,7 @@
# Instance settings
cloud_image: almalinux-8
mode: ha
vm_memory: 3072Mi
vm_memory: 3072
# Kubespray settings
calico_bpf_enabled: true

View File

@ -2,7 +2,7 @@
# Instance settings
cloud_image: almalinux-8
mode: default
vm_memory: 3072Mi
vm_memory: 3072
# Kubespray settings
enable_nodelocaldns_secondary: true

View File

@ -2,7 +2,7 @@
# Instance settings
cloud_image: almalinux-8
mode: default
vm_memory: 3072Mi
vm_memory: 3072
# Kubespray settings
metrics_server_enabled: true

View File

@ -2,7 +2,7 @@
# Instance settings
cloud_image: almalinux-8
mode: default
vm_memory: 3072Mi
vm_memory: 3072
# Use docker
container_manager: docker

View File

@ -2,7 +2,7 @@
# Instance settings
cloud_image: almalinux-8
mode: default
vm_memory: 3072Mi
vm_memory: 3072
# Kubespray settings
kube_network_plugin: kube-ovn

View File

@ -2,7 +2,7 @@
# Instance settings
cloud_image: rockylinux-8
mode: default
vm_memory: 3072Mi
vm_memory: 3072
# Kubespray settings
metrics_server_enabled: true

View File

@ -2,7 +2,7 @@
# Instance settings
cloud_image: rockylinux-9
mode: default
vm_memory: 3072Mi
vm_memory: 3072
# Kubespray settings
metrics_server_enabled: true

View File

@ -2,7 +2,7 @@
# Instance settings
cloud_image: rockylinux-9
mode: default
vm_memory: 3072Mi
vm_memory: 3072
# Kubespray settings
kube_network_plugin: cilium

View File

@ -2,7 +2,7 @@
# Instance settings
cloud_image: ubuntu-2204
mode: all-in-one
vm_memory: 1600Mi
vm_memory: 1600
# Kubespray settings
auto_renew_certificates: true

View File

@ -2,7 +2,7 @@
# Instance settings
cloud_image: ubuntu-2204
mode: all-in-one
vm_memory: 1600Mi
vm_memory: 1600
# Kubespray settings
auto_renew_certificates: true

View File

@ -2,7 +2,7 @@
# Instance settings
cloud_image: ubuntu-2404
mode: all-in-one
vm_memory: 1600Mi
vm_memory: 1600
# Kubespray settings
auto_renew_certificates: true

View File

@ -2,7 +2,7 @@
# Instance settings
cloud_image: ubuntu-2404
mode: all-in-one
vm_memory: 1600Mi
vm_memory: 1600
# Kubespray settings
auto_renew_certificates: true

View File

@ -2,7 +2,7 @@
# Instance settings
cloud_image: ubuntu-2404
mode: node-etcd-client
vm_memory: 1600Mi
vm_memory: 1600
# Kubespray settings
auto_renew_certificates: true

View File

@ -2,7 +2,6 @@ $os = "ubuntu2004"
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false

View File

@ -1,3 +1,4 @@
---
# Kubespray settings
kube_network_plugin: flannel
ansible_ssh_private_key: .vagrant.d/insecure_private_key

View File

@ -0,0 +1,122 @@
#!/bin/bash
# install_vagrant() {
# sudo apt install vagrant-libvirt vagrant -y
# sudo vagrant plugin install vagrant-libvirt
# }
# prep(){
# sudo apt-get update -y
# sudo apt-get install ca-certificates curl libvirt-daemon-system\
# libvirt-clients qemu-utils qemu-kvm htop atop -y
# sudo install -m 0755 -d /etc/apt/keyrings
# }
# install_docker() {
# VERSION_STRING=5:26.1.0-1~ubuntu.24.04~noble
# sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
# sudo chmod a+r /etc/apt/keyrings/docker.asc
# # Add the repository to Apt sources:
# echo \
# "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
# $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
# sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
# sudo apt-get update -y
# sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y
# }
# install_docker_auto () {
# curl -fsSL https://get.docker.com -o get-docker.sh
# sudo sh ./get-docker.sh --dry-run
# }
VAGRANT_VERSION=2.4.1
VAGRANT_DEFAULT_PROVIDER=libvirt
VAGRANT_ANSIBLE_TAGS=facts
LANG=C.UTF-8
DEBIAN_FRONTEND=noninteractive
PYTHONDONTWRITEBYTECODE=1
KUBE_VERSION=1.29.5
pipeline_install() {
cp /etc/apt/sources.list /etc/apt/sources.list."$(date +"%F")"
sed -i -e '/^# deb-src.*universe$/s/# //g' /etc/apt/sources.list
sed -i 's/^Types: deb$/Types: deb deb-src/' /etc/apt/sources.list.d/ubuntu.sources
apt update
# libssl-dev \
# python3-dev \
# # jq \
# moreutils \
# libvirt-dev \
# # rsync \
# git \
# # htop \
# gpg \
# atop
# gnupg2 \
# software-properties-common
#
apt install --no-install-recommends -y \
git \
make \
python3-pip \
sshpass \
apt-transport-https \
openssh-client \
ca-certificates \
curl \
libfuse2 \
unzip \
qemu-utils \
libvirt-daemon-system \
libvirt-clients \
qemu-kvm \
ebtables libguestfs-tools \
ruby-fog-libvirt \
libvirt-dev \
gcc \
build-essential \
ruby-libvirt \
libxslt-dev libxml2-dev zlib1g-dev \
python3-venv python3-full \
dnsmasq
apt-get build-dep -y ruby-libvirt ruby-dev
### VAGRANT ###
# apt-get install -y unzip
curl -LO https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}_linux_amd64.zip
unzip vagrant_${VAGRANT_VERSION}_linux_amd64.zip
mv vagrant /usr/local/bin/vagrant
chmod a+x /usr/local/bin/vagrant
# ls -la /usr/local/bin/vagrant
/usr/local/bin/vagrant plugin install vagrant-libvirt
usermod -aG kvm kubespray
usermod -aG libvirt kubespray
### DOCKER ###
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
add-apt-repository -y "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt update
apt install --no-install-recommends -y docker-ce
apt autoremove -y --purge && apt clean && rm -rf /var/lib/apt/lists/* /var/log/*
### KUBECTL ###
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
mv kubectl /usr/local/bin/kubectl
chmod a+x /usr/local/bin/kubectl
systemctl restart libvirtd
# Install Vagrant
# apt update -y
# echo apt-get install -y unzip libfuse2 vagrant vagrant-libvirt
# apt --fix-broken install -y
# dpkg --configure -a -y
}
# wrapped up in a function so that we have some protection against only getting
# half the file during "curl | sh"
pipeline_install