[2.13] Backport CI fix (#7119)
* [2.14] fix ci (#7021)

  * fix flake8 errors in Kubespray CI - tox-inventory-builder
  * fix flake8 errors in Kubespray CI - tox-inventory-builder
  * Invalidate CRI-O kubic repo's cache

    Signed-off-by: Victor Morales <v.morales@samsung.com>

  * add support to configure pkg install retries and use it in CI job tf-ovh_ubuntu18-calico (due to it failing often)
  * Switch Calico and Cilium image repos to Quay.io

  Co-authored-by: Victor Morales <v.morales@samsung.com>
  Co-authored-by: Barry Melbourne <9964974+bmelbourne@users.noreply.github.com>

  Conflicts:
    roles/download/defaults/main.yml

* up vagrant box to fedora/33-cloud-base in cri-o molecule tests

  (cherry picked from commit 06ec5393d7)

* add Google proxy-mirror-cache for docker hub to CI tests

  (cherry picked from commit d739a6bb2f)

* containerd docker hub registry mirror support

  * containerd docker hub registry mirror support
  * add docs
  * fix typo
  * fix yamllint
  * fix indent in sample and ansible-playbook param in testcases_run
  * fix md
  * mv common vars to tests/common/_docker_hub_registry_mirror.yml
  * checkout vars to upgrade tests

  (cherry picked from commit 4a8a52bad9)

* Exclude .git/ from shellcheck

  If a branch name contains '.sh', shellcheck currently checks the corresponding branch file under .git/ and reports an error because that file is not a shell script. Exclude files under .git/ from shellcheck to avoid this issue.

  (cherry picked from commit e2467d87b6)

  Co-authored-by: Hans Feldt <2808287+hafe@users.noreply.github.com>
  Co-authored-by: Sergey <s.bondarev@southbridge.ru>
  Co-authored-by: Kenichi Omichi <ken-oomichi@wx.jp.nec.com>

* Switch some images from dockerhub to k8s.gcr (also increase pkg retries) (#6955)

* Set packet_centos8-kube-ovn test to manual

  Signed-off-by: Rick Haan <rickhaan94@gmail.com>

Co-authored-by: Etienne Champetier <champetier.etienne@gmail.com>
Co-authored-by: Hans Feldt <2808287+hafe@users.noreply.github.com>
Co-authored-by: Sergey <s.bondarev@southbridge.ru>
Co-authored-by: Kenichi Omichi <ken-oomichi@wx.jp.nec.com>
Co-authored-by: Florian Ruynat <16313165+floryut@users.noreply.github.com>
parent d28a6d68f9
commit cd832eadea
@@ -15,6 +15,7 @@ variables:
   MAGIC: "ci check this"
   TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
   CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
+  CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
   GS_ACCESS_KEY_ID: $GS_KEY
   GS_SECRET_ACCESS_KEY: $GS_SECRET
   CONTAINER_ENGINE: docker

@@ -83,7 +83,7 @@ packet_centos7-calico-ha-once-localhost:
 packet_centos8-kube-ovn:
   stage: deploy-part2
   extends: .packet
-  when: on_success
+  when: manual

 packet_centos8-calico:
   stage: deploy-part2

@@ -12,5 +12,5 @@ shellcheck:
     - shellcheck --version
   script:
     # Run shellcheck for all *.sh except contrib/
-    - find . -name '*.sh' -not -path './contrib/*' | xargs shellcheck --severity error
+    - find . -name '*.sh' -not -path './contrib/*' -not -path './.git/*' | xargs shellcheck --severity error
   except: ['triggers', 'master']

@@ -51,7 +51,7 @@ class TestInventory(unittest.TestCase):
         groups = ['group1', 'group2']
         self.inv.ensure_required_groups(groups)
         for group in groups:
-            self.assertTrue(group in self.inv.yaml_config['all']['children'])
+            self.assertIn(group, self.inv.yaml_config['all']['children'])

     def test_get_host_id(self):
         hostnames = ['node99', 'no99de01', '01node01', 'node1.domain',

@@ -209,8 +209,8 @@ class TestInventory(unittest.TestCase):
             ('doesnotbelong2', {'whateveropts=ilike'})])
         self.inv.yaml_config['all']['hosts'] = existing_hosts
         self.inv.purge_invalid_hosts(proper_hostnames)
-        self.assertTrue(
-            bad_host not in self.inv.yaml_config['all']['hosts'].keys())
+        self.assertNotIn(
+            bad_host, self.inv.yaml_config['all']['hosts'].keys())

     def test_add_host_to_group(self):
         group = 'etcd'

@@ -227,8 +227,8 @@ class TestInventory(unittest.TestCase):
         host = 'node1'

         self.inv.set_kube_master([host])
-        self.assertTrue(
-            host in self.inv.yaml_config['all']['children'][group]['hosts'])
+        self.assertIn(
+            host, self.inv.yaml_config['all']['children'][group]['hosts'])

     def test_set_all(self):
         hosts = OrderedDict([

@@ -246,8 +246,8 @@ class TestInventory(unittest.TestCase):

         self.inv.set_k8s_cluster()
         for host in expected_hosts:
-            self.assertTrue(
-                host in
+            self.assertIn(
+                host,
                 self.inv.yaml_config['all']['children'][group]['children'])

     def test_set_kube_node(self):

@@ -255,16 +255,16 @@ class TestInventory(unittest.TestCase):
         host = 'node1'

         self.inv.set_kube_node([host])
-        self.assertTrue(
-            host in self.inv.yaml_config['all']['children'][group]['hosts'])
+        self.assertIn(
+            host, self.inv.yaml_config['all']['children'][group]['hosts'])

     def test_set_etcd(self):
         group = 'etcd'
         host = 'node1'

         self.inv.set_etcd([host])
-        self.assertTrue(
-            host in self.inv.yaml_config['all']['children'][group]['hosts'])
+        self.assertIn(
+            host, self.inv.yaml_config['all']['children'][group]['hosts'])

     def test_scale_scenario_one(self):
         num_nodes = 50

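The inventory-builder test changes above replace `assertTrue(x in y)` with `assertIn(x, y)` (and the negated form with `assertNotIn`). The benefit is the failure message: `assertTrue` can only report "False is not true", while `assertIn` reports both the missing member and the container. A minimal, self-contained sketch of the difference — the test class and data below are hypothetical, not taken from the Kubespray test suite:

```python
import unittest


class AssertionStyleDemo(unittest.TestCase):
    """Hypothetical example; not part of contrib/inventory_builder tests."""

    def test_membership_assertions(self):
        children = {'kube-master': {}, 'kube-node': {}, 'etcd': {}}

        # Old style: if this fails, unittest only prints
        #   AssertionError: False is not true
        self.assertTrue('etcd' in children)

        # New style: a failure prints the member and the container, e.g.
        #   AssertionError: 'etcd' not found in {'kube-master': {}, ...}
        self.assertIn('etcd', children)
        self.assertNotIn('bad_host', children)


if __name__ == '__main__':
    unittest.main()
```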
@@ -0,0 +1,31 @@
+# containerd
+
+[containerd] An industry-standard container runtime with an emphasis on simplicity, robustness and portability.
+Kubespray supports basic functionality for using containerd as the default container runtime in a cluster.
+
+_To use the containerd container runtime set the following variables:_
+
+## k8s-cluster.yml
+
+```yaml
+container_manager: containerd
+```
+
+## Containerd config
+
+Example: define registry mirror for docker hub
+
+```yaml
+containerd_config:
+  grpc:
+    max_recv_message_size: 16777216
+    max_send_message_size: 16777216
+  debug:
+    level: ""
+  registries:
+    "docker.io":
+      - "https://mirror.gcr.io"
+      - "https://registry-1.docker.io"
+```
+
+[containerd]: https://containerd.io/

@@ -1,6 +1,8 @@
 ---
 # Please see roles/container-engine/containerd/defaults/main.yml for more configuration options

+# Example: define registry mirror for docker hub
+
 # containerd_config:
 #   grpc:
 #     max_recv_message_size: 16777216
@@ -8,7 +10,9 @@
 #   debug:
 #     level: ""
 #   registries:
-#     "docker.io": "https://registry-1.docker.io"
+#     "docker.io":
+#       - "https://mirror.gcr.io"
+#       - "https://registry-1.docker.io"
 #   max_container_log_line_size: -1
 #   metrics:
 #     address: ""

@@ -1,7 +1,7 @@
 ---
 # Kubernetes dashboard
 # RBAC required. see docs/getting-started.md for access details.
-dashboard_enabled: true
+# dashboard_enabled: true

 # Helm deployment
 helm_enabled: false

@@ -55,7 +55,7 @@ disabled_plugins = ["restart"]
       [plugins.cri.registry.mirrors]
 {% for registry, addr in containerd_config.registries.items() %}
         [plugins.cri.registry.mirrors."{{ registry }}"]
-          endpoint = ["{{ addr }}"]
+          endpoint = ["{{ ([ addr ] | flatten ) | join('","') }}"]
 {% endfor %}
 {% endif %}

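The one-line template change above is what lets a `registries` entry hold either a single mirror URL or a list of URLs: the value is wrapped in a list, flattened, and joined into a TOML string array. A standalone sketch of how that expression renders, using the `jinja2` Python package directly; note that `flatten` is an Ansible filter, not a built-in Jinja2 one, so this demo registers a simplified stand-in with similar behaviour and is only an approximation of the Ansible templating run:

```python
from jinja2 import Environment


def flatten(value):
    # Simplified stand-in for Ansible's `flatten` filter (one level deep).
    result = []
    for item in value:
        if isinstance(item, (list, tuple)):
            result.extend(item)
        else:
            result.append(item)
    return result


env = Environment()
env.filters['flatten'] = flatten

template = env.from_string(
    'endpoint = ["{{ ([ addr ] | flatten) | join(\'","\') }}"]'
)

# A plain string still renders as a single-element array ...
print(template.render(addr="https://registry-1.docker.io"))
# endpoint = ["https://registry-1.docker.io"]

# ... while a list renders as a multi-element array, matching the mirror
# list defined under containerd_config.registries."docker.io".
print(template.render(addr=["https://mirror.gcr.io",
                            "https://registry-1.docker.io"]))
# endpoint = ["https://mirror.gcr.io","https://registry-1.docker.io"]
```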
@@ -27,7 +27,7 @@ platforms:
     groups:
       - kube-master
   - name: fedora
-    box: fedora/31-cloud-base
+    box: fedora/33-cloud-base
     cpus: 2
     memory: 1024
     groups:

@@ -25,6 +25,7 @@
     baseurl: http://widehat.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_$releasever/
     gpgcheck: yes
     gpgkey: http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_$releasever/repodata/repomd.xml.key
+    keepcache: false
   when: ansible_distribution in ["CentOS"]

 - name: Add CRI-O kubic repo

@@ -624,13 +624,13 @@ etcd_image_repo: "{{ quay_image_repo }}/coreos/etcd"
 etcd_image_tag: "{{ etcd_version }}{%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%}"
 flannel_image_repo: "{{ quay_image_repo }}/coreos/flannel"
 flannel_image_tag: "{{ flannel_version }}"
-calico_node_image_repo: "{{ docker_image_repo }}/calico/node"
+calico_node_image_repo: "{{ quay_image_repo }}/calico/node"
 calico_node_image_tag: "{{ calico_version }}"
-calico_cni_image_repo: "{{ docker_image_repo }}/calico/cni"
+calico_cni_image_repo: "{{ quay_image_repo }}/calico/cni"
 calico_cni_image_tag: "{{ calico_cni_version }}"
-calico_policy_image_repo: "{{ docker_image_repo }}/calico/kube-controllers"
+calico_policy_image_repo: "{{ quay_image_repo }}/calico/kube-controllers"
 calico_policy_image_tag: "{{ calico_policy_version }}"
-calico_typha_image_repo: "{{ docker_image_repo }}/calico/typha"
+calico_typha_image_repo: "{{ quay_image_repo }}/calico/typha"
 calico_typha_image_tag: "{{ calico_typha_version }}"
 pod_infra_image_repo: "{{ kube_image_repo }}/pause"
 pod_infra_image_tag: "{{ pod_infra_version }}"

@@ -655,11 +655,11 @@ contiv_etcd_init_image_repo: "{{ docker_image_repo }}/ferest/etcd-initer"
 contiv_etcd_init_image_tag: latest
 contiv_ovs_image_repo: "{{ docker_image_repo }}/contiv/ovs"
 contiv_ovs_image_tag: "latest"
-cilium_image_repo: "{{ docker_image_repo }}/cilium/cilium"
+cilium_image_repo: "{{ quay_image_repo }}/cilium/cilium"
 cilium_image_tag: "{{ cilium_version }}"
-cilium_init_image_repo: "{{ docker_image_repo }}/cilium/cilium-init"
+cilium_init_image_repo: "{{ quay_image_repo }}/cilium/cilium-init"
 cilium_init_image_tag: "2019-04-05"
-cilium_operator_image_repo: "{{ docker_image_repo }}/cilium/operator"
+cilium_operator_image_repo: "{{ quay_image_repo }}/cilium/operator"
 cilium_operator_image_tag: "{{ cilium_version }}"
 kube_ovn_container_image_repo: "{{ alauda_image_repo }}/alaudak8s/kube-ovn"
 kube_ovn_container_image_tag: "{{ kube_ovn_version }}"

@@ -676,7 +676,8 @@ haproxy_image_tag: 1.9
 # Coredns version should be supported by corefile-migration (or at least work with)
 # bundle with kubeadm; if not 'basic' upgrade can sometimes fail
 coredns_version: "1.6.5"
-coredns_image_repo: "{{ docker_image_repo }}/coredns/coredns"
+coredns_image_repo: "{{ kube_image_repo }}/coredns"
+
 coredns_image_tag: "{{ coredns_version }}"

 nodelocaldns_version: "1.15.12"

@@ -686,10 +687,8 @@ nodelocaldns_image_tag: "{{ nodelocaldns_version }}"
 dnsautoscaler_version: 1.7.1
 dnsautoscaler_image_repo: "{{ kube_image_repo }}/cluster-proportional-autoscaler-{{ image_arch }}"
 dnsautoscaler_image_tag: "{{ dnsautoscaler_version }}"
-test_image_repo: "{{ docker_image_repo }}/library/busybox"
+test_image_repo: "{{ kube_image_repo }}/busybox"
 test_image_tag: latest
-busybox_image_repo: "{{ docker_image_repo }}/library/busybox"
-busybox_image_tag: 1.31.1
 helm_version: "v3.1.2"
 helm_image_repo: "{{ docker_image_repo }}/lachlanevenson/k8s-helm"
 helm_image_tag: "{{ helm_version }}"

@@ -1095,15 +1094,6 @@ downloads:
     groups:
       - kube-master

-  busybox:
-    enabled: "{{ kube_network_plugin in ['kube-router'] }}"
-    container: true
-    repo: "{{ busybox_image_repo }}"
-    tag: "{{ busybox_image_tag }}"
-    sha256: "{{ busybox_digest_checksum|default(None) }}"
-    groups:
-      - k8s-cluster
-
   testbox:
     enabled: false
     container: true

@@ -55,3 +55,6 @@ minimal_node_memory_mb: 1024
 minimal_master_memory_mb: 1500

 yum_repo_dir: /etc/yum.repos.d
+
+# number of times package install task should be retried
+pkg_install_retries: 4

@@ -77,7 +77,7 @@
     state: latest
   register: pkgs_task_result
   until: pkgs_task_result is succeeded
-  retries: 4
+  retries: "{{ pkg_install_retries }}"
   delay: "{{ retry_stagger | random + 3 }}"
   when: not (ansible_os_family in ["CoreOS", "Coreos", "Container Linux by CoreOS", "Flatcar", "Flatcar Container Linux by Kinvolk", "ClearLinux"] or is_fedora_coreos)
   tags:

@@ -0,0 +1,15 @@
+---
+docker_registry_mirrors:
+  - "https://mirror.gcr.io"
+
+containerd_config:
+  grpc:
+    max_recv_message_size: 16777216
+    max_send_message_size: 16777216
+  debug:
+    level: ""
+  registries:
+    "docker.io":
+      - "https://mirror.gcr.io"
+      - "https://registry-1.docker.io"
+  max_container_log_line_size: -1

@@ -7,6 +7,7 @@ mode: default
 kube_network_plugin: calico
 deploy_netchecker: true
 dns_min_replicas: 1
+dashboard_enabled: true

 # required / not autodetected for now
 calico_iptables_backend: "NFT"

@@ -41,7 +41,8 @@ fi
 # Check out latest tag if testing upgrade
 test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout "$KUBESPRAY_VERSION"
 # Checkout the CI vars file so it is available
-test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml tests/testcases/*.yml
+test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
+test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" ${CI_TEST_REGISTRY_MIRROR}

 # Install mitogen ansible plugin
 if [ "${MITOGEN_ENABLE}" = "true" ]; then

@@ -51,20 +52,20 @@ if [ "${MITOGEN_ENABLE}" = "true" ]; then
 fi

 # Create cluster
-ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" cluster.yml
+ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" cluster.yml

 # Repeat deployment if testing upgrade
 if [ "${UPGRADE_TEST}" != "false" ]; then
   test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml"
   test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml"
   git checkout "${CI_BUILD_REF}"
-  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" $PLAYBOOK
+  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" $PLAYBOOK
 fi

 # Test control plane recovery
 if [ "${RECOVER_CONTROL_PLANE_TEST}" != "false" ]; then
-  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "${RECOVER_CONTROL_PLANE_TEST_GROUPS}:!fake_hosts" -e reset_confirmation=yes reset.yml
-  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads -e etcd_retries=10 --limit etcd,kube-master:!fake_hosts recover-control-plane.yml
+  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "${RECOVER_CONTROL_PLANE_TEST_GROUPS}:!fake_hosts" -e reset_confirmation=yes reset.yml
+  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads -e etcd_retries=10 --limit etcd,kube-master:!fake_hosts recover-control-plane.yml
 fi

 # Tests Cases

@@ -88,7 +89,7 @@ ansible-playbook -i ${ANSIBLE_INVENTORY} -e @${CI_TEST_VARS} --limit "all:!fake_

 ## Idempotency checks 1/5 (repeat deployment)
 if [ "${IDEMPOT_CHECK}" = "true" ]; then
-  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" cluster.yml
+  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" cluster.yml
 fi

 ## Idempotency checks 2/5 (Advanced DNS checks)

@@ -98,12 +99,12 @@ fi

 ## Idempotency checks 3/5 (reset deployment)
 if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
-  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_VARS} -e reset_confirmation=yes --limit "all:!fake_hosts" reset.yml
+  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e reset_confirmation=yes --limit "all:!fake_hosts" reset.yml
 fi

 ## Idempotency checks 4/5 (redeploy after reset)
 if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
-  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" cluster.yml
+  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" cluster.yml
 fi

 ## Idempotency checks 5/5 (Advanced DNS checks)