Merge pull request #11559 from VannTen/cleanup/less_inventory_boilerplate

Only require minimum structure in inventory, compute the rest
pull/11564/head
Kubernetes Prow Robot 2024-09-23 10:08:00 +01:00 committed by GitHub
commit e9d406ed08
30 changed files with 85 additions and 122 deletions

View File

@ -127,8 +127,7 @@ recommended here:
You need to edit your inventory and add:
* `calico_rr` group with nodes in it. `calico_rr` can be combined with
`kube_node` and/or `kube_control_plane`. `calico_rr` group also must be a child
group of `k8s_cluster` group.
`kube_node` and/or `kube_control_plane`.
* `cluster_id` for each route reflector node/group (see details [here](https://hub.docker.com/r/calico/routereflector/))
Here's an example of Kubespray inventory with standalone route reflectors:
@ -157,11 +156,6 @@ node3
node4
node5
[k8s_cluster:children]
kube_node
kube_control_plane
calico_rr
[calico_rr]
rr0
rr1
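
For reference, a hedged sketch of what a minimal route-reflector inventory could look like after this change, with no `k8s_cluster:children` section at all (host names and the `cluster_id` value are purely illustrative):

```ini
[kube_control_plane]
node1

[etcd]
node1

[kube_node]
node2
node3

[calico_rr]
rr0
rr1

[calico_rr:vars]
# BGP cluster ID for the route reflectors (illustrative value)
cluster_id="1.0.0.1"
```

The `k8s_cluster` group no longer needs to be declared; it is computed as the union of `kube_node`, `kube_control_plane` and `calico_rr`.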

View File

@ -42,14 +42,6 @@ The inventory is composed of 3 groups:
* **kube_control_plane** : list of servers where kubernetes control plane components (apiserver, scheduler, controller) will run.
* **etcd**: list of servers that compose the etcd cluster. You should have at least 3 servers for failover purposes.
Note: do not modify the children of _k8s_cluster_, such as putting
the _etcd_ group into _k8s_cluster_, unless you are certain you want
to do that and have it fully contained in the latter:
```ShellSession
etcd ⊂ k8s_cluster => kube_node ∩ etcd = etcd
```
When _kube_node_ contains _etcd_, your etcd cluster is also schedulable for Kubernetes workloads.
If you want it standalone, make sure those groups do not intersect.
If you want the server to act both as control-plane and node, the server must be defined
@ -62,6 +54,9 @@ There are also two special groups:
* **calico_rr** : explained for [advanced Calico networking cases](/docs/CNI/calico.md)
* **bastion** : configure a bastion host if your nodes are not directly reachable
Lastly, the **k8s_cluster** group is dynamically defined as the union of **kube_node**, **kube_control_plane** and **calico_rr**.
This is used internally and for defining whole-cluster variables (`<inventory>/group_vars/k8s_cluster/*.yml`).
Below is a complete inventory example:
```ini
@ -89,10 +84,6 @@ node3
node4
node5
node6
[k8s_cluster:children]
kube_node
kube_control_plane
```
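
A hedged sketch of what that complete example could look like once the `k8s_cluster:children` boilerplate is dropped (host names and addresses are placeholders):

```ini
[kube_control_plane]
node1 ansible_host=10.0.0.11
node2 ansible_host=10.0.0.12

[etcd]
node1 ansible_host=10.0.0.11
node2 ansible_host=10.0.0.12
node3 ansible_host=10.0.0.13

[kube_node]
node2 ansible_host=10.0.0.12
node3 ansible_host=10.0.0.13
node4 ansible_host=10.0.0.14
node5 ansible_host=10.0.0.15
node6 ansible_host=10.0.0.16
```

Only the three core groups are declared; `k8s_cluster` (and its group_vars under `<inventory>/group_vars/k8s_cluster/`) still applies because the group is computed at runtime.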
## Group vars and overriding variables precedence

View File

@ -71,9 +71,6 @@
[kube_node:children]
kubenode
[k8s_cluster:children]
kubernetes
[etcd:children]
kubemaster
kubemaster-ha
@ -81,9 +78,6 @@
[kube_control_plane:children]
kubemaster
kubemaster-ha
[kubespray:children]
kubernetes
```
* The last entry here is needed to apply the kubespray.yml config file, renamed from all.yml of the kubespray project.

View File

@ -8,7 +8,3 @@ node1
[kube_node]
node1
[k8s_cluster:children]
kube_node
kube_control_plane

View File

@ -29,10 +29,3 @@
# node4
# node5
# node6
[calico_rr]
[k8s_cluster:children]
kube_control_plane
kube_node
calico_rr

View File

@ -2,7 +2,9 @@
- name: Check ansible version
import_playbook: ansible_version.yml
# These are inventory compatibility tasks to ensure we keep compatibility with old style group names
# These are inventory compatibility tasks with two purposes:
# - to ensure we keep compatibility with old style group names
# - to reduce inventory boilerplate (defining parent groups / empty groups)
- name: Add kube-master nodes to kube_control_plane
hosts: kube-master
@ -22,15 +24,6 @@
group_by:
key: 'kube_node'
- name: Add k8s-cluster nodes to k8s_cluster
hosts: k8s-cluster
gather_facts: false
tags: always
tasks:
- name: Add nodes to k8s_cluster group
group_by:
key: 'k8s_cluster'
- name: Add calico-rr nodes to calico_rr
hosts: calico-rr
gather_facts: false
@ -40,6 +33,15 @@
group_by:
key: 'calico_rr'
- name: Define k8s_cluster group
hosts: kube_node:kube_control_plane:calico_rr
gather_facts: false
tags: always
tasks:
- name: Add nodes to k8s_cluster group
group_by:
key: 'k8s_cluster'
- name: Add no-floating nodes to no_floating
hosts: no-floating
gather_facts: false
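
With the group computed at runtime by the `group_by` play above, later plays and `group_vars/k8s_cluster/*.yml` can keep targeting `k8s_cluster` even though the inventory never declares it. A minimal sketch of such a downstream play (the play and task names are illustrative):

```yaml
- name: Example play that targets the computed k8s_cluster group
  hosts: k8s_cluster
  gather_facts: false
  tasks:
    - name: Show which groups this host ended up in
      debug:
        var: group_names
```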

View File

@ -11,7 +11,7 @@
include_tasks: prep_kubeadm_images.yml
when:
- not skip_downloads | default(false)
- inventory_hostname in groups['kube_control_plane']
- ('kube_control_plane' in group_names)
tags:
- download
- upload
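
The recurring change through the rest of this PR is the same idiom: a lookup into the global `groups` dict is replaced with a membership test on the per-host `group_names` list. A minimal illustrative sketch of the two equivalent conditions (the task itself is made up):

```yaml
- name: Illustrative task gated on control-plane membership
  debug:
    msg: "runs only on control-plane hosts"
  # old style: requires groups['kube_control_plane'] to be resolvable
  # when: inventory_hostname in groups['kube_control_plane']
  # new style: checks the groups this host actually belongs to
  when: ('kube_control_plane' in group_names)
```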

View File

@ -21,7 +21,7 @@
get_checksum: true
get_mime: false
register: etcd_member_certs
when: inventory_hostname in groups['etcd']
when: ('etcd' in group_names)
with_items:
- ca.pem
- member-{{ inventory_hostname }}.pem
@ -33,7 +33,7 @@
stat:
path: "{{ etcd_cert_dir }}/{{ item }}"
register: etcd_node_certs
when: inventory_hostname in groups['k8s_cluster']
when: ('k8s_cluster' in group_names)
with_items:
- ca.pem
- node-{{ inventory_hostname }}.pem
@ -99,7 +99,7 @@
set_fact:
etcd_member_requires_sync: true
when:
- inventory_hostname in groups['etcd']
- ('etcd' in group_names)
- (not etcd_member_certs.results[0].stat.exists | default(false)) or
(not etcd_member_certs.results[1].stat.exists | default(false)) or
(not etcd_member_certs.results[2].stat.exists | default(false)) or
@ -115,7 +115,7 @@
set_fact:
kubernetes_host_requires_sync: true
when:
- inventory_hostname in groups['k8s_cluster'] and
- ('k8s_cluster' in group_names) and
inventory_hostname not in groups['etcd']
- (not etcd_node_certs.results[0].stat.exists | default(false)) or
(not etcd_node_certs.results[1].stat.exists | default(false)) or

View File

@ -79,7 +79,7 @@
{% endfor %}]"
delegate_to: "{{ groups['etcd'][0] }}"
when:
- inventory_hostname in groups['etcd']
- ('etcd' in group_names)
- sync_certs | default(false)
- inventory_hostname != groups['etcd'][0]
notify: Set etcd_secret_changed
@ -93,7 +93,7 @@
mode: "0640"
with_items: "{{ etcd_master_certs.results }}"
when:
- inventory_hostname in groups['etcd']
- ('etcd' in group_names)
- sync_certs | default(false)
- inventory_hostname != groups['etcd'][0]
loop_control:
@ -110,7 +110,7 @@
{% endfor %}]"
delegate_to: "{{ groups['etcd'][0] }}"
when:
- inventory_hostname in groups['etcd']
- ('etcd' in group_names)
- inventory_hostname != groups['etcd'][0]
- kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
@ -125,7 +125,7 @@
mode: "0640"
with_items: "{{ etcd_master_node_certs.results }}"
when:
- inventory_hostname in groups['etcd']
- ('etcd' in group_names)
- inventory_hostname != groups['etcd'][0]
- kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
@ -135,7 +135,7 @@
- name: Gen_certs | Generate etcd certs
include_tasks: gen_nodes_certs_script.yml
when:
- inventory_hostname in groups['kube_control_plane'] and
- ('kube_control_plane' in group_names) and
sync_certs | default(false) and inventory_hostname not in groups['etcd']
- name: Gen_certs | Generate etcd certs on nodes if needed
@ -143,7 +143,7 @@
when:
- kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- inventory_hostname in groups['k8s_cluster'] and
- ('k8s_cluster' in group_names) and
sync_certs | default(false) and inventory_hostname not in groups['etcd']
- name: Gen_certs | check certificate permissions

View File

@ -25,7 +25,7 @@
when:
- kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- inventory_hostname in groups['k8s_cluster']
- ('k8s_cluster' in group_names)
tags:
- etcd-secrets
@ -37,7 +37,7 @@
when:
- kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- inventory_hostname in groups['k8s_cluster']
- ('k8s_cluster' in group_names)
tags:
- master # master tag is deprecated and replaced by control-plane
- control-plane
@ -49,7 +49,7 @@
when:
- kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- inventory_hostname in groups['k8s_cluster']
- ('k8s_cluster' in group_names)
tags:
- master # master tag is deprecated and replaced by control-plane
- control-plane

View File

@ -9,7 +9,7 @@
loop_control:
loop_var: delegate_host_to_write_cacert
when:
- inventory_hostname in groups['k8s_cluster']
- ('k8s_cluster' in group_names)
- cinder_cacert is defined
- cinder_cacert | length > 0

View File

@ -243,5 +243,5 @@
delegate_to: "{{ first_kube_control_plane }}"
with_items:
- "node-role.kubernetes.io/control-plane:NoSchedule-"
when: inventory_hostname in groups['kube_node']
when: ('kube_node' in group_names)
failed_when: false

View File

@ -3,7 +3,7 @@
uri:
url: "https://{{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }}/healthz"
validate_certs: false
when: inventory_hostname in groups['kube_control_plane']
when: ('kube_control_plane' in group_names)
register: _result
retries: 60
delay: 5

View File

@ -51,7 +51,7 @@
register: "etcd_client_cert_serial_result"
changed_when: false
when:
- inventory_hostname in groups['k8s_cluster'] | union(groups['calico_rr'] | default([])) | unique | sort
- group_names | intersect(['k8s_cluster', 'calico_rr']) | length > 0
tags:
- network

View File

@ -8,7 +8,7 @@
tags:
- kubeadm
when:
- not inventory_hostname in groups['kube_control_plane']
- not ('kube_control_plane' in group_names)
- name: Install | Copy kubelet binary from download dir
copy:

View File

@ -35,7 +35,7 @@
get_checksum: false
get_mime: false
register: kube_apiserver_set
when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
when: ('kube_control_plane' in group_names) and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
listen: Preinstall | propagate resolvconf to k8s components
# FIXME(mattymo): Also restart for kubeadm mode
@ -46,7 +46,7 @@
get_checksum: false
get_mime: false
register: kube_controller_set
when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
when: ('kube_control_plane' in group_names) and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
listen: Preinstall | propagate resolvconf to k8s components
- name: Preinstall | restart kube-controller-manager docker
@ -55,7 +55,7 @@
executable: /bin/bash
when:
- container_manager == "docker"
- inventory_hostname in groups['kube_control_plane']
- ('kube_control_plane' in group_names)
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
- kube_controller_set.stat.exists
@ -71,7 +71,7 @@
until: preinstall_restart_controller_manager.rc == 0
when:
- container_manager in ['crio', 'containerd']
- inventory_hostname in groups['kube_control_plane']
- ('kube_control_plane' in group_names)
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
- kube_controller_set.stat.exists
@ -83,7 +83,7 @@
executable: /bin/bash
when:
- container_manager == "docker"
- inventory_hostname in groups['kube_control_plane']
- ('kube_control_plane' in group_names)
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
- kube_apiserver_set.stat.exists
@ -99,7 +99,7 @@
delay: 1
when:
- container_manager in ['crio', 'containerd']
- inventory_hostname in groups['kube_control_plane']
- ('kube_control_plane' in group_names)
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
- kube_apiserver_set.stat.exists
@ -116,7 +116,7 @@
delay: 1
when:
- dns_late
- inventory_hostname in groups['kube_control_plane']
- ('kube_control_plane' in group_names)
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
- not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] and not is_fedora_coreos

View File

@ -65,14 +65,14 @@
that: ansible_memtotal_mb >= minimal_master_memory_mb
when:
- not ignore_assert_errors
- inventory_hostname in groups['kube_control_plane']
- ('kube_control_plane' in group_names)
- name: Stop if memory is too small for nodes
assert:
that: ansible_memtotal_mb >= minimal_node_memory_mb
when:
- not ignore_assert_errors
- inventory_hostname in groups['kube_node']
- ('kube_node' in group_names)
# This command will fail if cgroups are not enabled on the node.
# For reference: https://kubernetes.io/docs/concepts/architecture/cgroups/#check-cgroup-version
@ -92,7 +92,7 @@
msg: "Do not schedule more pods on a node than inet addresses are available."
when:
- not ignore_assert_errors
- inventory_hostname in groups['k8s_cluster']
- ('k8s_cluster' in group_names)
- kube_network_node_prefix is defined
- kube_network_plugin != 'calico'

View File

@ -5,7 +5,7 @@
state: directory
owner: "{{ kube_owner }}"
mode: "0755"
when: inventory_hostname in groups['k8s_cluster']
when: ('k8s_cluster' in group_names)
become: true
tags:
- kubelet
@ -30,7 +30,7 @@
state: directory
owner: root
mode: "0755"
when: inventory_hostname in groups['k8s_cluster']
when: ('k8s_cluster' in group_names)
become: true
tags:
- kubelet
@ -55,7 +55,7 @@
get_mime: false
register: kube_cert_compat_dir_check
when:
- inventory_hostname in groups['k8s_cluster']
- ('k8s_cluster' in group_names)
- kube_cert_dir != kube_cert_compat_dir
- name: Create kubernetes kubeadm compat cert dir (kubernetes/kubeadm issue 1498)
@ -65,7 +65,7 @@
state: link
mode: "0755"
when:
- inventory_hostname in groups['k8s_cluster']
- ('k8s_cluster' in group_names)
- kube_cert_dir != kube_cert_compat_dir
- not kube_cert_compat_dir_check.stat.exists
@ -80,7 +80,7 @@
- "/opt/cni/bin"
when:
- kube_network_plugin in ["calico", "weave", "flannel", "cilium", "kube-ovn", "kube-router", "macvlan"]
- inventory_hostname in groups['k8s_cluster']
- ('k8s_cluster' in group_names)
tags:
- network
- cilium
@ -100,7 +100,7 @@
- "/var/lib/calico"
when:
- kube_network_plugin == "calico"
- inventory_hostname in groups['k8s_cluster']
- ('k8s_cluster' in group_names)
tags:
- network
- calico
@ -115,7 +115,7 @@
mode: "{{ local_volume_provisioner_directory_mode }}"
with_items: "{{ local_volume_provisioner_storage_classes.keys() | list }}"
when:
- inventory_hostname in groups['k8s_cluster']
- ('k8s_cluster' in group_names)
- local_volume_provisioner_enabled
tags:
- persistent_volumes

View File

@ -57,7 +57,7 @@
args:
executable: /bin/bash
when:
- inventory_hostname in groups['kube_control_plane']
- ('kube_control_plane' in group_names)
- sync_tokens | default(false)
- inventory_hostname != groups['kube_control_plane'][0]
- tokens_data.stdout

View File

@ -273,7 +273,7 @@ kubelet_shutdown_grace_period: 60s
kubelet_shutdown_grace_period_critical_pods: 20s
# Whether to deploy the container engine
deploy_container_engine: "{{ inventory_hostname in groups['k8s_cluster'] or etcd_deployment_type == 'docker' }}"
deploy_container_engine: "{{ 'k8s_cluster' in group_names or etcd_deployment_type == 'docker' }}"
# Container for runtime
container_manager: containerd

View File

@ -121,7 +121,7 @@
- name: Calico | kdd specific configuration
when:
- inventory_hostname in groups['kube_control_plane']
- ('kube_control_plane' in group_names)
- calico_datastore == "kdd"
block:
- name: Calico | Check if extra directory is needed
@ -321,7 +321,7 @@
nodeToNodeMeshEnabled: "false"
when:
- peer_with_router | default(false) or peer_with_calico_rr | default(false)
- inventory_hostname in groups['k8s_cluster']
- ('k8s_cluster' in group_names)
run_once: true
- name: Calico | Configure Calico BGP
@ -382,7 +382,7 @@
- {name: kubernetes-services-endpoint, file: kubernetes-services-endpoint.yml, type: cm }
register: calico_node_manifests
when:
- inventory_hostname in groups['kube_control_plane']
- ('kube_control_plane' in group_names)
- rbac_enabled or item.type not in rbac_resources
- name: Calico | Create calico manifests for typha
@ -394,7 +394,7 @@
- {name: calico, file: calico-typha.yml, type: typha}
register: calico_node_typha_manifest
when:
- inventory_hostname in groups['kube_control_plane']
- ('kube_control_plane' in group_names)
- typha_enabled
- name: Calico | get calico apiserver caBundle
@ -421,7 +421,7 @@
- {name: calico, file: calico-apiserver.yml, type: calico-apiserver}
register: calico_apiserver_manifest
when:
- inventory_hostname in groups['kube_control_plane']
- ('kube_control_plane' in group_names)
- calico_apiserver_enabled
- name: Start Calico resources
@ -473,7 +473,7 @@
with_items:
- {name: calico, file: calico-ipamconfig.yml, type: ipam}
when:
- inventory_hostname in groups['kube_control_plane']
- ('kube_control_plane' in group_names)
- calico_datastore == "kdd"
- name: Calico | Create ipamconfig resources

View File

@ -32,7 +32,7 @@
when:
- calico_rr_id is defined
- calico_group_id is defined
- inventory_hostname in groups['calico_rr']
- ('calico_rr' in group_names)
- name: Calico | Configure peering with route reflectors at global scope
command:

View File

@ -28,7 +28,7 @@
cmd: "{{ bin_dir }}/calicoctl.sh get node {{ inventory_hostname }}"
register: output_get_node
when:
- inventory_hostname in groups['k8s_cluster']
- ('k8s_cluster' in group_names)
- local_as is defined
- groups['calico_rr'] | default([]) | length == 0
delegate_to: "{{ groups['kube_control_plane'][0] }}"
@ -50,7 +50,7 @@
until: output.rc == 0
delay: "{{ retry_stagger | random + 3 }}"
when:
- inventory_hostname in groups['k8s_cluster']
- ('k8s_cluster' in group_names)
- local_as is defined
- groups['calico_rr'] | default([]) | length == 0
- output_get_node.rc == 0
@ -77,7 +77,7 @@
until: output.rc == 0
delay: "{{ retry_stagger | random + 3 }}"
when:
- inventory_hostname in groups['k8s_cluster']
- ('k8s_cluster' in group_names)
- local_as is defined
- groups['calico_rr'] | default([]) | length == 0
- output_get_node.rc != 0
@ -110,4 +110,4 @@
- "{{ peers | default([]) | selectattr('scope', 'undefined') | list | union(peers | default([]) | selectattr('scope', 'defined') | selectattr('scope', 'equalto', 'node') | list ) }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when:
- inventory_hostname in groups['k8s_cluster']
- ('k8s_cluster' in group_names)

View File

@ -59,7 +59,7 @@
- {name: cilium, file: sa.yml, type: sa}
register: cilium_node_manifests
when:
- inventory_hostname in groups['kube_control_plane']
- ('kube_control_plane' in group_names)
- item.when | default(True) | bool
- name: Cilium | Create Cilium Hubble manifests

View File

@ -4,18 +4,18 @@
with_items:
- "{{ kube_router_annotations_master }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: kube_router_annotations_master is defined and inventory_hostname in groups['kube_control_plane']
when: kube_router_annotations_master is defined and 'kube_control_plane' in group_names
- name: Kube-router | Add annotations on kube_node
command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}"
with_items:
- "{{ kube_router_annotations_node }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: kube_router_annotations_node is defined and inventory_hostname in groups['kube_node']
when: kube_router_annotations_node is defined and 'kube_node' in group_names
- name: Kube-router | Add common annotations on all servers
command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}"
with_items:
- "{{ kube_router_annotations_all }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: kube_router_annotations_all is defined and inventory_hostname in groups['k8s_cluster']
when: kube_router_annotations_all is defined and 'k8s_cluster' in group_names

View File

@ -5,7 +5,7 @@
when:
- groups['kube_control_plane'] | length > 0
# ignore servers that are not nodes
- inventory_hostname in groups['k8s_cluster'] and kube_override_hostname | default(inventory_hostname) in nodes.stdout_lines
- ('k8s_cluster' in group_names) and kube_override_hostname | default(inventory_hostname) in nodes.stdout_lines
retries: "{{ delete_node_retries }}"
# Sometimes the api-server can have a short window of unavailability when we delete a control plane node
delay: "{{ delete_node_delay_seconds }}"

View File

@ -6,7 +6,7 @@
register: remove_node_ip
when:
- groups['kube_control_plane'] | length > 0
- inventory_hostname in groups['etcd']
- ('etcd' in group_names)
- ip is not defined
- access_ip is not defined
delegate_to: "{{ groups['etcd'] | first }}"
@ -16,14 +16,14 @@
set_fact:
node_ip: "{{ ip | default(access_ip | default(remove_node_ip.stdout)) | trim }}"
when:
- inventory_hostname in groups['etcd']
- ('etcd' in group_names)
- name: Make sure node_ip is set
assert:
that: node_ip is defined and node_ip | length > 0
msg: "Etcd node ip is not set !"
when:
- inventory_hostname in groups['etcd']
- ('etcd' in group_names)
- name: Lookup etcd member id
shell: "set -o pipefail && {{ bin_dir }}/etcdctl member list | grep -w {{ node_ip }} | cut -d, -f1"
@ -42,7 +42,7 @@
ETCDCTL_CACERT: "{{ kube_cert_dir + '/etcd/ca.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/ca.pem' }}"
ETCDCTL_ENDPOINTS: "https://127.0.0.1:2379"
delegate_to: "{{ groups['etcd'] | first }}"
when: inventory_hostname in groups['etcd']
when: ('etcd' in group_names)
- name: Remove etcd member from cluster
command: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}"
@ -54,5 +54,5 @@
ETCDCTL_ENDPOINTS: "https://127.0.0.1:2379"
delegate_to: "{{ groups['etcd'] | first }}"
when:
- inventory_hostname in groups['etcd']
- ('etcd' in group_names)
- etcd_member_id.stdout | length > 0

View File

@ -211,7 +211,7 @@
command: "ipvsadm -C"
ignore_errors: true # noqa ignore-errors
when:
- kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s_cluster']
- kube_proxy_mode == 'ipvs' and 'k8s_cluster' in group_names
- name: Reset | check kube-ipvs0 network device
stat:

View File

@ -96,10 +96,3 @@ instance-2
instance-3
instance-4
{% endif %}
[k8s_cluster:children]
kube_node
kube_control_plane
calico_rr
[calico_rr]

View File

@ -15,6 +15,15 @@ else
fi
fi
# Check out latest tag if testing upgrade
if [ "${UPGRADE_TEST}" != "false" ]; then
git fetch --all && git checkout "$KUBESPRAY_VERSION"
# Checkout the CI vars file so it is available
git checkout "${CI_COMMIT_SHA}" tests/files/${CI_JOB_NAME}.yml
git checkout "${CI_COMMIT_SHA}" ${CI_TEST_REGISTRY_MIRROR}
git checkout "${CI_COMMIT_SHA}" ${CI_TEST_SETTING}
fi
# needed for ara not to complain
export TZ=UTC
@ -41,15 +50,6 @@ if [[ "$CI_JOB_NAME" =~ "opensuse" ]]; then
ansible all -m raw -a 'zypper --gpg-auto-import-keys refresh'
fi
# Check out latest tag if testing upgrade
if [ "${UPGRADE_TEST}" != "false" ]; then
git fetch --all && git checkout "$KUBESPRAY_VERSION"
# Checkout the CI vars file so it is available
git checkout "${CI_COMMIT_SHA}" tests/files/${CI_JOB_NAME}.yml
git checkout "${CI_COMMIT_SHA}" ${CI_TEST_REGISTRY_MIRROR}
git checkout "${CI_COMMIT_SHA}" ${CI_TEST_SETTING}
fi
run_playbook () {
playbook=$1
shift