project: resolve ansible-lint key-order rule (#10314)

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
pull/10357/head
Arthur Outhenin-Chalandre 2023-08-10 09:57:27 +02:00 committed by GitHub
parent 2a7c9d27b2
commit d21bfb84ad
22 changed files with 100 additions and 109 deletions
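Background: ansible-lint's key-order rule checks that task keys appear in a canonical order, most importantly that name comes first and that block/rescue/always come last; in practice the conditionals, vars and tags that used to trail a block are moved above it. A minimal sketch of the pattern applied throughout this change (task names, module and variables below are illustrative only, not taken from the repository):

# Before: flagged by key-order, because when/tags trail the block
- name: Example | configure service
  block:
    - name: Example | write config
      copy:
        src: example.conf
        dest: /etc/example.conf
        mode: 0644
  when: example_enabled
  tags:
    - example

# After: control keys first, block last
- name: Example | configure service
  when: example_enabled
  tags:
    - example
  block:
    - name: Example | write config
      copy:
        src: example.conf
        dest: /etc/example.conf
        mode: 0644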


@@ -25,10 +25,6 @@ skip_list:
   # We use template in names
   - 'name[template]'
-  # order of keys errors
-  # (Disabled in June 2023 after ansible upgrade; FIXME)
-  - 'key-order'
   # No changed-when on commands
   # (Disabled in June 2023 after ansible upgrade; FIXME)
   - 'no-changed-when'
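With 'key-order' dropped from skip_list, the rule is enforced again on every lint run. A quick local check, assuming ansible-lint is installed and invoked from the repository root (it picks up this .ansible-lint configuration automatically):

ansible-lint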


@@ -1,5 +1,7 @@
 ---
 - name: Disable firewalld and ufw
+  when:
+    - disable_service_firewall is defined and disable_service_firewall
   block:
     - name: List services
       service_facts:
@@ -19,6 +21,3 @@
         enabled: no
       when:
         "'ufw.service' in services"
-  when:
-    - disable_service_firewall is defined and disable_service_firewall


@@ -112,6 +112,7 @@
   notify: Restart containerd
 - name: Containerd | Configure containerd registries
+  when: containerd_use_config_path is defined and containerd_use_config_path | bool and containerd_insecure_registries is defined
   block:
     - name: Containerd Create registry directories
       file:
@@ -131,7 +132,6 @@
           capabilities = ["pull", "resolve", "push"]
           skip_verify = true
       with_dict: "{{ containerd_insecure_registries }}"
-  when: containerd_use_config_path is defined and containerd_use_config_path | bool and containerd_insecure_registries is defined
 # you can sometimes end up in a state where everything is installed
 # but containerd was not started / enabled


@@ -73,6 +73,14 @@
     - facts
 - name: Uninstall containerd
+  vars:
+    service_name: containerd.service
+  when:
+    - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
+    - container_manager != "containerd"
+    - docker_installed.matched == 0
+    - containerd_installed.matched > 0
+    - ansible_facts.services[service_name]['state'] == 'running'
   block:
     - name: Drain node
       include_role:
@@ -91,16 +99,15 @@
         name: container-engine/containerd
         tasks_from: reset
         handlers_from: reset
-  vars:
-    service_name: containerd.service
-  when:
-    - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
-    - container_manager != "containerd"
-    - docker_installed.matched == 0
-    - containerd_installed.matched > 0
-    - ansible_facts.services[service_name]['state'] == 'running'
 - name: Uninstall docker
+  vars:
+    service_name: docker.service
+  when:
+    - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
+    - container_manager != "docker"
+    - docker_installed.matched > 0
+    - ansible_facts.services[service_name]['state'] == 'running'
   block:
     - name: Drain node
       include_role:
@@ -118,15 +125,15 @@
       import_role:
         name: container-engine/docker
         tasks_from: reset
-  vars:
-    service_name: docker.service
-  when:
-    - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
-    - container_manager != "docker"
-    - docker_installed.matched > 0
-    - ansible_facts.services[service_name]['state'] == 'running'
 - name: Uninstall crio
+  vars:
+    service_name: crio.service
+  when:
+    - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
+    - container_manager != "crio"
+    - crio_installed.matched > 0
+    - ansible_facts.services[service_name]['state'] == 'running'
   block:
     - name: Drain node
       include_role:
@@ -144,10 +151,3 @@
       import_role:
         name: container-engine/cri-o
         tasks_from: reset
-  vars:
-    service_name: crio.service
-  when:
-    - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
-    - container_manager != "crio"
-    - crio_installed.matched > 0
-    - ansible_facts.services[service_name]['state'] == 'running'
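Note that the relocated vars and when still sit on the same block task, so service_name defined at block level remains visible to that block's own when conditions; only the key order changes, not the behaviour. Condensed sketch of the shape (hypothetical service and task names):

- name: Example | reset a runtime only when its unit is running
  vars:
    service_name: example.service
  when:
    - ansible_facts.services[service_name]['state'] == 'running'
  block:
    - name: Example | stop the service
      service:
        name: "{{ service_name }}"
        state: stopped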


@@ -1,5 +1,7 @@
 ---
-- block:
+- tags:
+    - download
+  block:
     - name: Set default values for flag variables
       set_fact:
         image_is_cached: false
@@ -121,5 +123,3 @@
         path: "{{ image_path_final }}"
       when:
         - not download_keep_remote_cache
-  tags:
-    - download


@@ -1,5 +1,7 @@
 ---
 - name: "Download_file | download {{ download.dest }}"
+  tags:
+    - download
   block:
     - name: Prep_download | Set a few facts
       set_fact:
@@ -139,6 +141,3 @@
     - name: "Download_file | Extract file archives"
       include_tasks: "extract_file.yml"
-  tags:
-    - download


@@ -15,6 +15,7 @@
   register: stat_etcdctl
 - name: Remove old etcd binary
+  when: stat_etcdctl.stat.exists
   block:
     - name: Check version
       command: "{{ bin_dir }}/etcdctl version"
@@ -27,7 +28,6 @@
         path: "{{ bin_dir }}/etcdctl"
         state: absent
       when: etcd_version.lstrip('v') not in etcdctl_version.stdout
-  when: stat_etcdctl.stat.exists
 - name: Check if etcdctl still exist after version check
   stat:
@@ -38,6 +38,7 @@
   register: stat_etcdctl
 - name: Copy etcdctl script to host
+  when: not stat_etcdctl.stat.exists
   block:
     - name: Copy etcdctl script to host
       shell: "{{ docker_bin_dir }}/docker cp \"$({{ docker_bin_dir }}/docker ps -qf ancestor={{ etcd_image_repo }}:{{ etcd_image_tag }})\":/usr/local/bin/etcdctl {{ etcd_data_dir }}/etcdctl"
@@ -56,7 +57,6 @@
         dest: "{{ bin_dir }}"
         remote_src: true
         mode: 0755
-  when: not stat_etcdctl.stat.exists
 - name: Remove binary in etcd data dir
   file:


@@ -54,6 +54,9 @@
     - inventory_hostname == groups['kube_control_plane'][0]
 - name: MetalLB | Address pools
+  when:
+    - inventory_hostname == groups['kube_control_plane'][0]
+    - metallb_config.address_pools is defined
   block:
     - name: MetalLB | Layout address pools template
       ansible.builtin.template:
@@ -69,11 +72,11 @@
         filename: "{{ kube_config_dir }}/pools.yaml"
         state: "{{ pools_rendering.changed | ternary('latest', 'present') }}"
       become: true
-  when:
-    - inventory_hostname == groups['kube_control_plane'][0]
-    - metallb_config.address_pools is defined
 - name: MetalLB | Layer2
+  when:
+    - inventory_hostname == groups['kube_control_plane'][0]
+    - metallb_config.layer2 is defined
   block:
     - name: MetalLB | Layout layer2 template
       ansible.builtin.template:
@@ -89,11 +92,11 @@
         filename: "{{ kube_config_dir }}/layer2.yaml"
         state: "{{ layer2_rendering.changed | ternary('latest', 'present') }}"
       become: true
-  when:
-    - inventory_hostname == groups['kube_control_plane'][0]
-    - metallb_config.layer2 is defined
 - name: MetalLB | Layer3
+  when:
+    - inventory_hostname == groups['kube_control_plane'][0]
+    - metallb_config.layer3 is defined
   block:
     - name: MetalLB | Layout layer3 template
       ansible.builtin.template:
@@ -109,9 +112,6 @@
         filename: "{{ kube_config_dir }}/layer3.yaml"
         state: "{{ layer3_rendering.changed | ternary('latest', 'present') }}"
       become: true
-  when:
-    - inventory_hostname == groups['kube_control_plane'][0]
-    - metallb_config.layer3 is defined
 - name: Kubernetes Apps | Delete MetalLB ConfigMap


@@ -109,6 +109,12 @@
   loop: "{{ kube_apiserver_enable_admission_plugins }}"
 - name: Kubeadm | Check apiserver.crt SANs
+  vars:
+    apiserver_ips: "{{ apiserver_sans | map('ipaddr') | reject('equalto', False) | list }}"
+    apiserver_hosts: "{{ apiserver_sans | difference(apiserver_ips) }}"
+  when:
+    - kubeadm_already_run.stat.exists
+    - not kube_external_ca_mode
   block:
     - name: Kubeadm | Check apiserver.crt SAN IPs
       command:
@@ -122,12 +128,6 @@
       loop: "{{ apiserver_hosts }}"
       register: apiserver_sans_host_check
       changed_when: apiserver_sans_host_check.stdout is not search('does match certificate')
-  vars:
-    apiserver_ips: "{{ apiserver_sans | map('ipaddr') | reject('equalto', False) | list }}"
-    apiserver_hosts: "{{ apiserver_sans | difference(apiserver_ips) }}"
-  when:
-    - kubeadm_already_run.stat.exists
-    - not kube_external_ca_mode
 - name: Kubeadm | regenerate apiserver cert 1/2
   file:


@@ -1,5 +1,6 @@
 ---
 - name: Gather cgroups facts for docker
+  when: container_manager == 'docker'
   block:
     - name: Look up docker cgroup driver
       shell: "set -o pipefail && docker info | grep 'Cgroup Driver' | awk -F': ' '{ print $2; }'"
@@ -12,9 +13,9 @@
     - name: Set kubelet_cgroup_driver_detected fact for docker
       set_fact:
         kubelet_cgroup_driver_detected: "{{ docker_cgroup_driver_result.stdout }}"
-  when: container_manager == 'docker'
 - name: Gather cgroups facts for crio
+  when: container_manager == 'crio'
   block:
     - name: Look up crio cgroup driver
       shell: "set -o pipefail && {{ bin_dir }}/crio-status info | grep 'cgroup driver' | awk -F': ' '{ print $2; }'"
@@ -26,13 +27,12 @@
     - name: Set kubelet_cgroup_driver_detected fact for crio
       set_fact:
        kubelet_cgroup_driver_detected: "{{ crio_cgroup_driver_result.stdout }}"
-  when: container_manager == 'crio'
 - name: Set kubelet_cgroup_driver_detected fact for containerd
+  when: container_manager == 'containerd'
   set_fact:
     kubelet_cgroup_driver_detected: >-
       {%- if containerd_use_systemd_cgroup -%}systemd{%- else -%}cgroupfs{%- endif -%}
-  when: container_manager == 'containerd'
 - name: Set kubelet_cgroup_driver
   set_fact:


@@ -69,6 +69,7 @@
   register: resolvconf_stat
 - name: Fetch resolconf
+  when: resolvconf_stat.stat.exists is defined and resolvconf_stat.stat.exists
   block:
     - name: Get content of /etc/resolv.conf
@@ -81,8 +82,6 @@
         configured_nameservers: "{{ resolvconf_slurp.content | b64decode | regex_findall('^nameserver\\s*(.*)', multiline=True) | ipaddr }}"
       when: resolvconf_slurp.content is defined
-  when: resolvconf_stat.stat.exists is defined and resolvconf_stat.stat.exists
 - name: Stop if /etc/resolv.conf not configured nameservers
   assert:
     that: configured_nameservers | length>0


@@ -242,6 +242,8 @@
 # TODO: Clean this task up when we drop backward compatibility support for `etcd_kubeadm_enabled`
 - name: Stop if etcd deployment type is not host or kubeadm when container_manager != docker and etcd_kubeadm_enabled is not defined
+  run_once: yes
+  when: etcd_kubeadm_enabled is defined
   block:
     - name: Warn the user if they are still using `etcd_kubeadm_enabled`
       debug:
@@ -257,8 +259,6 @@
           It is not possible to use `etcd_kubeadm_enabled` when `etcd_deployment_type` is set to {{ etcd_deployment_type }}.
           Unset the `etcd_kubeadm_enabled` variable and set `etcd_deployment_type` to desired deployment type (`host`, `kubeadm`, `docker`) instead."
       when: etcd_kubeadm_enabled
-  run_once: yes
-  when: etcd_kubeadm_enabled is defined
 - name: Stop if download_localhost is enabled but download_run_once is not
   assert:


@@ -10,6 +10,11 @@
   tags: bootstrap-os
 - name: Add debian 10 required repos
+  when:
+    - ansible_distribution == "Debian"
+    - ansible_distribution_version == "10"
+  tags:
+    - bootstrap-os
   block:
     - name: Add Debian Backports apt repo
       apt_repository:
@@ -26,11 +31,6 @@
         dest: "/etc/apt/preferences.d/libseccomp2"
         owner: "root"
         mode: 0644
-  when:
-    - ansible_distribution == "Debian"
-    - ansible_distribution_version == "10"
-  tags:
-    - bootstrap-os
 - name: Update package management cache (APT)
   apt:


@@ -1,5 +1,6 @@
 ---
 - name: Hosts | update inventory in hosts file
+  when: populate_inventory_to_hosts_file
   block:
     - name: Hosts | create list from inventory
       set_fact:
@@ -26,7 +27,6 @@
         unsafe_writes: yes
         marker: "# Ansible inventory hosts {mark}"
         mode: 0644
-  when: populate_inventory_to_hosts_file
 - name: Hosts | populate kubernetes loadbalancer address into hosts file
   lineinfile:
@@ -42,6 +42,7 @@
     - loadbalancer_apiserver.address is defined
 - name: Hosts | Update localhost entries in hosts file
+  when: populate_localhost_entries_to_hosts_file
   block:
     - name: Hosts | Retrieve hosts file content
       slurp:
@@ -74,7 +75,6 @@
         backup: yes
         unsafe_writes: yes
       loop: "{{ etc_hosts_localhosts_dict_target | default({}) | dict2items }}"
-  when: populate_localhost_entries_to_hosts_file
 # gather facts to update ansible_fqdn
 - name: Update facts


@@ -62,6 +62,9 @@
   when: calicoctl_sh_exists.stat.exists
 - name: Check that current calico version is enough for upgrade
+  run_once: True
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
+  when: calicoctl_sh_exists.stat.exists and calico_ready.rc == 0
   block:
     - name: Get current calico version
       shell: "set -o pipefail && {{ bin_dir }}/calicoctl.sh version | grep 'Client Version:' | awk '{ print $3}'"
@@ -78,9 +81,6 @@
           Your version of calico is not fresh enough for upgrade.
           Minimum version is {{ calico_min_version_required }} supported by the previous kubespray release.
           But current version is {{ calico_version_on_server.stdout }}.
-  run_once: True
-  delegate_to: "{{ groups['kube_control_plane'][0] }}"
-  when: calicoctl_sh_exists.stat.exists and calico_ready.rc == 0
 - name: "Check that cluster_id is set if calico_rr enabled"
   assert:
@@ -121,13 +121,13 @@
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
 - name: "Check ipip and vxlan mode defined correctly"
+  run_once: True
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
   assert:
     that:
       - "calico_ipip_mode in ['Always', 'CrossSubnet', 'Never']"
       - "calico_vxlan_mode in ['Always', 'CrossSubnet', 'Never']"
     msg: "calico inter host encapsulation mode is not 'Always', 'CrossSubnet' or 'Never'"
-  run_once: True
-  delegate_to: "{{ groups['kube_control_plane'][0] }}"
 - name: "Check ipip and vxlan mode if simultaneously enabled"
   assert:


@@ -120,6 +120,9 @@
     - enable_dual_stack_networks
 - name: Calico | kdd specific configuration
+  when:
+    - inventory_hostname in groups['kube_control_plane']
+    - calico_datastore == "kdd"
   block:
     - name: Calico | Check if extra directory is needed
       stat:
@@ -154,11 +157,10 @@
       retries: 5
       when:
         - inventory_hostname == groups['kube_control_plane'][0]
-  when:
-    - inventory_hostname in groups['kube_control_plane']
-    - calico_datastore == "kdd"
 - name: Calico | Configure Felix
+  when:
+    - inventory_hostname == groups['kube_control_plane'][0]
   block:
     - name: Calico | Get existing FelixConfiguration
       command: "{{ bin_dir }}/calicoctl.sh get felixconfig default -o json"
@@ -200,10 +202,10 @@
         cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
         stdin: "{{ _felix_config is string | ternary(_felix_config, _felix_config | to_json) }}"
       changed_when: False
-  when:
-    - inventory_hostname == groups['kube_control_plane'][0]
 - name: Calico | Configure Calico IP Pool
+  when:
+    - inventory_hostname == groups['kube_control_plane'][0]
   block:
     - name: Calico | Get existing calico network pool
       command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }} -o json"
@@ -240,10 +242,11 @@
         cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
         stdin: "{{ _calico_pool is string | ternary(_calico_pool, _calico_pool | to_json) }}"
       changed_when: False
-  when:
-    - inventory_hostname == groups['kube_control_plane'][0]
 - name: Calico | Configure Calico IPv6 Pool
+  when:
+    - inventory_hostname == groups['kube_control_plane'][0]
+    - enable_dual_stack_networks | bool
   block:
     - name: Calico | Get existing calico ipv6 network pool
       command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }}-ipv6 -o json"
@@ -280,9 +283,6 @@
         cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
         stdin: "{{ _calico_pool_ipv6 is string | ternary(_calico_pool_ipv6, _calico_pool_ipv6 | to_json) }}"
       changed_when: False
-  when:
-    - inventory_hostname == groups['kube_control_plane'][0]
-    - enable_dual_stack_networks | bool
 - name: Populate Service External IPs
   set_fact:
@@ -305,6 +305,8 @@
   run_once: yes
 - name: Calico | Configure Calico BGP
+  when:
+    - inventory_hostname == groups['kube_control_plane'][0]
   block:
     - name: Calico | Get existing BGP Configuration
       command: "{{ bin_dir }}/calicoctl.sh get bgpconfig default -o json"
@@ -345,8 +347,6 @@
         cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
         stdin: "{{ _bgp_config is string | ternary(_bgp_config, _bgp_config | to_json) }}"
       changed_when: False
-  when:
-    - inventory_hostname == groups['kube_control_plane'][0]
 - name: Calico | Create calico manifests
   template:


@@ -6,6 +6,9 @@
   failed_when: false
 - name: Gather calico facts
+  tags:
+    - facts
+  when: calico_cni_config_slurp.content is defined
   block:
     - name: Set fact calico_cni_config from slurped CNI config
       set_fact:
@@ -16,7 +19,6 @@
       when:
         - "'plugins' in calico_cni_config"
        - "'etcd_endpoints' in calico_cni_config.plugins.0"
-  when: calico_cni_config_slurp.content is defined
 - name: Calico | Get kubelet hostname
   shell: >-
@@ -43,5 +45,3 @@
       paths:
         - ../vars
       skip: true
-  tags:
-    - facts


@@ -33,11 +33,11 @@
     - not cni_config_slurp.failed
 - name: Kube-router | Set host_subnet variable
+  set_fact:
+    host_subnet: "{{ cni_config | json_query('plugins[?bridge==`kube-bridge`].ipam.subnet') | first }}"
   when:
     - cni_config is defined
     - cni_config | json_query('plugins[?bridge==`kube-bridge`].ipam.subnet') | length > 0
-  set_fact:
-    host_subnet: "{{ cni_config | json_query('plugins[?bridge==`kube-bridge`].ipam.subnet') | first }}"
 - name: Kube-router | Create cni config
   template:


@@ -28,6 +28,7 @@
   when: ansible_os_family in ["Debian"]
 - name: Install macvlan config on RH distros
+  when: ansible_os_family == "RedHat"
   block:
     - name: Macvlan | Install macvlan script on centos
       copy:
@@ -59,9 +60,8 @@
         - {src: centos-postup-macvlan.cfg, dst: post-up-mac0 }
       notify: Macvlan | restart network
-  when: ansible_os_family == "RedHat"
 - name: Install macvlan config on Flatcar
+  when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
   block:
     - name: Macvlan | Install service nat via gateway on Flatcar Container Linux
       template:
@@ -88,8 +88,6 @@
         - {src: coreos-network-macvlan.cfg, dst: macvlan.network }
       notify: Macvlan | restart network
-  when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
 - name: Macvlan | Install cni definition for Macvlan
   template:
     src: 10-macvlan.conf.j2


@@ -47,6 +47,9 @@
       {%- endif %}
 - name: Node draining
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
+  when:
+    - needs_cordoning
   block:
     - name: Cordon node
       command: "{{ kubectl }} cordon {{ kube_override_hostname | default(inventory_hostname) }}"
@@ -89,6 +92,10 @@
       delay: "{{ drain_retry_delay_seconds }}"
     - name: Drain fallback
+      when:
+        - drain_nodes
+        - drain_fallback_enabled
+        - result.rc != 0
       block:
         - name: Set facts after regular drain has failed
           set_fact:
@@ -113,10 +120,6 @@
           retries: "{{ drain_fallback_retries }}"
           delay: "{{ drain_fallback_retry_delay_seconds }}"
           changed_when: drain_fallback_result.rc == 0
-      when:
-        - drain_nodes
-        - drain_fallback_enabled
-        - result.rc != 0
   rescue:
     - name: Set node back to schedulable
@@ -126,6 +129,3 @@
       fail:
         msg: "Failed to drain node {{ kube_override_hostname | default(inventory_hostname) }}"
       when: upgrade_node_fail_if_drain_fails
-  delegate_to: "{{ groups['kube_control_plane'][0] }}"
-  when:
-    - needs_cordoning


@@ -8,6 +8,9 @@
   tags: [init, cni]
 - name: Apply kube-proxy nodeselector
+  tags: init
+  when:
+    - kube_proxy_deployed
   block:
     # Due to https://github.com/kubernetes/kubernetes/issues/58212 we cannot rely on exit code for "kubectl patch"
     - name: Check current nodeselector for kube-proxy daemonset
@@ -36,6 +39,3 @@
     - debug: # noqa name[missing]
         msg: "{{ patch_kube_proxy_state.stderr_lines }}"
       when: patch_kube_proxy_state is not skipped
-  tags: init
-  when:
-    - kube_proxy_deployed


@@ -17,6 +17,9 @@
   when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
 - name: Check kubelet serving certificates approved with kubelet_csr_approver
+  when:
+    - kubelet_rotate_server_certificates | default(false)
+    - kubelet_csr_approver_enabled | default(kubelet_rotate_server_certificates | default(false))
   block:
     - name: Get certificate signing requests
@@ -41,11 +44,11 @@
       assert:
         that: get_csr_denied_pending.stdout_lines | length == 0
         fail_msg: kubelet_csr_approver is enabled but CSRs are not approved
-  when:
-    - kubelet_rotate_server_certificates | default(false)
-    - kubelet_csr_approver_enabled | default(kubelet_rotate_server_certificates | default(false))
 - name: Approve kubelet serving certificates
+  when:
+    - kubelet_rotate_server_certificates | default(false)
+    - not (kubelet_csr_approver_enabled | default(kubelet_rotate_server_certificates | default(false)))
   block:
     - name: Get certificate signing requests
@@ -67,9 +70,6 @@
     - debug: # noqa name[missing]
         msg: "{{ certificate_approve.stdout.split('\n') }}"
-  when:
-    - kubelet_rotate_server_certificates | default(false)
-    - not (kubelet_csr_approver_enabled | default(kubelet_rotate_server_certificates | default(false)))
 - name: Create test namespace
   command: "{{ bin_dir }}/kubectl create namespace test"