project: resolve ansible-lint key-order rule (#10314)

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
pull/10357/head
Arthur Outhenin-Chalandre 2023-08-10 09:57:27 +02:00 committed by GitHub
parent 2a7c9d27b2
commit d21bfb84ad
22 changed files with 100 additions and 109 deletions
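
The hunks below all apply the same mechanical fix: ansible-lint's key-order rule expects `name` to be the first key of a task and `block`/`rescue`/`always` to be the last, so task-level keywords such as `when:`, `vars:` and `tags:` move from below the block to above it. A minimal before/after sketch of the pattern (hypothetical task and variable names):

# Flagged by key-order: `when` trails the block it guards
- name: Example task
  block:
    - name: Do the work
      command: /bin/true
  when: example_condition | default(false)

# Compliant: task keywords precede `block`, which closes the task
- name: Example task
  when: example_condition | default(false)
  block:
    - name: Do the work
      command: /bin/true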

View File

@@ -25,10 +25,6 @@ skip_list:
# We use template in names
- 'name[template]'
# order of keys errors
# (Disabled in June 2023 after ansible upgrade; FIXME)
- 'key-order'
# No changed-when on commands
# (Disabled in June 2023 after ansible upgrade; FIXME)
- 'no-changed-when'

View File

@@ -1,5 +1,7 @@
---
- name: Disable firewalld and ufw
when:
- disable_service_firewall is defined and disable_service_firewall
block:
- name: List services
service_facts:
@@ -19,6 +21,3 @@
enabled: no
when:
"'ufw.service' in services"
when:
- disable_service_firewall is defined and disable_service_firewall

View File

@@ -112,6 +112,7 @@
notify: Restart containerd
- name: Containerd | Configure containerd registries
when: containerd_use_config_path is defined and containerd_use_config_path | bool and containerd_insecure_registries is defined
block:
- name: Containerd | Create registry directories
file:
@@ -131,7 +132,6 @@
capabilities = ["pull", "resolve", "push"]
skip_verify = true
with_dict: "{{ containerd_insecure_registries }}"
when: containerd_use_config_path is defined and containerd_use_config_path | bool and containerd_insecure_registries is defined
# you can sometimes end up in a state where everything is installed
# but containerd was not started / enabled

View File

@@ -73,6 +73,14 @@
- facts
- name: Uninstall containerd
vars:
service_name: containerd.service
when:
- not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
- container_manager != "containerd"
- docker_installed.matched == 0
- containerd_installed.matched > 0
- ansible_facts.services[service_name]['state'] == 'running'
block:
- name: Drain node
include_role:
@@ -91,16 +99,15 @@
name: container-engine/containerd
tasks_from: reset
handlers_from: reset
vars:
service_name: containerd.service
when:
- not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
- container_manager != "containerd"
- docker_installed.matched == 0
- containerd_installed.matched > 0
- ansible_facts.services[service_name]['state'] == 'running'
- name: Uninstall docker
vars:
service_name: docker.service
when:
- not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
- container_manager != "docker"
- docker_installed.matched > 0
- ansible_facts.services[service_name]['state'] == 'running'
block:
- name: Drain node
include_role:
@@ -118,15 +125,15 @@
import_role:
name: container-engine/docker
tasks_from: reset
vars:
service_name: docker.service
when:
- not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
- container_manager != "docker"
- docker_installed.matched > 0
- ansible_facts.services[service_name]['state'] == 'running'
- name: Uninstall crio
vars:
service_name: crio.service
when:
- not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
- container_manager != "crio"
- crio_installed.matched > 0
- ansible_facts.services[service_name]['state'] == 'running'
block:
- name: Drain node
include_role:
@@ -144,10 +151,3 @@
import_role:
name: container-engine/cri-o
tasks_from: reset
vars:
service_name: crio.service
when:
- not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
- container_manager != "crio"
- crio_installed.matched > 0
- ansible_facts.services[service_name]['state'] == 'running'
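
A note on the uninstall tasks above: `service_name` is declared in task-level `vars:` and referenced from the `when:` list. Ansible resolves task vars before evaluating `when`, so the condition worked in either key order; the rule only standardizes the layout. A condensed sketch of the resulting shape (hypothetical engine and role names):

- name: Uninstall example engine
  vars:
    service_name: example.service   # available to the when: list below
  when:
    - container_manager != "example"
    - ansible_facts.services[service_name]['state'] == 'running'
  block:
    - name: Reset example engine
      import_role:
        name: container-engine/example
        tasks_from: reset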

View File

@@ -1,5 +1,7 @@
---
- block:
- tags:
- download
block:
- name: Set default values for flag variables
set_fact:
image_is_cached: false
@@ -121,5 +123,3 @@
path: "{{ image_path_final }}"
when:
- not download_keep_remote_cache
tags:
- download
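
Relocating `tags:` is equally behavior-preserving: tags set on a block are inherited by every task inside it, so `--tags download` selects the same tasks before and after the move. A short sketch (hypothetical task names and variables):

- name: Download example artifact
  tags:
    - download            # inherited by both tasks in the block
  block:
    - name: Fetch archive
      get_url:
        url: "{{ example_url }}"
        dest: "{{ example_dest }}"
        mode: 0644
    - name: Extract archive
      include_tasks: extract_file.yml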

View File

@@ -1,5 +1,7 @@
---
- name: "Download_file | download {{ download.dest }}"
tags:
- download
block:
- name: Prep_download | Set a few facts
set_fact:
@@ -139,6 +141,3 @@
- name: "Download_file | Extract file archives"
include_tasks: "extract_file.yml"
tags:
- download

View File

@@ -15,6 +15,7 @@
register: stat_etcdctl
- name: Remove old etcd binary
when: stat_etcdctl.stat.exists
block:
- name: Check version
command: "{{ bin_dir }}/etcdctl version"
@@ -27,7 +28,6 @@
path: "{{ bin_dir }}/etcdctl"
state: absent
when: etcd_version.lstrip('v') not in etcdctl_version.stdout
when: stat_etcdctl.stat.exists
- name: Check if etcdctl still exist after version check
stat:
@@ -38,6 +38,7 @@
register: stat_etcdctl
- name: Copy etcdctl script to host
when: not stat_etcdctl.stat.exists
block:
- name: Copy etcdctl script to host
shell: "{{ docker_bin_dir }}/docker cp \"$({{ docker_bin_dir }}/docker ps -qf ancestor={{ etcd_image_repo }}:{{ etcd_image_tag }})\":/usr/local/bin/etcdctl {{ etcd_data_dir }}/etcdctl"
@@ -56,7 +57,6 @@
dest: "{{ bin_dir }}"
remote_src: true
mode: 0755
when: not stat_etcdctl.stat.exists
- name: Remove binary in etcd data dir
file:

View File

@@ -54,6 +54,9 @@
- inventory_hostname == groups['kube_control_plane'][0]
- name: MetalLB | Address pools
when:
- inventory_hostname == groups['kube_control_plane'][0]
- metallb_config.address_pools is defined
block:
- name: MetalLB | Layout address pools template
ansible.builtin.template:
@@ -69,11 +72,11 @@
filename: "{{ kube_config_dir }}/pools.yaml"
state: "{{ pools_rendering.changed | ternary('latest', 'present') }}"
become: true
when:
- inventory_hostname == groups['kube_control_plane'][0]
- metallb_config.address_pools is defined
- name: MetalLB | Layer2
when:
- inventory_hostname == groups['kube_control_plane'][0]
- metallb_config.layer2 is defined
block:
- name: MetalLB | Layout layer2 template
ansible.builtin.template:
@@ -89,11 +92,11 @@
filename: "{{ kube_config_dir }}/layer2.yaml"
state: "{{ layer2_rendering.changed | ternary('latest', 'present') }}"
become: true
when:
- inventory_hostname == groups['kube_control_plane'][0]
- metallb_config.layer2 is defined
- name: MetalLB | Layer3
when:
- inventory_hostname == groups['kube_control_plane'][0]
- metallb_config.layer3 is defined
block:
- name: MetalLB | Layout layer3 template
ansible.builtin.template:
@@ -109,9 +112,6 @@
filename: "{{ kube_config_dir }}/layer3.yaml"
state: "{{ layer3_rendering.changed | ternary('latest', 'present') }}"
become: true
when:
- inventory_hostname == groups['kube_control_plane'][0]
- metallb_config.layer3 is defined
- name: Kubernetes Apps | Delete MetalLB ConfigMap

View File

@@ -109,6 +109,12 @@
loop: "{{ kube_apiserver_enable_admission_plugins }}"
- name: Kubeadm | Check apiserver.crt SANs
vars:
apiserver_ips: "{{ apiserver_sans | map('ipaddr') | reject('equalto', False) | list }}"
apiserver_hosts: "{{ apiserver_sans | difference(apiserver_ips) }}"
when:
- kubeadm_already_run.stat.exists
- not kube_external_ca_mode
block:
- name: Kubeadm | Check apiserver.crt SAN IPs
command:
@@ -122,12 +128,6 @@
loop: "{{ apiserver_hosts }}"
register: apiserver_sans_host_check
changed_when: apiserver_sans_host_check.stdout is not search('does match certificate')
vars:
apiserver_ips: "{{ apiserver_sans | map('ipaddr') | reject('equalto', False) | list }}"
apiserver_hosts: "{{ apiserver_sans | difference(apiserver_ips) }}"
when:
- kubeadm_already_run.stat.exists
- not kube_external_ca_mode
- name: Kubeadm | regenerate apiserver cert 1/2
file:

View File

@@ -1,5 +1,6 @@
---
- name: Gather cgroups facts for docker
when: container_manager == 'docker'
block:
- name: Look up docker cgroup driver
shell: "set -o pipefail && docker info | grep 'Cgroup Driver' | awk -F': ' '{ print $2; }'"
@@ -12,9 +13,9 @@
- name: Set kubelet_cgroup_driver_detected fact for docker
set_fact:
kubelet_cgroup_driver_detected: "{{ docker_cgroup_driver_result.stdout }}"
when: container_manager == 'docker'
- name: Gather cgroups facts for crio
when: container_manager == 'crio'
block:
- name: Look up crio cgroup driver
shell: "set -o pipefail && {{ bin_dir }}/crio-status info | grep 'cgroup driver' | awk -F': ' '{ print $2; }'"
@@ -26,13 +27,12 @@
- name: Set kubelet_cgroup_driver_detected fact for crio
set_fact:
kubelet_cgroup_driver_detected: "{{ crio_cgroup_driver_result.stdout }}"
when: container_manager == 'crio'
- name: Set kubelet_cgroup_driver_detected fact for containerd
when: container_manager == 'containerd'
set_fact:
kubelet_cgroup_driver_detected: >-
{%- if containerd_use_systemd_cgroup -%}systemd{%- else -%}cgroupfs{%- endif -%}
when: container_manager == 'containerd'
- name: Set kubelet_cgroup_driver
set_fact:

View File

@@ -69,6 +69,7 @@
register: resolvconf_stat
- name: Fetch resolconf
when: resolvconf_stat.stat.exists is defined and resolvconf_stat.stat.exists
block:
- name: Get content of /etc/resolv.conf
@@ -81,8 +82,6 @@
configured_nameservers: "{{ resolvconf_slurp.content | b64decode | regex_findall('^nameserver\\s*(.*)', multiline=True) | ipaddr }}"
when: resolvconf_slurp.content is defined
when: resolvconf_stat.stat.exists is defined and resolvconf_stat.stat.exists
- name: Stop if /etc/resolv.conf not configured nameservers
assert:
that: configured_nameservers | length>0

View File

@@ -242,6 +242,8 @@
# TODO: Clean this task up when we drop backward compatibility support for `etcd_kubeadm_enabled`
- name: Stop if etcd deployment type is not host or kubeadm when container_manager != docker and etcd_kubeadm_enabled is not defined
run_once: yes
when: etcd_kubeadm_enabled is defined
block:
- name: Warn the user if they are still using `etcd_kubeadm_enabled`
debug:
@@ -257,8 +259,6 @@
It is not possible to use `etcd_kubeadm_enabled` when `etcd_deployment_type` is set to {{ etcd_deployment_type }}.
Unset the `etcd_kubeadm_enabled` variable and set `etcd_deployment_type` to desired deployment type (`host`, `kubeadm`, `docker`) instead."
when: etcd_kubeadm_enabled
run_once: yes
when: etcd_kubeadm_enabled is defined
- name: Stop if download_localhost is enabled but download_run_once is not
assert:

View File

@@ -10,6 +10,11 @@
tags: bootstrap-os
- name: Add debian 10 required repos
when:
- ansible_distribution == "Debian"
- ansible_distribution_version == "10"
tags:
- bootstrap-os
block:
- name: Add Debian Backports apt repo
apt_repository:
@@ -26,11 +31,6 @@
dest: "/etc/apt/preferences.d/libseccomp2"
owner: "root"
mode: 0644
when:
- ansible_distribution == "Debian"
- ansible_distribution_version == "10"
tags:
- bootstrap-os
- name: Update package management cache (APT)
apt:

View File

@@ -1,5 +1,6 @@
---
- name: Hosts | update inventory in hosts file
when: populate_inventory_to_hosts_file
block:
- name: Hosts | create list from inventory
set_fact:
@@ -26,7 +27,6 @@
unsafe_writes: yes
marker: "# Ansible inventory hosts {mark}"
mode: 0644
when: populate_inventory_to_hosts_file
- name: Hosts | populate kubernetes loadbalancer address into hosts file
lineinfile:
@@ -42,6 +42,7 @@
- loadbalancer_apiserver.address is defined
- name: Hosts | Update localhost entries in hosts file
when: populate_localhost_entries_to_hosts_file
block:
- name: Hosts | Retrieve hosts file content
slurp:
@@ -74,7 +75,6 @@
backup: yes
unsafe_writes: yes
loop: "{{ etc_hosts_localhosts_dict_target | default({}) | dict2items }}"
when: populate_localhost_entries_to_hosts_file
# gather facts to update ansible_fqdn
- name: Update facts

View File

@@ -62,6 +62,9 @@
when: calicoctl_sh_exists.stat.exists
- name: Check that current calico version is enough for upgrade
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: calicoctl_sh_exists.stat.exists and calico_ready.rc == 0
block:
- name: Get current calico version
shell: "set -o pipefail && {{ bin_dir }}/calicoctl.sh version | grep 'Client Version:' | awk '{ print $3}'"
@@ -78,9 +81,6 @@
Your version of calico is not fresh enough for upgrade.
Minimum version is {{ calico_min_version_required }} supported by the previous kubespray release.
But current version is {{ calico_version_on_server.stdout }}.
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: calicoctl_sh_exists.stat.exists and calico_ready.rc == 0
- name: "Check that cluster_id is set if calico_rr enabled"
assert:
@@ -121,13 +121,13 @@
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check ipip and vxlan mode defined correctly"
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
assert:
that:
- "calico_ipip_mode in ['Always', 'CrossSubnet', 'Never']"
- "calico_vxlan_mode in ['Always', 'CrossSubnet', 'Never']"
msg: "calico inter host encapsulation mode is not 'Always', 'CrossSubnet' or 'Never'"
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check ipip and vxlan mode if simultaneously enabled"
assert:
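
The rule also reorders plain (non-block) tasks, as in the calico mode checks above: directives such as `run_once` and `delegate_to` are grouped ahead of the module invocation. A condensed sketch (hypothetical variable name):

- name: Check example mode defined correctly
  run_once: True
  delegate_to: "{{ groups['kube_control_plane'][0] }}"
  assert:
    that:
      - "example_mode in ['Always', 'CrossSubnet', 'Never']"
    msg: "example mode is not 'Always', 'CrossSubnet' or 'Never'"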

View File

@@ -120,6 +120,9 @@
- enable_dual_stack_networks
- name: Calico | kdd specific configuration
when:
- inventory_hostname in groups['kube_control_plane']
- calico_datastore == "kdd"
block:
- name: Calico | Check if extra directory is needed
stat:
@@ -154,11 +157,10 @@
retries: 5
when:
- inventory_hostname == groups['kube_control_plane'][0]
when:
- inventory_hostname in groups['kube_control_plane']
- calico_datastore == "kdd"
- name: Calico | Configure Felix
when:
- inventory_hostname == groups['kube_control_plane'][0]
block:
- name: Calico | Get existing FelixConfiguration
command: "{{ bin_dir }}/calicoctl.sh get felixconfig default -o json"
@@ -200,10 +202,10 @@
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ _felix_config is string | ternary(_felix_config, _felix_config | to_json) }}"
changed_when: False
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: Calico | Configure Calico IP Pool
when:
- inventory_hostname == groups['kube_control_plane'][0]
block:
- name: Calico | Get existing calico network pool
command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }} -o json"
@@ -240,10 +242,11 @@
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ _calico_pool is string | ternary(_calico_pool, _calico_pool | to_json) }}"
changed_when: False
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: Calico | Configure Calico IPv6 Pool
when:
- inventory_hostname == groups['kube_control_plane'][0]
- enable_dual_stack_networks | bool
block:
- name: Calico | Get existing calico ipv6 network pool
command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }}-ipv6 -o json"
@@ -280,9 +283,6 @@
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ _calico_pool_ipv6 is string | ternary(_calico_pool_ipv6, _calico_pool_ipv6 | to_json) }}"
changed_when: False
when:
- inventory_hostname == groups['kube_control_plane'][0]
- enable_dual_stack_networks | bool
- name: Populate Service External IPs
set_fact:
@@ -305,6 +305,8 @@
run_once: yes
- name: Calico | Configure Calico BGP
when:
- inventory_hostname == groups['kube_control_plane'][0]
block:
- name: Calico | Get existing BGP Configuration
command: "{{ bin_dir }}/calicoctl.sh get bgpconfig default -o json"
@@ -345,8 +347,6 @@
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ _bgp_config is string | ternary(_bgp_config, _bgp_config | to_json) }}"
changed_when: False
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: Calico | Create calico manifests
template:

View File

@@ -6,6 +6,9 @@
failed_when: false
- name: Gather calico facts
tags:
- facts
when: calico_cni_config_slurp.content is defined
block:
- name: Set fact calico_cni_config from slurped CNI config
set_fact:
@@ -16,7 +19,6 @@
when:
- "'plugins' in calico_cni_config"
- "'etcd_endpoints' in calico_cni_config.plugins.0"
when: calico_cni_config_slurp.content is defined
- name: Calico | Get kubelet hostname
shell: >-
@@ -43,5 +45,3 @@
paths:
- ../vars
skip: true
tags:
- facts

View File

@@ -33,11 +33,11 @@
- not cni_config_slurp.failed
- name: Kube-router | Set host_subnet variable
set_fact:
host_subnet: "{{ cni_config | json_query('plugins[?bridge==`kube-bridge`].ipam.subnet') | first }}"
when:
- cni_config is defined
- cni_config | json_query('plugins[?bridge==`kube-bridge`].ipam.subnet') | length > 0
set_fact:
host_subnet: "{{ cni_config | json_query('plugins[?bridge==`kube-bridge`].ipam.subnet') | first }}"
- name: Kube-router | Create cni config
template:

View File

@@ -28,6 +28,7 @@
when: ansible_os_family in ["Debian"]
- name: Install macvlan config on RH distros
when: ansible_os_family == "RedHat"
block:
- name: Macvlan | Install macvlan script on centos
copy:
@@ -59,9 +60,8 @@
- {src: centos-postup-macvlan.cfg, dst: post-up-mac0 }
notify: Macvlan | restart network
when: ansible_os_family == "RedHat"
- name: Install macvlan config on Flatcar
when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
block:
- name: Macvlan | Install service nat via gateway on Flatcar Container Linux
template:
@@ -88,8 +88,6 @@
- {src: coreos-network-macvlan.cfg, dst: macvlan.network }
notify: Macvlan | restart network
when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
- name: Macvlan | Install cni definition for Macvlan
template:
src: 10-macvlan.conf.j2

View File

@@ -47,6 +47,9 @@
{%- endif %}
- name: Node draining
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when:
- needs_cordoning
block:
- name: Cordon node
command: "{{ kubectl }} cordon {{ kube_override_hostname | default(inventory_hostname) }}"
@@ -89,6 +92,10 @@
delay: "{{ drain_retry_delay_seconds }}"
- name: Drain fallback
when:
- drain_nodes
- drain_fallback_enabled
- result.rc != 0
block:
- name: Set facts after regular drain has failed
set_fact:
@@ -113,10 +120,6 @@
retries: "{{ drain_fallback_retries }}"
delay: "{{ drain_fallback_retry_delay_seconds }}"
changed_when: drain_fallback_result.rc == 0
when:
- drain_nodes
- drain_fallback_enabled
- result.rc != 0
rescue:
- name: Set node back to schedulable
@@ -126,6 +129,3 @@
fail:
msg: "Failed to drain node {{ kube_override_hostname | default(inventory_hostname) }}"
when: upgrade_node_fail_if_drain_fails
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when:
- needs_cordoning

View File

@@ -8,6 +8,9 @@
tags: [init, cni]
- name: Apply kube-proxy nodeselector
tags: init
when:
- kube_proxy_deployed
block:
# Due to https://github.com/kubernetes/kubernetes/issues/58212 we cannot rely on exit code for "kubectl patch"
- name: Check current nodeselector for kube-proxy daemonset
@@ -36,6 +39,3 @@
- debug: # noqa name[missing]
msg: "{{ patch_kube_proxy_state.stderr_lines }}"
when: patch_kube_proxy_state is not skipped
tags: init
when:
- kube_proxy_deployed

View File

@@ -17,6 +17,9 @@
when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
- name: Check kubelet serving certificates approved with kubelet_csr_approver
when:
- kubelet_rotate_server_certificates | default(false)
- kubelet_csr_approver_enabled | default(kubelet_rotate_server_certificates | default(false))
block:
- name: Get certificate signing requests
@@ -41,11 +44,11 @@
assert:
that: get_csr_denied_pending.stdout_lines | length == 0
fail_msg: kubelet_csr_approver is enabled but CSRs are not approved
when:
- kubelet_rotate_server_certificates | default(false)
- kubelet_csr_approver_enabled | default(kubelet_rotate_server_certificates | default(false))
- name: Approve kubelet serving certificates
when:
- kubelet_rotate_server_certificates | default(false)
- not (kubelet_csr_approver_enabled | default(kubelet_rotate_server_certificates | default(false)))
block:
- name: Get certificate signing requests
@@ -67,9 +70,6 @@
- debug: # noqa name[missing]
msg: "{{ certificate_approve.stdout.split('\n') }}"
when:
- kubelet_rotate_server_certificates | default(false)
- not (kubelet_csr_approver_enabled | default(kubelet_rotate_server_certificates | default(false)))
- name: Create test namespace
command: "{{ bin_dir }}/kubectl create namespace test"