From 25cb90bc2d6353ee4d49942c5e03d82187f07fe0 Mon Sep 17 00:00:00 2001 From: Arthur Outhenin-Chalandre Date: Mon, 26 Jun 2023 12:15:45 +0200 Subject: [PATCH] Upgrade ansible (#10190) * project: update all dependencies including ansible Upgrade to ansible 7.x and ansible-core 2.14.x. There seem to be issues with ansible 8/ansible-core 2.15, so we stay on ansible 7/ansible-core 2.14 for now. It's quite a big bump already anyway. Signed-off-by: Arthur Outhenin-Chalandre * tests: install aws galaxy collection Signed-off-by: Arthur Outhenin-Chalandre * ansible-lint: disable various rules after ansible upgrade Temporarily disable a bunch of linting rules following the ansible upgrade. Those should be taken care of separately. Signed-off-by: Arthur Outhenin-Chalandre * project: resolve deprecated-module ansible-lint error Signed-off-by: Arthur Outhenin-Chalandre * project: resolve no-free-form ansible-lint error Signed-off-by: Arthur Outhenin-Chalandre * project: resolve schema[meta] ansible-lint error Signed-off-by: Arthur Outhenin-Chalandre * project: resolve schema[playbook] ansible-lint error Signed-off-by: Arthur Outhenin-Chalandre * project: resolve schema[tasks] ansible-lint error Signed-off-by: Arthur Outhenin-Chalandre * project: resolve risky-file-permissions ansible-lint error Signed-off-by: Arthur Outhenin-Chalandre * project: resolve risky-shell-pipe ansible-lint error Signed-off-by: Arthur Outhenin-Chalandre * project: remove deprecated warn args Signed-off-by: Arthur Outhenin-Chalandre * project: use fqcn for non builtin tasks Signed-off-by: Arthur Outhenin-Chalandre * project: resolve syntax-check[missing-file] for contrib playbook Signed-off-by: Arthur Outhenin-Chalandre * project: use arithmetic inside jinja to fix ansible 6 upgrade Signed-off-by: Arthur Outhenin-Chalandre --------- Signed-off-by: Arthur Outhenin-Chalandre --- .ansible-lint | 35 +++++++++++++++ .gitlab-ci.yml | 1 + .gitlab-ci/lint.yml | 1 + .gitlab-ci/molecule.yml | 1 + .gitlab-ci/vagrant.yml | 1 + CONTRIBUTING.md | 1 + .../dind/roles/dind-cluster/tasks/main.yaml | 2 +- contrib/dind/roles/dind-host/tasks/main.yaml | 2 +- contrib/kvm-setup/kvm-setup.yml | 4 +- .../kvm-setup/roles/kvm-setup/tasks/main.yml | 4 +- .../roles/kvm-setup/tasks/sysctl.yml | 6 +-- contrib/mitogen/mitogen.yml | 7 +-- .../roles/glusterfs/client/meta/main.yml | 6 +-- .../roles/glusterfs/client/tasks/main.yml | 9 ++-- .../glusterfs/client/tasks/setup-RedHat.yml | 8 +++- .../roles/glusterfs/server/meta/main.yml | 6 +-- .../roles/glusterfs/server/tasks/main.yml | 43 +++++++++++++------ .../glusterfs/server/tasks/setup-RedHat.yml | 8 +++- .../heketi/roles/prepare/tasks/main.yml | 2 +- contrib/offline/generate_list.yml | 1 + playbooks/ansible_version.yml | 4 +- playbooks/reset.yml | 15 ++++--- requirements.txt | 10 ++--- roles/bootstrap-os/tasks/bootstrap-centos.yml | 9 ++-- roles/bootstrap-os/tasks/bootstrap-fedora.yml | 2 +- .../bootstrap-os/tasks/bootstrap-opensuse.yml | 6 +-- roles/bootstrap-os/tasks/bootstrap-redhat.yml | 10 ++--- .../container-engine/cri-o/tasks/cleanup.yaml | 2 +- roles/container-engine/cri-o/tasks/main.yaml | 4 +- roles/container-engine/cri-o/tasks/reset.yml | 2 - .../cri-o/tasks/setup-amazon.yaml | 2 +- .../kata-containers/tasks/main.yml | 2 +- roles/download/tasks/download_container.yml | 4 +- roles/download/tasks/download_file.yml | 4 +- roles/etcd/handlers/backup.yml | 7 +-- roles/etcd/handlers/backup_cleanup.yml | 3 +- roles/etcd/tasks/configure.yml | 20 +++++---- roles/etcd/tasks/gen_certs_script.yml | 33 +++++++++++++-------
roles/etcd/tasks/gen_nodes_certs_script.yml | 1 - roles/etcd/tasks/join_etcd-events_member.yml | 6 +-- roles/etcd/tasks/join_etcd_member.yml | 6 +-- roles/kubernetes-apps/argocd/tasks/main.yml | 4 +- .../cloud_controller/oci/tasks/main.yml | 2 +- .../cluster_roles/tasks/main.yml | 5 ++- .../control-plane/handlers/main.yml | 24 ++++++++--- roles/kubernetes/node/tasks/facts.yml | 9 ++-- roles/kubernetes/node/tasks/main.yml | 10 ++--- roles/kubernetes/preinstall/handlers/main.yml | 16 +++++-- .../preinstall/tasks/0010-swapoff.yml | 2 +- .../tasks/0063-networkmanager-dns.yml | 6 +-- .../tasks/0080-system-configurations.yml | 14 +++--- .../tasks/0081-ntp-configurations.yml | 2 +- roles/kubernetes/tokens/tasks/gen_tokens.yml | 1 - roles/kubespray-defaults/defaults/main.yaml | 8 ++-- roles/network_plugin/calico/handlers/main.yml | 8 +++- roles/network_plugin/cilium/tasks/install.yml | 2 +- .../kube-router/handlers/main.yml | 8 +++- .../network_plugin/kube-router/tasks/main.yml | 2 +- roles/network_plugin/macvlan/tasks/main.yml | 6 +-- roles/network_plugin/multus/tasks/main.yml | 1 + .../control-plane/tasks/main.yml | 4 +- .../recover_control_plane/etcd/tasks/main.yml | 20 ++++----- .../etcd/tasks/recover_lost_quorum.yml | 20 ++++----- .../remove-etcd-node/tasks/main.yml | 8 ++-- roles/reset/tasks/main.yml | 2 +- run.rc | 1 + scripts/collect-info.yaml | 16 ++++--- .../roles/kubevirt-images/tasks/main.yml | 1 + tests/cloud_playbooks/create-aws.yml | 2 +- tests/cloud_playbooks/create-do.yml | 5 ++- tests/cloud_playbooks/create-gce.yml | 4 +- tests/cloud_playbooks/delete-aws.yml | 4 +- .../roles/packet-ci/tasks/delete-vms.yml | 2 +- tests/cloud_playbooks/upload-logs-gcs.yml | 9 ++-- tests/requirements.txt | 8 ++-- tests/requirements.yml | 4 ++ tests/scripts/testcases_prepare.sh | 1 + tests/testcases/030_check-network.yml | 4 +- tests/testcases/040_check-network-adv.yml | 4 ++ tests/testcases/100_check-k8s-conformance.yml | 1 + .../roles/cluster-dump/tasks/main.yml | 2 +- 81 files changed, 345 insertions(+), 207 deletions(-) create mode 100644 tests/requirements.yml diff --git a/.ansible-lint b/.ansible-lint index d84419e6a..9ea65c48b 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -35,6 +35,41 @@ skip_list: # Roles in kubespray don't need fully qualified collection names # (Disabled in Feb 2023) - 'fqcn-builtins' + + # names should start with an uppercase letter + # (Disabled in June 2023 after ansible upgrade; FIXME) + - 'name[casing]' + + # Everything should be named + # (Disabled in June 2023 after ansible upgrade; FIXME) + - 'name[play]' + - 'name[missing]' + + # templates should only be at the end of 'name' + # (Disabled in June 2023 after ansible upgrade; FIXME) + - 'name[jinja]' + - 'name[template]' + + # order of keys errors + # (Disabled in June 2023 after ansible upgrade; FIXME) + - 'key-order' + + # No changed-when on commands + # (Disabled in June 2023 after ansible upgrade; FIXME) + - 'no-changed-when' + + # Disable galaxy rules + # (Disabled in June 2023 after ansible upgrade; FIXME) + - 'galaxy' + + # Disable run-once check with free strategy + # (Disabled in June 2023 after ansible upgrade; FIXME) + - 'run-once[task]' + + # Disable outdated-tag check + # (Disabled in June 2023 after ansible upgrade; FIXME) + - 'warning[outdated-tag]' exclude_paths: # Generated files - tests/files/custom_cni/cilium.yaml + - venv diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 9b805e296..5b6a9e41f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -42,6 +42,7 @@ before_script: - 
update-alternatives --install /usr/bin/python python /usr/bin/python3 1 - python -m pip uninstall -y ansible ansible-base ansible-core - python -m pip install -r tests/requirements.txt + - ansible-galaxy install -r tests/requirements.yml - mkdir -p /.ssh .job: &job diff --git a/.gitlab-ci/lint.yml b/.gitlab-ci/lint.yml index f063ea085..51560118d 100644 --- a/.gitlab-ci/lint.yml +++ b/.gitlab-ci/lint.yml @@ -71,6 +71,7 @@ tox-inventory-builder: - update-alternatives --install /usr/bin/python python /usr/bin/python3 10 - python -m pip uninstall -y ansible ansible-base ansible-core - python -m pip install -r tests/requirements.txt + - ansible-galaxy install -r tests/requirements.yml script: - pip3 install tox - cd contrib/inventory_builder && tox diff --git a/.gitlab-ci/molecule.yml b/.gitlab-ci/molecule.yml index 736c0ffd7..901f5fc35 100644 --- a/.gitlab-ci/molecule.yml +++ b/.gitlab-ci/molecule.yml @@ -13,6 +13,7 @@ - update-alternatives --install /usr/bin/python python /usr/bin/python3 10 - python -m pip uninstall -y ansible ansible-base ansible-core - python -m pip install -r tests/requirements.txt + - ansible-galaxy install -r tests/requirements.yml - ./tests/scripts/vagrant_clean.sh script: - ./tests/scripts/molecule_run.sh diff --git a/.gitlab-ci/vagrant.yml b/.gitlab-ci/vagrant.yml index e7dbf73ff..dba24bbb2 100644 --- a/.gitlab-ci/vagrant.yml +++ b/.gitlab-ci/vagrant.yml @@ -17,6 +17,7 @@ - update-alternatives --install /usr/bin/python python /usr/bin/python3 10 - python -m pip uninstall -y ansible ansible-base ansible-core - python -m pip install -r tests/requirements.txt + - ansible-galaxy install -r tests/requirements.yml - ./tests/scripts/vagrant_clean.sh script: - ./tests/scripts/testcases_run.sh diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6986c0f14..08f2f9475 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -12,6 +12,7 @@ To install development dependencies you can set up a python virtual env with the virtualenv venv source venv/bin/activate pip install -r tests/requirements.txt +ansible-galaxy install -r tests/requirements.yml ``` #### Linting diff --git a/contrib/dind/roles/dind-cluster/tasks/main.yaml b/contrib/dind/roles/dind-cluster/tasks/main.yaml index 04609bb01..59023df3c 100644 --- a/contrib/dind/roles/dind-cluster/tasks/main.yaml +++ b/contrib/dind/roles/dind-cluster/tasks/main.yaml @@ -67,7 +67,7 @@ mode: 0640 - name: Add my pubkey to "{{ distro_user }}" user authorized keys - authorized_key: + ansible.posix.authorized_key: user: "{{ distro_user }}" state: present key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}" diff --git a/contrib/dind/roles/dind-host/tasks/main.yaml b/contrib/dind/roles/dind-host/tasks/main.yaml index 5b63a6b37..2541a9319 100644 --- a/contrib/dind/roles/dind-host/tasks/main.yaml +++ b/contrib/dind/roles/dind-host/tasks/main.yaml @@ -13,7 +13,7 @@ distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}" - name: Create dind node containers from "containers" inventory section - docker_container: + community.docker.docker_container: image: "{{ distro_image }}" name: "{{ item }}" state: started diff --git a/contrib/kvm-setup/kvm-setup.yml b/contrib/kvm-setup/kvm-setup.yml index 18b720668..0496d78b7 100644 --- a/contrib/kvm-setup/kvm-setup.yml +++ b/contrib/kvm-setup/kvm-setup.yml @@ -3,6 +3,6 @@ gather_facts: False become: yes vars: - - bootstrap_os: none + bootstrap_os: none roles: - - kvm-setup + - { role: kvm-setup } diff --git a/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml 
b/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml index a033c4ee9..fa89836d4 100644 --- a/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml +++ b/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml @@ -23,8 +23,8 @@ when: ansible_os_family == "Debian" # Create deployment user if required -- include: user.yml +- include_tasks: user.yml when: k8s_deployment_user is defined # Set proper sysctl values -- include: sysctl.yml +- import_tasks: sysctl.yml diff --git a/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml b/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml index d991b10ac..52bc83f09 100644 --- a/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml +++ b/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml @@ -1,6 +1,6 @@ --- - name: Load br_netfilter module - modprobe: + community.general.modprobe: name: br_netfilter state: present register: br_netfilter @@ -25,7 +25,7 @@ - name: Enable net.ipv4.ip_forward in sysctl - sysctl: + ansible.posix.sysctl: name: net.ipv4.ip_forward value: 1 sysctl_file: "{{ sysctl_file_path }}" @@ -33,7 +33,7 @@ reload: yes - name: Set bridge-nf-call-{arptables,iptables} to 0 - sysctl: + ansible.posix.sysctl: name: "{{ item }}" state: present value: 0 diff --git a/contrib/mitogen/mitogen.yml b/contrib/mitogen/mitogen.yml index 4dbd0fb76..7b93faf2f 100644 --- a/contrib/mitogen/mitogen.yml +++ b/contrib/mitogen/mitogen.yml @@ -1,6 +1,6 @@ --- - name: Check ansible version - import_playbook: ansible_version.yml + import_playbook: kubernetes_sigs.kubespray.ansible_version - hosts: localhost strategy: linear @@ -24,6 +24,7 @@ url: "{{ mitogen_url }}" dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz" validate_certs: true + mode: 0644 - name: extract archive unarchive: @@ -31,12 +32,12 @@ dest: "{{ playbook_dir }}/dist/" - name: copy plugin - synchronize: + ansible.posix.synchronize: src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/" dest: "{{ playbook_dir }}/plugins/mitogen" - name: add strategy to ansible.cfg - ini_file: + community.general.ini_file: path: ansible.cfg mode: 0644 section: "{{ item.section | d('defaults') }}" diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/client/meta/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/client/meta/main.yml index 8d3513f02..b7fe4962e 100644 --- a/contrib/network-storage/glusterfs/roles/glusterfs/client/meta/main.yml +++ b/contrib/network-storage/glusterfs/roles/glusterfs/client/meta/main.yml @@ -6,12 +6,12 @@ galaxy_info: description: GlusterFS installation for Linux. company: "Midwestern Mac, LLC" license: "license (BSD, MIT)" - min_ansible_version: 2.0 + min_ansible_version: "2.0" platforms: - name: EL versions: - - 6 - - 7 + - "6" + - "7" - name: Ubuntu versions: - precise diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/main.yml index e6c3dacb0..151ea5751 100644 --- a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/main.yml +++ b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/main.yml @@ -3,14 +3,17 @@ # hyperkube and needs to be installed as part of the system. # Setup/install tasks. -- include: setup-RedHat.yml +- include_tasks: setup-RedHat.yml when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined -- include: setup-Debian.yml +- include_tasks: setup-Debian.yml when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined - name: Ensure Gluster mount directories exist. 
- file: "path={{ item }} state=directory mode=0775" + file: + path: "{{ item }}" + state: directory + mode: 0775 with_items: - "{{ gluster_mount_dir }}" when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-RedHat.yml b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-RedHat.yml index 86827efcd..d2ee36aa7 100644 --- a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-RedHat.yml +++ b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-RedHat.yml @@ -1,10 +1,14 @@ --- - name: Install Prerequisites - package: name={{ item }} state=present + package: + name: "{{ item }}" + state: present with_items: - "centos-release-gluster{{ glusterfs_default_release }}" - name: Install Packages - package: name={{ item }} state=present + package: + name: "{{ item }}" + state: present with_items: - glusterfs-client diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/meta/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/meta/main.yml index 8d3513f02..b7fe4962e 100644 --- a/contrib/network-storage/glusterfs/roles/glusterfs/server/meta/main.yml +++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/meta/main.yml @@ -6,12 +6,12 @@ galaxy_info: description: GlusterFS installation for Linux. company: "Midwestern Mac, LLC" license: "license (BSD, MIT)" - min_ansible_version: 2.0 + min_ansible_version: "2.0" platforms: - name: EL versions: - - 6 - - 7 + - "6" + - "7" - name: Ubuntu versions: - precise diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml index 0a5859850..db82d5f11 100644 --- a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml +++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml @@ -5,39 +5,55 @@ # Install xfs package - name: install xfs Debian - apt: name=xfsprogs state=present + apt: + name: xfsprogs + state: present when: ansible_os_family == "Debian" - name: install xfs RedHat - package: name=xfsprogs state=present + package: + name: xfsprogs + state: present when: ansible_os_family == "RedHat" # Format external volumes in xfs - name: Format volumes in xfs - filesystem: "fstype=xfs dev={{ disk_volume_device_1 }}" + community.general.filesystem: + fstype: xfs + dev: "{{ disk_volume_device_1 }}" # Mount external volumes - name: mounting new xfs filesystem - mount: "name={{ gluster_volume_node_mount_dir }} src={{ disk_volume_device_1 }} fstype=xfs state=mounted" + ansible.posix.mount: + name: "{{ gluster_volume_node_mount_dir }}" + src: "{{ disk_volume_device_1 }}" + fstype: xfs + state: mounted" # Setup/install tasks. -- include: setup-RedHat.yml +- include_tasks: setup-RedHat.yml when: ansible_os_family == 'RedHat' -- include: setup-Debian.yml +- include_tasks: setup-Debian.yml when: ansible_os_family == 'Debian' - name: Ensure GlusterFS is started and enabled at boot. - service: "name={{ glusterfs_daemon }} state=started enabled=yes" + service: + name: "{{ glusterfs_daemon }}" + state: started + enabled: yes - name: Ensure Gluster brick and mount directories exist. 
- file: "path={{ item }} state=directory mode=0775" + file: + path: "{{ item }}" + state: directory + mode: 0775 with_items: - "{{ gluster_brick_dir }}" - "{{ gluster_mount_dir }}" - name: Configure Gluster volume with replicas - gluster_volume: + gluster.gluster.gluster_volume: state: present name: "{{ gluster_brick_name }}" brick: "{{ gluster_brick_dir }}" @@ -49,7 +65,7 @@ when: groups['gfs-cluster']|length > 1 - name: Configure Gluster volume without replicas - gluster_volume: + gluster.gluster.gluster_volume: state: present name: "{{ gluster_brick_name }}" brick: "{{ gluster_brick_dir }}" @@ -60,7 +76,7 @@ when: groups['gfs-cluster']|length <= 1 - name: Mount glusterfs to retrieve disk size - mount: + ansible.posix.mount: name: "{{ gluster_mount_dir }}" src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster" fstype: glusterfs @@ -69,7 +85,8 @@ when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0] - name: Get Gluster disk size - setup: filter=ansible_mounts + setup: + filter: ansible_mounts register: mounts_data when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0] @@ -86,7 +103,7 @@ when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0] - name: Unmount glusterfs - mount: + ansible.posix.mount: name: "{{ gluster_mount_dir }}" fstype: glusterfs src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster" diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-RedHat.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-RedHat.yml index 9dc8f0bb4..5a4e09ef3 100644 --- a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-RedHat.yml +++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-RedHat.yml @@ -1,11 +1,15 @@ --- - name: Install Prerequisites - package: name={{ item }} state=present + package: + name: "{{ item }}" + state: present with_items: - "centos-release-gluster{{ glusterfs_default_release }}" - name: Install Packages - package: name={{ item }} state=present + package: + name: "{{ item }}" + state: present with_items: - glusterfs-server - glusterfs-client diff --git a/contrib/network-storage/heketi/roles/prepare/tasks/main.yml b/contrib/network-storage/heketi/roles/prepare/tasks/main.yml index dad3bae97..20012b120 100644 --- a/contrib/network-storage/heketi/roles/prepare/tasks/main.yml +++ b/contrib/network-storage/heketi/roles/prepare/tasks/main.yml @@ -5,7 +5,7 @@ - "dm_snapshot" - "dm_mirror" - "dm_thin_pool" - modprobe: + community.general.modprobe: name: "{{ item }}" state: "present" diff --git a/contrib/offline/generate_list.yml b/contrib/offline/generate_list.yml index c3458e675..5442425bc 100644 --- a/contrib/offline/generate_list.yml +++ b/contrib/offline/generate_list.yml @@ -14,6 +14,7 @@ - template: src: ./contrib/offline/temp/{{ item }}.list.template dest: ./contrib/offline/temp/{{ item }}.list + mode: 0644 with_items: - files - images diff --git a/playbooks/ansible_version.yml b/playbooks/ansible_version.yml index 84aad69c8..7e8a0df4c 100644 --- a/playbooks/ansible_version.yml +++ b/playbooks/ansible_version.yml @@ -3,8 +3,8 @@ gather_facts: false become: no vars: - minimal_ansible_version: 2.12.0 - maximal_ansible_version: 2.13.0 + minimal_ansible_version: 2.14.0 + maximal_ansible_version: 2.16.0 ansible_connection: local tags: always tasks: diff --git a/playbooks/reset.yml b/playbooks/reset.yml index 6fa9fa3ac..654f0a1b7 100644 --- a/playbooks/reset.yml +++ 
b/playbooks/reset.yml @@ -17,14 +17,15 @@ - hosts: etcd:k8s_cluster:calico_rr gather_facts: False - vars_prompt: - name: "reset_confirmation" - prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster." - default: "no" - private: no - pre_tasks: - - name: check confirmation + - name: Reset Confirmation + pause: + prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster." + register: reset_confirmation + run_once: True + when: + - not (skip_confirmation | default(false) | bool) + - name: Check confirmation fail: msg: "Reset confirmation failed" when: reset_confirmation != "yes" diff --git a/requirements.txt b/requirements.txt index 3b97a13a5..d2724e99e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,10 +1,10 @@ -ansible==5.7.1 -ansible-core==2.12.10 -cryptography==3.4.8 +ansible==7.6.0 +ansible-core==2.14.6 +cryptography==41.0.1 jinja2==3.1.2 jmespath==1.0.1 -MarkupSafe==2.1.2 +MarkupSafe==2.1.3 netaddr==0.8.0 pbr==5.11.1 -ruamel.yaml==0.17.21 +ruamel.yaml==0.17.31 ruamel.yaml.clib==0.2.7 diff --git a/roles/bootstrap-os/tasks/bootstrap-centos.yml b/roles/bootstrap-os/tasks/bootstrap-centos.yml index 19555005a..aaab37202 100644 --- a/roles/bootstrap-os/tasks/bootstrap-centos.yml +++ b/roles/bootstrap-os/tasks/bootstrap-centos.yml @@ -5,7 +5,7 @@ filter: ansible_distribution_*version - name: Add proxy to yum.conf or dnf.conf if http_proxy is defined - ini_file: + community.general.ini_file: path: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf','/etc/dnf/dnf.conf') }}" section: main option: proxy @@ -21,6 +21,7 @@ get_url: url: https://yum.oracle.com/public-yum-ol7.repo dest: /etc/yum.repos.d/public-yum-ol7.repo + mode: 0644 when: - use_oracle_public_repo|default(true) - '''ID="ol"'' in os_release.stdout_lines' @@ -28,7 +29,7 @@ environment: "{{ proxy_env }}" - name: Enable Oracle Linux repo - ini_file: + community.general.ini_file: dest: /etc/yum.repos.d/public-yum-ol7.repo section: "{{ item }}" option: enabled @@ -53,7 +54,7 @@ - (ansible_distribution_version | float) >= 7.6 - name: Enable Oracle Linux repo - ini_file: + community.general.ini_file: dest: "/etc/yum.repos.d/oracle-linux-ol{{ ansible_distribution_major_version }}.repo" section: "ol{{ ansible_distribution_major_version }}_addons" option: "{{ item.option }}" @@ -69,7 +70,7 @@ - (ansible_distribution_version | float) >= 7.6 - name: Enable Centos extra repo for Oracle Linux - ini_file: + community.general.ini_file: dest: "/etc/yum.repos.d/centos-extras.repo" section: "extras" option: "{{ item.option }}" diff --git a/roles/bootstrap-os/tasks/bootstrap-fedora.yml b/roles/bootstrap-os/tasks/bootstrap-fedora.yml index 161317315..4ce77b44a 100644 --- a/roles/bootstrap-os/tasks/bootstrap-fedora.yml +++ b/roles/bootstrap-os/tasks/bootstrap-fedora.yml @@ -10,7 +10,7 @@ - facts - name: Add proxy to dnf.conf if http_proxy is defined - ini_file: + community.general.ini_file: path: "/etc/dnf/dnf.conf" section: main option: proxy diff --git a/roles/bootstrap-os/tasks/bootstrap-opensuse.yml b/roles/bootstrap-os/tasks/bootstrap-opensuse.yml index c833bfd0d..9b69dcd89 100644 --- a/roles/bootstrap-os/tasks/bootstrap-opensuse.yml +++ b/roles/bootstrap-os/tasks/bootstrap-opensuse.yml @@ -58,7 +58,7 @@ # Without this package, the get_url module fails when trying to handle https - name: Install python-cryptography - zypper: + community.general.zypper: name: python-cryptography state: present update_cache: true @@ -67,7 +67,7 @@ - 
ansible_distribution_version is version('15.4', '<') - name: Install python3-cryptography - zypper: + community.general.zypper: name: python3-cryptography state: present update_cache: true @@ -77,7 +77,7 @@ # Nerdctl needs some basic packages to get an environment up - name: Install basic dependencies - zypper: + community.general.zypper: name: - iptables - apparmor-parser diff --git a/roles/bootstrap-os/tasks/bootstrap-redhat.yml b/roles/bootstrap-os/tasks/bootstrap-redhat.yml index def816465..c9e53525b 100644 --- a/roles/bootstrap-os/tasks/bootstrap-redhat.yml +++ b/roles/bootstrap-os/tasks/bootstrap-redhat.yml @@ -5,7 +5,7 @@ filter: ansible_distribution_*version - name: Add proxy to yum.conf or dnf.conf if http_proxy is defined - ini_file: + community.general.ini_file: path: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf','/etc/dnf/dnf.conf') }}" section: main option: proxy @@ -31,7 +31,7 @@ become: true - name: RHEL subscription Organization ID/Activation Key registration - redhat_subscription: + community.general.redhat_subscription: state: present org_id: "{{ rh_subscription_org_id }}" activationkey: "{{ rh_subscription_activation_key }}" @@ -50,7 +50,7 @@ # this task has no_log set to prevent logging security sensitive information such as subscription passwords - name: RHEL subscription Username/Password registration - redhat_subscription: + community.general.redhat_subscription: state: present username: "{{ rh_subscription_username }}" password: "{{ rh_subscription_password }}" @@ -70,7 +70,7 @@ # container-selinux is in extras repo - name: Enable RHEL 7 repos - rhsm_repository: + community.general.rhsm_repository: name: - "rhel-7-server-rpms" - "rhel-7-server-extras-rpms" @@ -81,7 +81,7 @@ # container-selinux is in appstream repo - name: Enable RHEL 8 repos - rhsm_repository: + community.general.rhsm_repository: name: - "rhel-8-for-*-baseos-rpms" - "rhel-8-for-*-appstream-rpms" diff --git a/roles/container-engine/cri-o/tasks/cleanup.yaml b/roles/container-engine/cri-o/tasks/cleanup.yaml index ab06ca01a..fd2f119af 100644 --- a/roles/container-engine/cri-o/tasks/cleanup.yaml +++ b/roles/container-engine/cri-o/tasks/cleanup.yaml @@ -83,7 +83,7 @@ when: ansible_distribution in ["Amazon"] - name: Disable modular repos for CRI-O - ini_file: + community.general.ini_file: path: "/etc/yum.repos.d/{{ item.repo }}.repo" section: "{{ item.section }}" option: enabled diff --git a/roles/container-engine/cri-o/tasks/main.yaml b/roles/container-engine/cri-o/tasks/main.yaml index d6c7eb099..bdd300b2a 100644 --- a/roles/container-engine/cri-o/tasks/main.yaml +++ b/roles/container-engine/cri-o/tasks/main.yaml @@ -122,7 +122,7 @@ mode: 0755 - name: cri-o | set overlay driver - ini_file: + community.general.ini_file: dest: /etc/containers/storage.conf section: storage option: "{{ item.option }}" @@ -136,7 +136,7 @@ # metacopy=on is available since 4.19 and was backported to RHEL 4.18 kernel - name: cri-o | set metacopy mount options correctly - ini_file: + community.general.ini_file: dest: /etc/containers/storage.conf section: storage.options.overlay option: mountopt diff --git a/roles/container-engine/cri-o/tasks/reset.yml b/roles/container-engine/cri-o/tasks/reset.yml index 9c8c0aac1..460382766 100644 --- a/roles/container-engine/cri-o/tasks/reset.yml +++ b/roles/container-engine/cri-o/tasks/reset.yml @@ -43,8 +43,6 @@ - name: CRI-O | Run yum-clean-metadata command: yum clean metadata - args: - warn: no when: - ansible_os_family == "RedHat" tags: diff --git 
a/roles/container-engine/cri-o/tasks/setup-amazon.yaml b/roles/container-engine/cri-o/tasks/setup-amazon.yaml index 369036725..843bc2029 100644 --- a/roles/container-engine/cri-o/tasks/setup-amazon.yaml +++ b/roles/container-engine/cri-o/tasks/setup-amazon.yaml @@ -14,7 +14,7 @@ - amzn2_extras_file_stat.stat.exists - name: Remove docker repository - ini_file: + community.general.ini_file: dest: /etc/yum.repos.d/amzn2-extras.repo section: amzn2extra-docker option: enabled diff --git a/roles/container-engine/kata-containers/tasks/main.yml b/roles/container-engine/kata-containers/tasks/main.yml index e5b7cd8db..9d1bf9126 100644 --- a/roles/container-engine/kata-containers/tasks/main.yml +++ b/roles/container-engine/kata-containers/tasks/main.yml @@ -36,7 +36,7 @@ - qemu - name: kata-containers | Load vhost kernel modules - modprobe: + community.general.modprobe: state: present name: "{{ item }}" with_items: diff --git a/roles/download/tasks/download_container.yml b/roles/download/tasks/download_container.yml index 39e0e34c2..e956b6ff2 100644 --- a/roles/download/tasks/download_container.yml +++ b/roles/download/tasks/download_container.yml @@ -80,7 +80,7 @@ - download_run_once - name: download_container | Copy image to ansible host cache - synchronize: + ansible.posix.synchronize: src: "{{ image_path_final }}" dest: "{{ image_path_cached }}" use_ssh_args: true @@ -92,7 +92,7 @@ - download_delegate == inventory_hostname - name: download_container | Upload image to node if it is cached - synchronize: + ansible.posix.synchronize: src: "{{ image_path_cached }}" dest: "{{ image_path_final }}" use_ssh_args: true diff --git a/roles/download/tasks/download_file.yml b/roles/download/tasks/download_file.yml index e6576f29e..fba76405e 100644 --- a/roles/download/tasks/download_file.yml +++ b/roles/download/tasks/download_file.yml @@ -105,7 +105,7 @@ no_log: "{{ not (unsafe_show_logs|bool) }}" - name: download_file | Copy file back to ansible host file cache - synchronize: + ansible.posix.synchronize: src: "{{ file_path_cached }}" dest: "{{ file_path_cached }}" use_ssh_args: true @@ -116,7 +116,7 @@ - download_delegate == inventory_hostname - name: download_file | Copy file from cache to nodes, if it is available - synchronize: + ansible.posix.synchronize: src: "{{ file_path_cached }}" dest: "{{ download.dest }}" use_ssh_args: true diff --git a/roles/etcd/handlers/backup.yml b/roles/etcd/handlers/backup.yml index d848cdbcb..2c5577862 100644 --- a/roles/etcd/handlers/backup.yml +++ b/roles/etcd/handlers/backup.yml @@ -11,7 +11,8 @@ when: etcd_cluster_is_healthy.rc == 0 - name: Refresh Time Fact - setup: filter=ansible_date_time + setup: + filter: ansible_date_time - name: Set Backup Directory set_fact: @@ -40,7 +41,7 @@ --data-dir {{ etcd_data_dir }} --backup-dir {{ etcd_backup_directory }} environment: - ETCDCTL_API: 2 + ETCDCTL_API: "2" retries: 3 register: backup_v2_command until: backup_v2_command.rc == 0 @@ -51,7 +52,7 @@ {{ bin_dir }}/etcdctl snapshot save {{ etcd_backup_directory }}/snapshot.db environment: - ETCDCTL_API: 3 + ETCDCTL_API: "3" ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses.split(',') | first }}" ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" diff --git a/roles/etcd/handlers/backup_cleanup.yml b/roles/etcd/handlers/backup_cleanup.yml index e670f46f8..3cebfd046 100644 --- a/roles/etcd/handlers/backup_cleanup.yml +++ b/roles/etcd/handlers/backup_cleanup.yml @@ -7,5 +7,6 @@ - name: Remove old 
etcd backups shell: chdir: "{{ etcd_backup_prefix }}" - cmd: "find . -name 'etcd-*' -type d | sort -n | head -n -{{ etcd_backup_retention_count }} | xargs rm -rf" + cmd: "set -o pipefail && find . -name 'etcd-*' -type d | sort -n | head -n -{{ etcd_backup_retention_count }} | xargs rm -rf" + executable: /bin/bash when: etcd_backup_retention_count >= 0 diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml index 7534e4176..3fdedccac 100644 --- a/roles/etcd/tasks/configure.yml +++ b/roles/etcd/tasks/configure.yml @@ -8,11 +8,13 @@ changed_when: false check_mode: no run_once: yes - when: is_etcd_master and etcd_cluster_setup + when: + - is_etcd_master + - etcd_cluster_setup tags: - facts environment: - ETCDCTL_API: 3 + ETCDCTL_API: "3" ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" @@ -27,11 +29,13 @@ changed_when: false check_mode: no run_once: yes - when: is_etcd_master and etcd_events_cluster_setup + when: + - is_etcd_master + - etcd_events_cluster_setup tags: - facts environment: - ETCDCTL_API: 3 + ETCDCTL_API: "3" ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" @@ -96,7 +100,7 @@ tags: - facts environment: - ETCDCTL_API: 3 + ETCDCTL_API: "3" ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" @@ -119,7 +123,7 @@ tags: - facts environment: - ETCDCTL_API: 3 + ETCDCTL_API: "3" ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" @@ -135,7 +139,7 @@ tags: - facts environment: - ETCDCTL_API: 3 + ETCDCTL_API: "3" ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" @@ -151,7 +155,7 @@ tags: - facts environment: - ETCDCTL_API: 3 + ETCDCTL_API: "3" ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml index 9f01b1ffb..7beda4d78 100644 --- a/roles/etcd/tasks/gen_certs_script.yml +++ b/roles/etcd/tasks/gen_certs_script.yml @@ -41,16 +41,18 @@ - name: Gen_certs | run cert generation script for etcd and kube control plane nodes command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}" environment: - - MASTERS: "{% for m in groups['etcd'] %} - {% if gen_master_certs[m] %} - {{ m }} - {% endif %} - {% endfor %}" - - HOSTS: "{% for h in groups['kube_control_plane'] %} - {% if gen_node_certs[h] %} - {{ h }} - {% endif %} - {% endfor %}" + MASTERS: |- + {% for m in groups['etcd'] %} + {% if gen_master_certs[m] %} + {{ m }} + {% endif %} + {% endfor %} + HOSTS: |- + {% for h in groups['kube_control_plane'] %} + {% if gen_node_certs[h] %} + {{ h }} + {% endif %} + {% endfor %} run_once: yes delegate_to: "{{ groups['etcd'][0] }}" when: gen_certs|default(false) @@ -59,11 +61,12 @@ - name: Gen_certs | run cert generation 
script for all clients command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}" environment: - - HOSTS: "{% for h in groups['k8s_cluster'] %} - {% if gen_node_certs[h] %} - {{ h }} - {% endif %} - {% endfor %}" + HOSTS: |- + {% for h in groups['k8s_cluster'] %} + {% if gen_node_certs[h] %} + {{ h }} + {% endif %} + {% endfor %} run_once: yes delegate_to: "{{ groups['etcd'][0] }}" when: diff --git a/roles/etcd/tasks/gen_nodes_certs_script.yml b/roles/etcd/tasks/gen_nodes_certs_script.yml index d176e01aa..73e64c29f 100644 --- a/roles/etcd/tasks/gen_nodes_certs_script.yml +++ b/roles/etcd/tasks/gen_nodes_certs_script.yml @@ -17,7 +17,6 @@ shell: "set -o pipefail && tar cfz - -C {{ etcd_cert_dir }} {{ my_etcd_node_certs|join(' ') }} | base64 --wrap=0" args: executable: /bin/bash - warn: false no_log: "{{ not (unsafe_show_logs|bool) }}" register: etcd_node_certs check_mode: no diff --git a/roles/etcd/tasks/join_etcd-events_member.yml b/roles/etcd/tasks/join_etcd-events_member.yml index 8336f1a40..4bdd225fb 100644 --- a/roles/etcd/tasks/join_etcd-events_member.yml +++ b/roles/etcd/tasks/join_etcd-events_member.yml @@ -1,12 +1,12 @@ --- - name: Join Member | Add member to etcd-events cluster # noqa 301 305 - shell: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_events_peer_url }}" + command: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_events_peer_url }}" register: member_add_result until: member_add_result.rc == 0 retries: "{{ etcd_retries }}" delay: "{{ retry_stagger | random + 3 }}" environment: - ETCDCTL_API: 3 + ETCDCTL_API: "3" ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" @@ -34,7 +34,7 @@ tags: - facts environment: - ETCDCTL_API: 3 + ETCDCTL_API: "3" ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" diff --git a/roles/etcd/tasks/join_etcd_member.yml b/roles/etcd/tasks/join_etcd_member.yml index 22440394f..6bc28f861 100644 --- a/roles/etcd/tasks/join_etcd_member.yml +++ b/roles/etcd/tasks/join_etcd_member.yml @@ -1,13 +1,13 @@ --- - name: Join Member | Add member to etcd cluster # noqa 301 305 - shell: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_peer_url }}" + command: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_peer_url }}" register: member_add_result until: member_add_result.rc == 0 or 'Peer URLs already exists' in member_add_result.stderr failed_when: member_add_result.rc != 0 and 'Peer URLs already exists' not in member_add_result.stderr retries: "{{ etcd_retries }}" delay: "{{ retry_stagger | random + 3 }}" environment: - ETCDCTL_API: 3 + ETCDCTL_API: "3" ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" @@ -38,7 +38,7 @@ tags: - facts environment: - ETCDCTL_API: 3 + ETCDCTL_API: "3" ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" diff --git a/roles/kubernetes-apps/argocd/tasks/main.yml b/roles/kubernetes-apps/argocd/tasks/main.yml index 
dd66d7375..e11f0976b 100644 --- a/roles/kubernetes-apps/argocd/tasks/main.yml +++ b/roles/kubernetes-apps/argocd/tasks/main.yml @@ -5,7 +5,7 @@ download: "{{ download_defaults | combine(downloads.yq) }}" - name: Kubernetes Apps | Copy yq binary from download dir - synchronize: + ansible.posix.synchronize: src: "{{ downloads.yq.dest }}" dest: "{{ bin_dir }}/yq" compress: no @@ -46,7 +46,7 @@ - "inventory_hostname == groups['kube_control_plane'][0]" - name: Kubernetes Apps | Copy ArgoCD remote manifests from download dir - synchronize: + ansible.posix.synchronize: src: "{{ local_release_dir }}/{{ item.file }}" dest: "{{ kube_config_dir }}/{{ item.file }}" compress: no diff --git a/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml b/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml index 2224ae5f1..528519bee 100644 --- a/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml +++ b/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml @@ -1,6 +1,6 @@ --- -- include: credentials-check.yml +- import_tasks: credentials-check.yml - name: "OCI Cloud Controller | Generate Cloud Provider Configuration" template: diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml index f0e07018c..643c0ce45 100644 --- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml +++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml @@ -66,7 +66,10 @@ - cloud_provider == 'oci' - name: PriorityClass | Copy k8s-cluster-critical-pc.yml file - copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml mode=0640 + copy: + src: k8s-cluster-critical-pc.yml + dest: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml" + mode: 0640 when: inventory_hostname == groups['kube_control_plane']|last - name: PriorityClass | Create k8s-cluster-critical diff --git a/roles/kubernetes/control-plane/handlers/main.yml b/roles/kubernetes/control-plane/handlers/main.yml index e6bc321e2..d5f17963f 100644 --- a/roles/kubernetes/control-plane/handlers/main.yml +++ b/roles/kubernetes/control-plane/handlers/main.yml @@ -44,7 +44,9 @@ state: restarted - name: Master | Remove apiserver container docker - shell: docker ps -af name=k8s_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f + shell: "set -o pipefail && docker ps -af name=k8s_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f" + args: + executable: /bin/bash register: remove_apiserver_container retries: 10 until: remove_apiserver_container.rc == 0 @@ -52,7 +54,9 @@ when: container_manager == "docker" - name: Master | Remove apiserver container containerd/crio - shell: "{{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'" + shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'" + args: + executable: /bin/bash register: remove_apiserver_container retries: 10 until: remove_apiserver_container.rc == 0 @@ -60,7 +64,9 @@ when: container_manager in ['containerd', 'crio'] - name: Master | Remove scheduler container docker - shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_kube-scheduler* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" + shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_kube-scheduler* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" + args: + executable: /bin/bash register: 
remove_scheduler_container retries: 10 until: remove_scheduler_container.rc == 0 @@ -68,7 +74,9 @@ when: container_manager == "docker" - name: Master | Remove scheduler container containerd/crio - shell: "{{ bin_dir }}/crictl pods --name kube-scheduler* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'" + shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-scheduler* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'" + args: + executable: /bin/bash register: remove_scheduler_container retries: 10 until: remove_scheduler_container.rc == 0 @@ -76,7 +84,9 @@ when: container_manager in ['containerd', 'crio'] - name: Master | Remove controller manager container docker - shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" + shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" + args: + executable: /bin/bash register: remove_cm_container retries: 10 until: remove_cm_container.rc == 0 @@ -84,7 +94,9 @@ when: container_manager == "docker" - name: Master | Remove controller manager container containerd/crio - shell: "{{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'" + shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'" + args: + executable: /bin/bash register: remove_cm_container retries: 10 until: remove_cm_container.rc == 0 diff --git a/roles/kubernetes/node/tasks/facts.yml b/roles/kubernetes/node/tasks/facts.yml index 97d52e8c3..d68d5bdde 100644 --- a/roles/kubernetes/node/tasks/facts.yml +++ b/roles/kubernetes/node/tasks/facts.yml @@ -1,7 +1,9 @@ --- - block: - name: look up docker cgroup driver - shell: "docker info | grep 'Cgroup Driver' | awk -F': ' '{ print $2; }'" + shell: "set -o pipefail && docker info | grep 'Cgroup Driver' | awk -F': ' '{ print $2; }'" + args: + executable: /bin/bash register: docker_cgroup_driver_result changed_when: false check_mode: no @@ -13,7 +15,9 @@ - block: - name: look up crio cgroup driver - shell: "{{ bin_dir }}/crio-status info | grep 'cgroup driver' | awk -F': ' '{ print $2; }'" + shell: "set -o pipefail && {{ bin_dir }}/crio-status info | grep 'cgroup driver' | awk -F': ' '{ print $2; }'" + args: + executable: /bin/bash register: crio_cgroup_driver_result changed_when: false @@ -40,7 +44,6 @@ when: kubelet_cgroup_driver == 'cgroupfs' - name: set kubelet_config_extra_args options when cgroupfs is used - vars: set_fact: kubelet_config_extra_args: "{{ kubelet_config_extra_args | combine(kubelet_config_extra_args_cgroupfs) }}" when: kubelet_cgroup_driver == 'cgroupfs' diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index 59dc3007a..99babd64f 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -41,7 +41,7 @@ - haproxy - name: Ensure nodePort range is reserved - sysctl: + ansible.posix.sysctl: name: net.ipv4.ip_local_reserved_ports value: "{{ kube_apiserver_node_port_range }}" sysctl_set: yes @@ -68,7 +68,7 @@ mode: 0755 - name: Enable br_netfilter module - modprobe: + community.general.modprobe: name: br_netfilter state: present 
when: modinfo_br_netfilter.rc == 0 @@ -89,7 +89,7 @@ register: sysctl_bridge_nf_call_iptables - name: Enable bridge-nf-call tables - sysctl: + ansible.posix.sysctl: name: "{{ item }}" state: present sysctl_file: "{{ sysctl_file_path }}" @@ -102,7 +102,7 @@ - net.bridge.bridge-nf-call-ip6tables - name: Modprobe Kernel Module for IPVS - modprobe: + community.general.modprobe: name: "{{ item }}" state: present with_items: @@ -115,7 +115,7 @@ - kube-proxy - name: Modprobe nf_conntrack_ipv4 - modprobe: + community.general.modprobe: name: nf_conntrack_ipv4 state: present register: modprobe_nf_conntrack_ipv4 diff --git a/roles/kubernetes/preinstall/handlers/main.yml b/roles/kubernetes/preinstall/handlers/main.yml index 0212530f4..7cb0c3185 100644 --- a/roles/kubernetes/preinstall/handlers/main.yml +++ b/roles/kubernetes/preinstall/handlers/main.yml @@ -68,7 +68,9 @@ when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf' - name: Preinstall | restart kube-controller-manager docker - shell: "{{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" + shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" + args: + executable: /bin/bash when: - container_manager == "docker" - inventory_hostname in groups['kube_control_plane'] @@ -77,7 +79,9 @@ - kube_controller_set.stat.exists - name: Preinstall | restart kube-controller-manager crio/containerd - shell: "{{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'" + shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'" + args: + executable: /bin/bash register: preinstall_restart_controller_manager retries: 10 delay: 1 @@ -90,7 +94,9 @@ - kube_controller_set.stat.exists - name: Preinstall | restart kube-apiserver docker - shell: "{{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-apiserver* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" + shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-apiserver* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" + args: + executable: /bin/bash when: - container_manager == "docker" - inventory_hostname in groups['kube_control_plane'] @@ -99,7 +105,9 @@ - kube_apiserver_set.stat.exists - name: Preinstall | restart kube-apiserver crio/containerd - shell: "{{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'" + shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'" + args: + executable: /bin/bash register: preinstall_restart_apiserver retries: 10 until: preinstall_restart_apiserver.rc == 0 diff --git a/roles/kubernetes/preinstall/tasks/0010-swapoff.yml b/roles/kubernetes/preinstall/tasks/0010-swapoff.yml index 997a18c85..ce574f86c 100644 --- a/roles/kubernetes/preinstall/tasks/0010-swapoff.yml +++ b/roles/kubernetes/preinstall/tasks/0010-swapoff.yml @@ -1,6 +1,6 @@ --- - name: Remove swapfile from /etc/fstab - mount: + ansible.posix.mount: name: "{{ item }}" fstype: 
swap state: absent diff --git a/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml b/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml index 7249ac898..9ad5f7d10 100644 --- a/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml +++ b/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml @@ -1,6 +1,6 @@ --- - name: NetworkManager | Add nameservers to NM configuration - ini_file: + community.general.ini_file: path: /etc/NetworkManager/conf.d/dns.conf section: global-dns-domain-* option: servers @@ -15,7 +15,7 @@ when: not remove_default_searchdomains|default()|bool or (remove_default_searchdomains|default()|bool and searchdomains|default([])|length==0) - name: NetworkManager | Add DNS search to NM configuration - ini_file: + community.general.ini_file: path: /etc/NetworkManager/conf.d/dns.conf section: global-dns option: searches @@ -25,7 +25,7 @@ notify: Preinstall | update resolvconf for networkmanager - name: NetworkManager | Add DNS options to NM configuration - ini_file: + community.general.ini_file: path: /etc/NetworkManager/conf.d/dns.conf section: global-dns option: options diff --git a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml index d4fa45b8b..2bf552351 100644 --- a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml +++ b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml @@ -12,7 +12,7 @@ register: slc - name: Set selinux policy - selinux: + ansible.posix.selinux: policy: targeted state: "{{ preinstall_selinux_state }}" when: @@ -71,7 +71,7 @@ mode: 0755 - name: Enable ip forwarding - sysctl: + ansible.posix.sysctl: sysctl_file: "{{ sysctl_file_path }}" name: net.ipv4.ip_forward value: "1" @@ -79,7 +79,7 @@ reload: yes - name: Enable ipv6 forwarding - sysctl: + ansible.posix.sysctl: sysctl_file: "{{ sysctl_file_path }}" name: net.ipv6.conf.all.forwarding value: "1" @@ -97,7 +97,7 @@ ignore_errors: true # noqa ignore-errors - name: Set fs.may_detach_mounts if needed - sysctl: + ansible.posix.sysctl: sysctl_file: "{{ sysctl_file_path }}" name: fs.may_detach_mounts value: 1 @@ -106,7 +106,7 @@ when: fs_may_detach_mounts.stat.exists | d(false) - name: Ensure kube-bench parameters are set - sysctl: + ansible.posix.sysctl: sysctl_file: "{{ sysctl_file_path }}" name: "{{ item.name }}" value: "{{ item.value }}" @@ -122,14 +122,14 @@ when: kubelet_protect_kernel_defaults|bool - name: Check dummy module - modprobe: + community.general.modprobe: name: dummy state: present params: 'numdummies=0' when: enable_nodelocaldns - name: Set additional sysctl variables - sysctl: + ansible.posix.sysctl: sysctl_file: "{{ sysctl_file_path }}" name: "{{ item.name }}" value: "{{ item.value }}" diff --git a/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml b/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml index b6c6b2e95..c2e42366d 100644 --- a/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml +++ b/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml @@ -78,7 +78,7 @@ - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] - name: Set timezone - timezone: + community.general.timezone: name: "{{ ntp_timezone }}" when: - ntp_timezone diff --git a/roles/kubernetes/tokens/tasks/gen_tokens.yml b/roles/kubernetes/tokens/tasks/gen_tokens.yml index aa1cf214a..e80e56d6f 100644 --- a/roles/kubernetes/tokens/tasks/gen_tokens.yml +++ b/roles/kubernetes/tokens/tasks/gen_tokens.yml @@ -45,7 +45,6 @@ 
- name: Gen_tokens | Gather tokens shell: "set -o pipefail && tar cfz - {{ tokens_list.stdout_lines | join(' ') }} | base64 --wrap=0" args: - warn: false executable: /bin/bash register: tokens_data check_mode: no diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index 333446e60..ec5b8e6a3 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -33,13 +33,13 @@ kubeadm_init_timeout: 300s kubeadm_init_phases_skip_default: [ "addon/coredns" ] kubeadm_init_phases_skip: >- {%- if kube_network_plugin == 'kube-router' and (kube_router_run_service_proxy is defined and kube_router_run_service_proxy) -%} - {{ kubeadm_init_phases_skip_default }} + [ "addon/kube-proxy" ] + {{ kubeadm_init_phases_skip_default + [ "addon/kube-proxy" ] }} {%- elif kube_network_plugin == 'cilium' and (cilium_kube_proxy_replacement is defined and cilium_kube_proxy_replacement == 'strict') -%} - {{ kubeadm_init_phases_skip_default }} + [ "addon/kube-proxy" ] + {{ kubeadm_init_phases_skip_default + [ "addon/kube-proxy" ] }} {%- elif kube_network_plugin == 'calico' and (calico_bpf_enabled is defined and calico_bpf_enabled) -%} - {{ kubeadm_init_phases_skip_default }} + [ "addon/kube-proxy" ] + {{ kubeadm_init_phases_skip_default + [ "addon/kube-proxy" ] }} {%- elif kube_proxy_remove is defined and kube_proxy_remove -%} - {{ kubeadm_init_phases_skip_default }} + [ "addon/kube-proxy" ] + {{ kubeadm_init_phases_skip_default + [ "addon/kube-proxy" ] }} {%- else -%} {{ kubeadm_init_phases_skip_default }} {%- endif -%} diff --git a/roles/network_plugin/calico/handlers/main.yml b/roles/network_plugin/calico/handlers/main.yml index b4b7af860..fbcae3a24 100644 --- a/roles/network_plugin/calico/handlers/main.yml +++ b/roles/network_plugin/calico/handlers/main.yml @@ -13,14 +13,18 @@ state: absent - name: Calico | delete calico-node docker containers - shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_POD_calico-node* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" + shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_POD_calico-node* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" + args: + executable: /bin/bash register: docker_calico_node_remove until: docker_calico_node_remove is succeeded retries: 5 when: container_manager in ["docker"] - name: Calico | delete calico-node crio/containerd containers - shell: '{{ bin_dir }}/crictl pods --name calico-node-* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"' + shell: 'set -o pipefail && {{ bin_dir }}/crictl pods --name calico-node-* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"' + args: + executable: /bin/bash register: crictl_calico_node_remove until: crictl_calico_node_remove is succeeded retries: 5 diff --git a/roles/network_plugin/cilium/tasks/install.yml b/roles/network_plugin/cilium/tasks/install.yml index 06e054a28..7678e7d4a 100644 --- a/roles/network_plugin/cilium/tasks/install.yml +++ b/roles/network_plugin/cilium/tasks/install.yml @@ -1,6 +1,6 @@ --- - name: Cilium | Ensure BPFFS mounted - mount: + ansible.posix.mount: fstype: bpf path: /sys/fs/bpf src: bpffs diff --git a/roles/network_plugin/kube-router/handlers/main.yml b/roles/network_plugin/kube-router/handlers/main.yml index 7bdfc5d42..c0ddb33ad 100644 --- a/roles/network_plugin/kube-router/handlers/main.yml +++ 
b/roles/network_plugin/kube-router/handlers/main.yml @@ -6,14 +6,18 @@ - Kube-router | delete kube-router crio/containerd containers - name: Kube-router | delete kube-router docker containers - shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_POD_kube-router* -q | xargs --no-run-if-empty docker rm -f" + shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_POD_kube-router* -q | xargs --no-run-if-empty docker rm -f" + args: + executable: /bin/bash register: docker_kube_router_remove until: docker_kube_router_remove is succeeded retries: 5 when: container_manager in ["docker"] - name: Kube-router | delete kube-router crio/containerd containers - shell: '{{ bin_dir }}/crictl pods --name kube-router* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"' + shell: 'set -o pipefail && {{ bin_dir }}/crictl pods --name kube-router* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"' + args: + executable: /bin/bash register: crictl_kube_router_remove until: crictl_kube_router_remove is succeeded retries: 5 diff --git a/roles/network_plugin/kube-router/tasks/main.yml b/roles/network_plugin/kube-router/tasks/main.yml index 4cc078ae7..23b3af964 100644 --- a/roles/network_plugin/kube-router/tasks/main.yml +++ b/roles/network_plugin/kube-router/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: kube-router | Create annotations - include: annotate.yml + import_tasks: annotate.yml tags: annotate - name: kube-router | Create config directory diff --git a/roles/network_plugin/macvlan/tasks/main.yml b/roles/network_plugin/macvlan/tasks/main.yml index bdc2dbc26..2b486cce2 100644 --- a/roles/network_plugin/macvlan/tasks/main.yml +++ b/roles/network_plugin/macvlan/tasks/main.yml @@ -7,7 +7,7 @@ - name: Macvlan | set node_pod_cidr set_fact: - node_pod_cidr={{ node_pod_cidr_cmd.stdout }} + node_pod_cidr: "{{ node_pod_cidr_cmd.stdout }}" - name: Macvlan | Retrieve default gateway network interface become: false @@ -17,7 +17,7 @@ - name: Macvlan | set node_default_gateway_interface set_fact: - node_default_gateway_interface={{ node_default_gateway_interface_cmd.stdout | trim }} + node_default_gateway_interface: "{{ node_default_gateway_interface_cmd.stdout | trim }}" - name: Macvlan | Install network gateway interface on debian template: @@ -101,7 +101,7 @@ mode: 0644 - name: Enable net.ipv4.conf.all.arp_notify in sysctl - sysctl: + ansible.posix.sysctl: name: net.ipv4.conf.all.arp_notify value: 1 sysctl_set: yes diff --git a/roles/network_plugin/multus/tasks/main.yml b/roles/network_plugin/multus/tasks/main.yml index 9bf1842be..ab76268a5 100644 --- a/roles/network_plugin/multus/tasks/main.yml +++ b/roles/network_plugin/multus/tasks/main.yml @@ -20,6 +20,7 @@ template: src: multus-daemonset.yml.j2 dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 with_items: - {name: multus-daemonset-containerd, file: multus-daemonset-containerd.yml, type: daemonset, engine: containerd } - {name: multus-daemonset-docker, file: multus-daemonset-docker.yml, type: daemonset, engine: docker } diff --git a/roles/recover_control_plane/control-plane/tasks/main.yml b/roles/recover_control_plane/control-plane/tasks/main.yml index 4a4e3eb7e..ec50f3ffd 100644 --- a/roles/recover_control_plane/control-plane/tasks/main.yml +++ b/roles/recover_control_plane/control-plane/tasks/main.yml @@ -2,7 +2,7 @@ - name: Wait for apiserver command: "{{ kubectl }} get nodes" environment: - - KUBECONFIG: "{{ ansible_env.HOME | 
diff --git a/roles/recover_control_plane/control-plane/tasks/main.yml b/roles/recover_control_plane/control-plane/tasks/main.yml
index 4a4e3eb7e..ec50f3ffd 100644
--- a/roles/recover_control_plane/control-plane/tasks/main.yml
+++ b/roles/recover_control_plane/control-plane/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: Wait for apiserver
   command: "{{ kubectl }} get nodes"
   environment:
-    - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
+    KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   register: apiserver_is_ready
   until: apiserver_is_ready.rc == 0
   retries: 6
@@ -13,7 +13,7 @@
 - name: Delete broken kube_control_plane nodes from cluster
   command: "{{ kubectl }} delete node {{ item }}"
   environment:
-    - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
+    KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   with_items: "{{ groups['broken_kube_control_plane'] }}"
   register: delete_broken_kube_masters
   failed_when: false
diff --git a/roles/recover_control_plane/etcd/tasks/main.yml b/roles/recover_control_plane/etcd/tasks/main.yml
index 45e2c65e4..1944f50d2 100644
--- a/roles/recover_control_plane/etcd/tasks/main.yml
+++ b/roles/recover_control_plane/etcd/tasks/main.yml
@@ -6,25 +6,25 @@
   changed_when: false
   check_mode: no
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
   when:
-    - groups['broken_etcd']
+    - inventory_hostname in groups['broken_etcd']
 
 - name: Set healthy fact
   set_fact:
     healthy: "{{ etcd_endpoint_health.stderr is match('Error: unhealthy cluster') }}"
   when:
-    - groups['broken_etcd']
+    - inventory_hostname in groups['broken_etcd']
 
 - name: Set has_quorum fact
   set_fact:
     has_quorum: "{{ etcd_endpoint_health.stdout_lines | select('match', '.*is healthy.*') | list | length >= etcd_endpoint_health.stderr_lines | select('match', '.*is unhealthy.*') | list | length }}"
   when:
-    - groups['broken_etcd']
+    - inventory_hostname in groups['broken_etcd']
 
 - include_tasks: recover_lost_quorum.yml
   when:
@@ -39,7 +39,7 @@
   with_items: "{{ groups['broken_etcd'] }}"
   ignore_errors: true  # noqa ignore-errors
   when:
-    - groups['broken_etcd']
+    - inventory_hostname in groups['broken_etcd']
     - has_quorum
 
 - name: Delete old certificates
@@ -56,7 +56,7 @@
   loop: "{{ delete_old_cerificates.results }}"
   changed_when: false
   when:
-    - groups['broken_etcd']
+    - inventory_hostname in groups['broken_etcd']
    - "item.rc != 0 and not 'No such file or directory' in item.stderr"
 
 - name: Get etcd cluster members
@@ -65,20 +65,20 @@
   changed_when: false
   check_mode: no
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
   when:
-    - groups['broken_etcd']
+    - inventory_hostname in groups['broken_etcd']
     - not healthy
     - has_quorum
 
 - name: Remove broken cluster members
   command: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ','').split(',')[0] }}"
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
@@ -87,7 +87,7 @@
     - "{{ groups['broken_etcd'] }}"
     - "{{ member_list.stdout_lines }}"
   when:
-    - groups['broken_etcd']
+    - inventory_hostname in groups['broken_etcd']
     - not healthy
     - has_quorum
     - hostvars[item[0]]['etcd_member_name'] == item[1].replace(' ','').split(',')[2]
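Three recurring fixes in the recovery tasks above are worth spelling out. `environment:` must be a mapping, not a list of one-key mappings; environment variable values are strings, so the schema check wants `3` quoted; and `when: groups['broken_etcd']` only tested that the group was non-empty, whereas the intent is to run the task on broken members only. A sketch of the corrected shape (the task itself is illustrative, not lifted from the repo):

    - name: Check etcd endpoint health   # illustrative task
      command: "{{ bin_dir }}/etcdctl endpoint health"
      environment:
        ETCDCTL_API: "3"    # quoted: env values must be strings
        ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
      when:
        - inventory_hostname in groups['broken_etcd']   # membership test, not group truthiness
      changed_when: false
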
diff --git a/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml b/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml
index 1ecc90fef..86096fed9 100644
--- a/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml
+++ b/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml
@@ -2,11 +2,11 @@
 - name: Save etcd snapshot
   command: "{{ bin_dir }}/etcdctl snapshot save /tmp/snapshot.db"
   environment:
-    - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
-    - ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
-    - ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
-    - ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses.split(',') | first }}"
-    - ETCDCTL_API: 3
+    ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
+    ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses.split(',') | first }}"
+    ETCDCTL_API: "3"
   when: etcd_snapshot is not defined
 
 - name: Transfer etcd snapshot to host
@@ -29,11 +29,11 @@
 - name: Restore etcd snapshot  # noqa 301 305
   shell: "{{ bin_dir }}/etcdctl snapshot restore /tmp/snapshot.db --name {{ etcd_member_name }} --initial-cluster {{ etcd_member_name }}={{ etcd_peer_url }} --initial-cluster-token k8s_etcd --initial-advertise-peer-urls {{ etcd_peer_url }} --data-dir {{ etcd_data_dir }}"
   environment:
-    - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
-    - ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
-    - ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
-    - ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
-    - ETCDCTL_API: 3
+    ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
+    ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
+    ETCDCTL_API: "3"
 
 - name: Remove etcd snapshot
   file:
diff --git a/roles/remove-node/remove-etcd-node/tasks/main.yml b/roles/remove-node/remove-etcd-node/tasks/main.yml
index e9ef0cf6b..f7729ea79 100644
--- a/roles/remove-node/remove-etcd-node/tasks/main.yml
+++ b/roles/remove-node/remove-etcd-node/tasks/main.yml
@@ -26,7 +26,9 @@
     - inventory_hostname in groups['etcd']
 
 - name: Lookup etcd member id
-  shell: "{{ bin_dir }}/etcdctl member list | grep {{ node_ip }} | cut -d, -f1"
+  shell: "set -o pipefail && {{ bin_dir }}/etcdctl member list | grep {{ node_ip }} | cut -d, -f1"
+  args:
+    executable: /bin/bash
   register: etcd_member_id
   ignore_errors: true  # noqa ignore-errors
   changed_when: false
@@ -34,7 +36,7 @@
   tags:
     - facts
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ kube_cert_dir + '/etcd/server.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '.pem' }}"
     ETCDCTL_KEY: "{{ kube_cert_dir + '/etcd/server.key' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '-key.pem' }}"
     ETCDCTL_CACERT: "{{ kube_cert_dir + '/etcd/ca.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/ca.pem' }}"
@@ -45,7 +47,7 @@
 - name: Remove etcd member from cluster
   command: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}"
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ kube_cert_dir + '/etcd/server.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '.pem' }}"
     ETCDCTL_KEY: "{{ kube_cert_dir + '/etcd/server.key' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '-key.pem' }}"
     ETCDCTL_CACERT: "{{ kube_cert_dir + '/etcd/ca.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/ca.pem' }}"
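The `Lookup etcd member id` fix above is the standard cure for ansible-lint's `risky-shell-pipe` rule: without `set -o pipefail`, a failing first command in a pipeline is masked by the exit status of the last one, so a dead etcd endpoint would still report success through `grep | cut`. Since `pipefail` is a bashism, the shell also has to be pinned. A minimal sketch of the pattern, with an illustrative pipeline:

    - name: Example pipeline that fails fast   # illustrative task
      shell: "set -o pipefail && some_command | grep pattern | cut -d, -f1"
      args:
        executable: /bin/bash   # pipefail is not guaranteed in POSIX sh
      register: pipeline_out
      changed_when: false
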
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 729be0908..f6394c366 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -178,7 +178,6 @@
   shell: set -o pipefail && mount | grep /var/lib/kubelet/ | awk '{print $3}' | tac
   args:
     executable: /bin/bash
-    warn: false
   check_mode: no
   register: mounted_dirs
   failed_when: false
@@ -279,6 +278,7 @@
     path: "{{ filedir_path }}"
     state: touch
     attributes: "-i"
+    mode: 0644
   loop: "{{ var_lib_kubelet_files_dirs_w_attrs.stdout_lines|select('search', 'Immutable')|list }}"
   loop_control:
     loop_var: file_dir_line
diff --git a/run.rc b/run.rc
index f87ad4e0d..570f0dd80 100644
--- a/run.rc
+++ b/run.rc
@@ -7,6 +7,7 @@ pip install wheel
 pip install --upgrade setuptools
 pip install -r requirements.txt
 pip install -r tests/requirements.txt
+ansible-galaxy install -r tests/requirements.yml
 pre-commit install
 # prepare an inventory to test with
 INV=inventory/lab
diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml
index 3f31217d3..feb309d38 100644
--- a/scripts/collect-info.yaml
+++ b/scripts/collect-info.yaml
@@ -97,7 +97,7 @@
         - /var/log/dmesg
 
       environment:
-        ETCDCTL_API: 3
+        ETCDCTL_API: "3"
         ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
         ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
         ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
@@ -120,18 +120,22 @@
         no_log: True
 
       - name: Fetch results
-        fetch: src={{ item.name }} dest=/tmp/{{ archive_dirname }}/commands
+        fetch:
+          src: "{{ item.name }}"
+          dest: "/tmp/{{ archive_dirname }}/commands"
         with_items: "{{ commands }}"
         when: item.when | default(True)
         failed_when: false
 
       - name: Fetch logs
-        fetch: src={{ item }} dest=/tmp/{{ archive_dirname }}/logs
+        fetch:
+          src: "{{ item }}"
+          dest: "/tmp/{{ archive_dirname }}/logs"
         with_items: "{{ logs }}"
         failed_when: false
 
       - name: Pack results and logs
-        archive:
+        community.general.archive:
           path: "/tmp/{{ archive_dirname }}"
           dest: "{{ dir|default('.') }}/logs.tar.gz"
           remove: true
@@ -142,5 +146,7 @@
         run_once: true
 
       - name: Clean up collected command outputs
-        file: path={{ item.name }} state=absent
+        file:
+          path: "{{ item.name }}"
+          state: absent
         with_items: "{{ commands }}"
diff --git a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
index a0b36bebb..832f9dd7f 100644
--- a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
+++ b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
@@ -11,6 +11,7 @@
     url: "{{ item.value.url }}"
     dest: "{{ images_dir }}/{{ item.value.filename }}"
     checksum: "{{ item.value.checksum }}"
+    mode: 0644
   loop: "{{ images|dict2items }}"
 
 - name: Unxz compressed images
diff --git a/tests/cloud_playbooks/create-aws.yml b/tests/cloud_playbooks/create-aws.yml
index 8a03c9259..453c1139d 100644
--- a/tests/cloud_playbooks/create-aws.yml
+++ b/tests/cloud_playbooks/create-aws.yml
@@ -5,7 +5,7 @@
   tasks:
 
     - name: Provision a set of instances
-      ec2:
+      amazon.aws.ec2_instance:
         key_name: "{{ aws.key_name }}"
         aws_access_key: "{{ aws.access_key }}"
         aws_secret_key: "{{ aws.secret_key }}"
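The collect-info changes above follow ansible-lint's `no-free-form` rule: inline `key=value` module arguments become explicit YAML mappings, which survive quoting and templating edge cases that the free-form parser does not. A sketch of the before/after shape, with an illustrative destination path:

    # before: free-form, deprecated style
    # - name: Fetch a file
    #   fetch: src={{ item }} dest=/tmp/logs

    # after: explicit YAML arguments, templated values quoted
    - name: Fetch a file
      fetch:
        src: "{{ item }}"
        dest: "/tmp/logs"

The same sweep applies fully qualified collection names to modules that moved out of ansible-core, such as `community.general.archive` and `amazon.aws.ec2_instance` above.
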
diff --git a/tests/cloud_playbooks/create-do.yml b/tests/cloud_playbooks/create-do.yml
index 3726eb158..f95cbe516 100644
--- a/tests/cloud_playbooks/create-do.yml
+++ b/tests/cloud_playbooks/create-do.yml
@@ -52,7 +52,8 @@
       test_name: "{{ test_id |regex_replace('\\.', '-') }}"
 
   - name: show vars
-    debug: msg="{{ cloud_region }}, {{ cloud_image }}"
+    debug:
+      msg: "{{ cloud_region }}, {{ cloud_image }}"
 
   - name: set instance names
     set_fact:
@@ -64,7 +65,7 @@
       {%- endif -%}
 
   - name: Manage DO instances | {{ state }}
-    digital_ocean:
+    community.digitalocean.digital_ocean:
       unique_name: yes
      api_token: "{{ lookup('env','DO_API_TOKEN') }}"
      command: "droplet"
diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml
index f94b05bcb..dae55a2c5 100644
--- a/tests/cloud_playbooks/create-gce.yml
+++ b/tests/cloud_playbooks/create-gce.yml
@@ -46,7 +46,9 @@
     register: gce
 
   - name: Add instances to host group
-    add_host: hostname={{ item.public_ip }} groupname="waitfor_hosts"
+    add_host:
+      hostname: "{{ item.public_ip }}"
+      groupname: "waitfor_hosts"
     with_items: '{{ gce.instance_data }}'
 
   - name: Template the inventory  # noqa 404 CI inventory templates are not in role_path
diff --git a/tests/cloud_playbooks/delete-aws.yml b/tests/cloud_playbooks/delete-aws.yml
index 02f9b06c7..e207a9844 100644
--- a/tests/cloud_playbooks/delete-aws.yml
+++ b/tests/cloud_playbooks/delete-aws.yml
@@ -4,10 +4,10 @@
   tasks:
 
     - name: Gather EC2 facts
-      action: ec2_facts
+      amazon.aws.ec2_metadata_facts:
 
     - name: Terminate EC2 instances
-      ec2:
+      amazon.aws.ec2_instance:
         aws_access_key: "{{ aws_access_key }}"
         aws_secret_key: "{{ aws_secret_key }}"
         state: absent
diff --git a/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml b/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml
index 353f9910a..98bd05a61 100644
--- a/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml
+++ b/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml
@@ -25,6 +25,6 @@
   changed_when:
     - delete_namespace.rc == 0
   retries: 12
-  delay: "10"
+  delay: 10
   until:
     - delete_namespace.rc != 0
diff --git a/tests/cloud_playbooks/upload-logs-gcs.yml b/tests/cloud_playbooks/upload-logs-gcs.yml
index eeb0edb79..2f5c9d897 100644
--- a/tests/cloud_playbooks/upload-logs-gcs.yml
+++ b/tests/cloud_playbooks/upload-logs-gcs.yml
@@ -21,7 +21,7 @@
       file_name: "{{ ostype }}-{{ kube_network_plugin }}-{{ commit }}-logs.tar.gz"
 
   - name: Create a bucket
-    gc_storage:
+    community.google.gc_storage:
       bucket: "{{ test_name }}"
       mode: create
      permission: public-read
@@ -46,11 +46,12 @@
     get_url:
       url: https://dl.google.com/dl/cloudsdk/channels/rapid/install_google_cloud_sdk.bash
       dest: "{{ dir }}/gcp-installer.sh"
+      mode: 0644
 
   - name: Get gsutil tool
-    script: "{{ dir }}/gcp-installer.sh"
+    command: "{{ dir }}/gcp-installer.sh"
     environment:
-      CLOUDSDK_CORE_DISABLE_PROMPTS: 1
+      CLOUDSDK_CORE_DISABLE_PROMPTS: "1"
       CLOUDSDK_INSTALL_DIR: "{{ dir }}"
     no_log: True
     failed_when: false
@@ -63,7 +64,7 @@
     no_log: True
 
   - name: Upload collected diagnostic info
-    gc_storage:
+    community.google.gc_storage:
       bucket: "{{ test_name }}"
       mode: put
       permission: public-read
diff --git a/tests/requirements.txt b/tests/requirements.txt
index e3c4482d7..19474ab09 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -1,11 +1,11 @@
 -r ../requirements.txt
-ansible-lint==5.4.0
+ansible-lint==6.16.2
 apache-libcloud==3.7.0
 ara[server]==1.6.1
 dopy==0.3.7
 molecule==5.0.1
 molecule-plugins[vagrant]==23.4.1
 python-vagrant==1.0.0
-pytest-testinfra==7.0.0
-tox==4.5.1
-yamllint==1.31.0
+pytest-testinfra==8.1.0
+tox==4.5.2
+yamllint==1.32.0
diff --git a/tests/requirements.yml b/tests/requirements.yml
new file mode 100644
index 000000000..2bedd2359
--- /dev/null
+++ b/tests/requirements.yml
@@ -0,0 +1,4 @@
+---
+collections:
+  - name: amazon.aws
+    version: 6.0.1
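The new tests/requirements.yml above is a standard Galaxy requirements file; pinning `amazon.aws` there is what makes the `amazon.aws.ec2_instance` and `amazon.aws.ec2_metadata_facts` tasks resolvable in CI, and `ansible-galaxy install -r tests/requirements.yml` (added to run.rc and the test prep script below) installs it. Further collections could be pinned with the same syntax; the commented entry here is only an illustration, not part of this patch:

    ---
    collections:
      - name: amazon.aws
        version: 6.0.1
      # hypothetical additional pin, same syntax:
      # - name: community.google
      #   version: 1.0.0
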
diff --git a/tests/scripts/testcases_prepare.sh b/tests/scripts/testcases_prepare.sh
index 38191cebd..84d0a99ba 100755
--- a/tests/scripts/testcases_prepare.sh
+++ b/tests/scripts/testcases_prepare.sh
@@ -3,6 +3,7 @@ set -euxo pipefail
 
 /usr/bin/python -m pip uninstall -y ansible ansible-base ansible-core
 /usr/bin/python -m pip install -r tests/requirements.txt
+ansible-galaxy install -r tests/requirements.yml
 mkdir -p /.ssh
 mkdir -p cluster-dump
 mkdir -p $HOME/.ssh
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index e2287f9e4..c736ac730 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -32,7 +32,7 @@
         fail_msg: kubelet_rotate_server_certificates is {{ kubelet_rotate_server_certificates }} but no csr's found
 
     - name: Get Denied/Pending certificate signing requests
-      shell: "{{ bin_dir }}/kubectl get csr | grep -e Denied -e Pending || true"
+      shell: "set -o pipefail && {{ bin_dir }}/kubectl get csr | grep -e Denied -e Pending || true"
       register: get_csr_denied_pending
       changed_when: false
 
@@ -87,6 +87,7 @@
     - name: Run 2 agnhost pods in test ns
       shell:
         cmd: |
+          set -o pipefail
          cat <