Upgrade ansible (#10190)

* project: update all dependencies including ansible

Upgrade to ansible 7.x and ansible-core 2.14.x. There seem to be issues
with ansible 8/ansible-core 2.15, so we stay on these versions for now.
It's already quite a big bump anyway.

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
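
For reference, the resulting pins (the full requirements diff is below):

```
ansible==7.6.0
ansible-core==2.14.6
```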

* tests: install aws galaxy collection

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
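
The exact collection list lives in tests/requirements.yml; a minimal sketch of the shape of such a file (the amazon.aws collection name here is an assumption, not a quote from the repo):

```yaml
# Hypothetical sketch of tests/requirements.yml; the actual contents may differ.
collections:
  - name: amazon.aws  # assumed: the collection providing the AWS modules used by the tests
```

CI jobs then install it with `ansible-galaxy install -r tests/requirements.yml`, as the pipeline diffs below show.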

* ansible-lint: disable various rules after ansible upgrade

Temporarily disable a number of linting rules following the ansible
upgrade. Those should be taken care of separately.

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: resolve deprecated-module ansible-lint error

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
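
The main instance in this PR is the long-deprecated bare `include`, replaced by its static/dynamic successors; an illustrative before/after taken from the diffs below:

```yaml
# Before: 'include' is flagged as deprecated and was removed in later ansible-core releases
- include: sysctl.yml

# After: import_tasks (static) or include_tasks (dynamic)
- import_tasks: sysctl.yml
```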

* project: resolve no-free-form ansible-lint error

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
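
Free-form `key=value` module arguments become explicit YAML dicts; a before/after pair from the GlusterFS role below:

```yaml
# Before: free-form arguments
- name: install xfs Debian
  apt: name=xfsprogs state=present

# After: structured arguments
- name: install xfs Debian
  apt:
    name: xfsprogs
    state: present
```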

* project: resolve schema[meta] ansible-lint error

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
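
The galaxy meta schema wants version values as strings; from the role metadata diffs below:

```yaml
# Before: numeric values fail schema[meta]
min_ansible_version: 2.0

# After: quoted strings
min_ansible_version: "2.0"
```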

* project: resolve schema[playbook] ansible-lint error

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
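
Playbook-level `vars` must be a mapping, not a list; from the kvm-setup playbook diff below:

```yaml
# Before: a list under vars fails schema[playbook]
vars:
  - bootstrap_os: none

# After: a plain mapping
vars:
  bootstrap_os: none
```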

* project: resolve schema[tasks] ansible-lint error

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
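
One recurring task-level fix is the `environment` keyword, which has to be a mapping with string values; a condensed example based on the etcd tasks below:

```yaml
# Before: a list under environment, with a numeric value
environment:
  - ETCDCTL_API: 3

# After: a mapping with string values
environment:
  ETCDCTL_API: "3"
```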

* project: resolve risky-file-permissions ansible-lint error

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
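
File-creating tasks now set an explicit mode instead of inheriting whatever umask happens to apply; e.g. from the bootstrap-os diff below:

```yaml
# Before: no mode, permissions of the fetched file are unpredictable
get_url:
  url: https://yum.oracle.com/public-yum-ol7.repo
  dest: /etc/yum.repos.d/public-yum-ol7.repo

# After: explicit permissions
get_url:
  url: https://yum.oracle.com/public-yum-ol7.repo
  dest: /etc/yum.repos.d/public-yum-ol7.repo
  mode: 0644
```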

* project: resolve risky-shell-pipe ansible-lint error

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
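
A shell pipeline otherwise reports only the exit status of its last command; the fix prepends `set -o pipefail` and forces bash, which implements pipefail. One instance from the diffs below:

```yaml
# Before: a failure in 'docker info' would be masked by the pipe
shell: "docker info | grep 'Cgroup Driver' | awk -F': ' '{ print $2; }'"

# After: any failing stage fails the task
shell: "set -o pipefail && docker info | grep 'Cgroup Driver' | awk -F': ' '{ print $2; }'"
args:
  executable: /bin/bash
```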

* project: remove deprecated warn args

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
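
The `warn` argument of command/shell was removed in ansible-core 2.14, so it is simply dropped; e.g. from the CRI-O tasks below:

```yaml
# Before: 'warn' is no longer accepted
- name: CRI-O | Run yum-clean-metadata
  command: yum clean metadata
  args:
    warn: no

# After
- name: CRI-O | Run yum-clean-metadata
  command: yum clean metadata
```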

* project: use fqcn for non builtin tasks

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
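
Short module names are replaced by fully qualified collection names, so resolution no longer depends on the collection search path; one of many instances below:

```yaml
# Before: short name, resolved via the module search path
sysctl:
  name: net.ipv4.ip_forward
  value: "1"

# After: explicit collection
ansible.posix.sysctl:
  name: net.ipv4.ip_forward
  value: "1"
```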

* project: resolve syntax-check[missing-file] for contrib playbook

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
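
The contrib entry points now import the bundled version-check playbook through the collection's FQCN instead of a relative path that only resolves from the repo root; from the mitogen playbook diff below:

```yaml
# Before: relative file path, missing when run from another directory
- name: Check ansible version
  import_playbook: ansible_version.yml

# After: resolved through the kubespray collection
- name: Check ansible version
  import_playbook: kubernetes_sigs.kubespray.ansible_version
```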

* project: use arithmetic inside jinja to fix ansible 6 upgrade

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
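
Newer ansible no longer evaluates `{{ expr }} + [...]` as list arithmetic; the concatenation moves inside the jinja expression so it yields an actual list (from `kubeadm_init_phases_skip` below):

```yaml
# Before: the '+ [...]' sits outside the expression, so the result is a string
{{ kubeadm_init_phases_skip_default }} + [ "addon/kube-proxy" ]

# After: list concatenation inside jinja yields a list
{{ kubeadm_init_phases_skip_default + [ "addon/kube-proxy" ] }}
```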

---------

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
pull/10256/head
Arthur Outhenin-Chalandre 2023-06-26 12:15:45 +02:00 committed by GitHub
parent 3311e0a296
commit 25cb90bc2d
81 changed files with 345 additions and 207 deletions

View File

@@ -35,6 +35,41 @@ skip_list:
   # Roles in kubespray don't need fully qualified collection names
   # (Disabled in Feb 2023)
   - 'fqcn-builtins'
+  # names should start with an uppercase letter
+  # (Disabled in June 2023 after ansible upgrade; FIXME)
+  - 'name[casing]'
+  # Everything should be named
+  # (Disabled in June 2023 after ansible upgrade; FIXME)
+  - 'name[play]'
+  - 'name[missing]'
+  # templates should only be at the end of 'name'
+  # (Disabled in June 2023 after ansible upgrade; FIXME)
+  - 'name[jinja]'
+  - 'name[template]'
+  # order of keys errors
+  # (Disabled in June 2023 after ansible upgrade; FIXME)
+  - 'key-order'
+  # No changed-when on commands
+  # (Disabled in June 2023 after ansible upgrade; FIXME)
+  - 'no-changed-when'
+  # Disable galaxy rules
+  # (Disabled in June 2023 after ansible upgrade; FIXME)
+  - 'galaxy'
+  # Disable run-once check with free strategy
+  # (Disabled in June 2023 after ansible upgrade; FIXME)
+  - 'run-once[task]'
+  # Disable outdated-tag check
+  # (Disabled in June 2023 after ansible upgrade; FIXME)
+  - 'warning[outdated-tag]'
 exclude_paths:
   # Generated files
   - tests/files/custom_cni/cilium.yaml
+  - venv

View File

@@ -42,6 +42,7 @@ before_script:
   - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
   - python -m pip uninstall -y ansible ansible-base ansible-core
   - python -m pip install -r tests/requirements.txt
+  - ansible-galaxy install -r tests/requirements.yml
   - mkdir -p /.ssh

 .job: &job

View File

@@ -71,6 +71,7 @@ tox-inventory-builder:
     - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
     - python -m pip uninstall -y ansible ansible-base ansible-core
     - python -m pip install -r tests/requirements.txt
+    - ansible-galaxy install -r tests/requirements.yml
   script:
     - pip3 install tox
     - cd contrib/inventory_builder && tox

View File

@@ -13,6 +13,7 @@
     - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
     - python -m pip uninstall -y ansible ansible-base ansible-core
     - python -m pip install -r tests/requirements.txt
+    - ansible-galaxy install -r tests/requirements.yml
     - ./tests/scripts/vagrant_clean.sh
   script:
     - ./tests/scripts/molecule_run.sh

View File

@@ -17,6 +17,7 @@
     - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
     - python -m pip uninstall -y ansible ansible-base ansible-core
     - python -m pip install -r tests/requirements.txt
+    - ansible-galaxy install -r tests/requirements.yml
     - ./tests/scripts/vagrant_clean.sh
   script:
     - ./tests/scripts/testcases_run.sh

View File

@@ -12,6 +12,7 @@ To install development dependencies you can set up a python virtual env with the
 virtualenv venv
 source venv/bin/activate
 pip install -r tests/requirements.txt
+ansible-galaxy install -r tests/requirements.yml
 ```

 #### Linting

View File

@@ -67,7 +67,7 @@
     mode: 0640

 - name: Add my pubkey to "{{ distro_user }}" user authorized keys
-  authorized_key:
+  ansible.posix.authorized_key:
     user: "{{ distro_user }}"
     state: present
     key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"

View File

@@ -13,7 +13,7 @@
     distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}"

 - name: Create dind node containers from "containers" inventory section
-  docker_container:
+  community.docker.docker_container:
     image: "{{ distro_image }}"
     name: "{{ item }}"
     state: started

View File

@@ -3,6 +3,6 @@
   gather_facts: False
   become: yes
   vars:
-    - bootstrap_os: none
+    bootstrap_os: none
   roles:
-    - kvm-setup
+    - { role: kvm-setup }

View File

@@ -23,8 +23,8 @@
   when: ansible_os_family == "Debian"

 # Create deployment user if required
-- include: user.yml
+- include_tasks: user.yml
   when: k8s_deployment_user is defined

 # Set proper sysctl values
-- include: sysctl.yml
+- import_tasks: sysctl.yml

View File

@@ -1,6 +1,6 @@
 ---
 - name: Load br_netfilter module
-  modprobe:
+  community.general.modprobe:
     name: br_netfilter
     state: present
   register: br_netfilter
@@ -25,7 +25,7 @@
 - name: Enable net.ipv4.ip_forward in sysctl
-  sysctl:
+  ansible.posix.sysctl:
     name: net.ipv4.ip_forward
     value: 1
     sysctl_file: "{{ sysctl_file_path }}"
@@ -33,7 +33,7 @@
     reload: yes

 - name: Set bridge-nf-call-{arptables,iptables} to 0
-  sysctl:
+  ansible.posix.sysctl:
     name: "{{ item }}"
     state: present
     value: 0

View File

@@ -1,6 +1,6 @@
 ---
 - name: Check ansible version
-  import_playbook: ansible_version.yml
+  import_playbook: kubernetes_sigs.kubespray.ansible_version

 - hosts: localhost
   strategy: linear
@@ -24,6 +24,7 @@
       url: "{{ mitogen_url }}"
       dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
       validate_certs: true
+      mode: 0644

   - name: extract archive
     unarchive:
@@ -31,12 +32,12 @@
       dest: "{{ playbook_dir }}/dist/"

   - name: copy plugin
-    synchronize:
+    ansible.posix.synchronize:
       src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
       dest: "{{ playbook_dir }}/plugins/mitogen"

   - name: add strategy to ansible.cfg
-    ini_file:
+    community.general.ini_file:
       path: ansible.cfg
       mode: 0644
       section: "{{ item.section | d('defaults') }}"

View File

@@ -6,12 +6,12 @@ galaxy_info:
   description: GlusterFS installation for Linux.
   company: "Midwestern Mac, LLC"
   license: "license (BSD, MIT)"
-  min_ansible_version: 2.0
+  min_ansible_version: "2.0"
   platforms:
     - name: EL
       versions:
-        - 6
-        - 7
+        - "6"
+        - "7"
     - name: Ubuntu
       versions:
         - precise

View File

@@ -3,14 +3,17 @@
 # hyperkube and needs to be installed as part of the system.

 # Setup/install tasks.
-- include: setup-RedHat.yml
+- include_tasks: setup-RedHat.yml
   when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined

-- include: setup-Debian.yml
+- include_tasks: setup-Debian.yml
   when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined

 - name: Ensure Gluster mount directories exist.
-  file: "path={{ item }} state=directory mode=0775"
+  file:
+    path: "{{ item }}"
+    state: directory
+    mode: 0775
   with_items:
     - "{{ gluster_mount_dir }}"
   when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined

View File

@@ -1,10 +1,14 @@
 ---
 - name: Install Prerequisites
-  package: name={{ item }} state=present
+  package:
+    name: "{{ item }}"
+    state: present
   with_items:
     - "centos-release-gluster{{ glusterfs_default_release }}"

 - name: Install Packages
-  package: name={{ item }} state=present
+  package:
+    name: "{{ item }}"
+    state: present
   with_items:
     - glusterfs-client

View File

@@ -6,12 +6,12 @@ galaxy_info:
   description: GlusterFS installation for Linux.
   company: "Midwestern Mac, LLC"
   license: "license (BSD, MIT)"
-  min_ansible_version: 2.0
+  min_ansible_version: "2.0"
   platforms:
     - name: EL
       versions:
-        - 6
-        - 7
+        - "6"
+        - "7"
     - name: Ubuntu
       versions:
         - precise

View File

@@ -5,39 +5,55 @@
 # Install xfs package
 - name: install xfs Debian
-  apt: name=xfsprogs state=present
+  apt:
+    name: xfsprogs
+    state: present
   when: ansible_os_family == "Debian"

 - name: install xfs RedHat
-  package: name=xfsprogs state=present
+  package:
+    name: xfsprogs
+    state: present
   when: ansible_os_family == "RedHat"

 # Format external volumes in xfs
 - name: Format volumes in xfs
-  filesystem: "fstype=xfs dev={{ disk_volume_device_1 }}"
+  community.general.filesystem:
+    fstype: xfs
+    dev: "{{ disk_volume_device_1 }}"

 # Mount external volumes
 - name: mounting new xfs filesystem
-  mount: "name={{ gluster_volume_node_mount_dir }} src={{ disk_volume_device_1 }} fstype=xfs state=mounted"
+  ansible.posix.mount:
+    name: "{{ gluster_volume_node_mount_dir }}"
+    src: "{{ disk_volume_device_1 }}"
+    fstype: xfs
+    state: mounted

 # Setup/install tasks.
-- include: setup-RedHat.yml
+- include_tasks: setup-RedHat.yml
   when: ansible_os_family == 'RedHat'

-- include: setup-Debian.yml
+- include_tasks: setup-Debian.yml
   when: ansible_os_family == 'Debian'

 - name: Ensure GlusterFS is started and enabled at boot.
-  service: "name={{ glusterfs_daemon }} state=started enabled=yes"
+  service:
+    name: "{{ glusterfs_daemon }}"
+    state: started
+    enabled: yes

 - name: Ensure Gluster brick and mount directories exist.
-  file: "path={{ item }} state=directory mode=0775"
+  file:
+    path: "{{ item }}"
+    state: directory
+    mode: 0775
   with_items:
     - "{{ gluster_brick_dir }}"
     - "{{ gluster_mount_dir }}"

 - name: Configure Gluster volume with replicas
-  gluster_volume:
+  gluster.gluster.gluster_volume:
     state: present
     name: "{{ gluster_brick_name }}"
     brick: "{{ gluster_brick_dir }}"
@@ -49,7 +65,7 @@
   when: groups['gfs-cluster']|length > 1

 - name: Configure Gluster volume without replicas
-  gluster_volume:
+  gluster.gluster.gluster_volume:
     state: present
     name: "{{ gluster_brick_name }}"
     brick: "{{ gluster_brick_dir }}"
@@ -60,7 +76,7 @@
   when: groups['gfs-cluster']|length <= 1

 - name: Mount glusterfs to retrieve disk size
-  mount:
+  ansible.posix.mount:
     name: "{{ gluster_mount_dir }}"
     src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
     fstype: glusterfs
@@ -69,7 +85,8 @@
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

 - name: Get Gluster disk size
-  setup: filter=ansible_mounts
+  setup:
+    filter: ansible_mounts
   register: mounts_data
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
@@ -86,7 +103,7 @@
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

 - name: Unmount glusterfs
-  mount:
+  ansible.posix.mount:
     name: "{{ gluster_mount_dir }}"
     fstype: glusterfs
     src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"

View File

@@ -1,11 +1,15 @@
 ---
 - name: Install Prerequisites
-  package: name={{ item }} state=present
+  package:
+    name: "{{ item }}"
+    state: present
   with_items:
     - "centos-release-gluster{{ glusterfs_default_release }}"

 - name: Install Packages
-  package: name={{ item }} state=present
+  package:
+    name: "{{ item }}"
+    state: present
   with_items:
     - glusterfs-server
     - glusterfs-client

View File

@@ -5,7 +5,7 @@
     - "dm_snapshot"
     - "dm_mirror"
     - "dm_thin_pool"
-  modprobe:
+  community.general.modprobe:
     name: "{{ item }}"
     state: "present"

View File

@@ -14,6 +14,7 @@
 - template:
     src: ./contrib/offline/temp/{{ item }}.list.template
     dest: ./contrib/offline/temp/{{ item }}.list
+    mode: 0644
   with_items:
     - files
     - images

View File

@@ -3,8 +3,8 @@
   gather_facts: false
   become: no
   vars:
-    minimal_ansible_version: 2.12.0
-    maximal_ansible_version: 2.13.0
+    minimal_ansible_version: 2.14.0
+    maximal_ansible_version: 2.16.0
     ansible_connection: local
   tags: always
   tasks:

View File

@@ -17,14 +17,15 @@
 - hosts: etcd:k8s_cluster:calico_rr
   gather_facts: False
-  vars_prompt:
-    name: "reset_confirmation"
-    prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster."
-    default: "no"
-    private: no
   pre_tasks:
-    - name: check confirmation
+    - name: Reset Confirmation
+      pause:
+        prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster."
+      register: reset_confirmation
+      run_once: True
+      when:
+        - not (skip_confirmation | default(false) | bool)
+
+    - name: Check confirmation
       fail:
         msg: "Reset confirmation failed"
       when: reset_confirmation != "yes"

View File

@@ -1,10 +1,10 @@
-ansible==5.7.1
-ansible-core==2.12.10
-cryptography==3.4.8
+ansible==7.6.0
+ansible-core==2.14.6
+cryptography==41.0.1
 jinja2==3.1.2
 jmespath==1.0.1
-MarkupSafe==2.1.2
+MarkupSafe==2.1.3
 netaddr==0.8.0
 pbr==5.11.1
-ruamel.yaml==0.17.21
+ruamel.yaml==0.17.31
 ruamel.yaml.clib==0.2.7

View File

@@ -5,7 +5,7 @@
     filter: ansible_distribution_*version

 - name: Add proxy to yum.conf or dnf.conf if http_proxy is defined
-  ini_file:
+  community.general.ini_file:
     path: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf','/etc/dnf/dnf.conf') }}"
     section: main
     option: proxy
@@ -21,6 +21,7 @@
   get_url:
     url: https://yum.oracle.com/public-yum-ol7.repo
     dest: /etc/yum.repos.d/public-yum-ol7.repo
+    mode: 0644
   when:
     - use_oracle_public_repo|default(true)
    - '''ID="ol"'' in os_release.stdout_lines'
@@ -28,7 +29,7 @@
   environment: "{{ proxy_env }}"

 - name: Enable Oracle Linux repo
-  ini_file:
+  community.general.ini_file:
     dest: /etc/yum.repos.d/public-yum-ol7.repo
     section: "{{ item }}"
     option: enabled
@@ -53,7 +54,7 @@
     - (ansible_distribution_version | float) >= 7.6

 - name: Enable Oracle Linux repo
-  ini_file:
+  community.general.ini_file:
     dest: "/etc/yum.repos.d/oracle-linux-ol{{ ansible_distribution_major_version }}.repo"
     section: "ol{{ ansible_distribution_major_version }}_addons"
     option: "{{ item.option }}"
@@ -69,7 +70,7 @@
     - (ansible_distribution_version | float) >= 7.6

 - name: Enable Centos extra repo for Oracle Linux
-  ini_file:
+  community.general.ini_file:
     dest: "/etc/yum.repos.d/centos-extras.repo"
     section: "extras"
     option: "{{ item.option }}"

View File

@@ -10,7 +10,7 @@
     - facts

 - name: Add proxy to dnf.conf if http_proxy is defined
-  ini_file:
+  community.general.ini_file:
     path: "/etc/dnf/dnf.conf"
     section: main
     option: proxy

View File

@@ -58,7 +58,7 @@
 # Without this package, the get_url module fails when trying to handle https
 - name: Install python-cryptography
-  zypper:
+  community.general.zypper:
     name: python-cryptography
     state: present
     update_cache: true
@@ -67,7 +67,7 @@
     - ansible_distribution_version is version('15.4', '<')

 - name: Install python3-cryptography
-  zypper:
+  community.general.zypper:
     name: python3-cryptography
     state: present
     update_cache: true
@@ -77,7 +77,7 @@
 # Nerdctl needs some basic packages to get an environment up
 - name: Install basic dependencies
-  zypper:
+  community.general.zypper:
     name:
       - iptables
       - apparmor-parser

View File

@@ -5,7 +5,7 @@
     filter: ansible_distribution_*version

 - name: Add proxy to yum.conf or dnf.conf if http_proxy is defined
-  ini_file:
+  community.general.ini_file:
     path: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf','/etc/dnf/dnf.conf') }}"
     section: main
     option: proxy
@@ -31,7 +31,7 @@
   become: true

 - name: RHEL subscription Organization ID/Activation Key registration
-  redhat_subscription:
+  community.general.redhat_subscription:
     state: present
     org_id: "{{ rh_subscription_org_id }}"
     activationkey: "{{ rh_subscription_activation_key }}"
@@ -50,7 +50,7 @@
 # this task has no_log set to prevent logging security sensitive information such as subscription passwords
 - name: RHEL subscription Username/Password registration
-  redhat_subscription:
+  community.general.redhat_subscription:
     state: present
     username: "{{ rh_subscription_username }}"
     password: "{{ rh_subscription_password }}"
@@ -70,7 +70,7 @@
 # container-selinux is in extras repo
 - name: Enable RHEL 7 repos
-  rhsm_repository:
+  community.general.rhsm_repository:
     name:
       - "rhel-7-server-rpms"
       - "rhel-7-server-extras-rpms"
@@ -81,7 +81,7 @@
 # container-selinux is in appstream repo
 - name: Enable RHEL 8 repos
-  rhsm_repository:
+  community.general.rhsm_repository:
     name:
       - "rhel-8-for-*-baseos-rpms"
       - "rhel-8-for-*-appstream-rpms"

View File

@@ -83,7 +83,7 @@
   when: ansible_distribution in ["Amazon"]

 - name: Disable modular repos for CRI-O
-  ini_file:
+  community.general.ini_file:
     path: "/etc/yum.repos.d/{{ item.repo }}.repo"
     section: "{{ item.section }}"
     option: enabled

View File

@@ -122,7 +122,7 @@
     mode: 0755

 - name: cri-o | set overlay driver
-  ini_file:
+  community.general.ini_file:
     dest: /etc/containers/storage.conf
     section: storage
     option: "{{ item.option }}"
@@ -136,7 +136,7 @@
 # metacopy=on is available since 4.19 and was backported to RHEL 4.18 kernel
 - name: cri-o | set metacopy mount options correctly
-  ini_file:
+  community.general.ini_file:
     dest: /etc/containers/storage.conf
     section: storage.options.overlay
     option: mountopt

View File

@@ -43,8 +43,6 @@
 - name: CRI-O | Run yum-clean-metadata
   command: yum clean metadata
-  args:
-    warn: no
   when:
     - ansible_os_family == "RedHat"
   tags:

View File

@@ -14,7 +14,7 @@
     - amzn2_extras_file_stat.stat.exists

 - name: Remove docker repository
-  ini_file:
+  community.general.ini_file:
     dest: /etc/yum.repos.d/amzn2-extras.repo
     section: amzn2extra-docker
     option: enabled

View File

@@ -36,7 +36,7 @@
     - qemu

 - name: kata-containers | Load vhost kernel modules
-  modprobe:
+  community.general.modprobe:
     state: present
     name: "{{ item }}"
   with_items:

View File

@@ -80,7 +80,7 @@
     - download_run_once

 - name: download_container | Copy image to ansible host cache
-  synchronize:
+  ansible.posix.synchronize:
     src: "{{ image_path_final }}"
     dest: "{{ image_path_cached }}"
     use_ssh_args: true
@@ -92,7 +92,7 @@
     - download_delegate == inventory_hostname

 - name: download_container | Upload image to node if it is cached
-  synchronize:
+  ansible.posix.synchronize:
     src: "{{ image_path_cached }}"
     dest: "{{ image_path_final }}"
     use_ssh_args: true

View File

@@ -105,7 +105,7 @@
   no_log: "{{ not (unsafe_show_logs|bool) }}"

 - name: download_file | Copy file back to ansible host file cache
-  synchronize:
+  ansible.posix.synchronize:
     src: "{{ file_path_cached }}"
     dest: "{{ file_path_cached }}"
     use_ssh_args: true
@@ -116,7 +116,7 @@
     - download_delegate == inventory_hostname

 - name: download_file | Copy file from cache to nodes, if it is available
-  synchronize:
+  ansible.posix.synchronize:
     src: "{{ file_path_cached }}"
     dest: "{{ download.dest }}"
     use_ssh_args: true

View File

@@ -11,7 +11,8 @@
   when: etcd_cluster_is_healthy.rc == 0

 - name: Refresh Time Fact
-  setup: filter=ansible_date_time
+  setup:
+    filter: ansible_date_time

 - name: Set Backup Directory
   set_fact:
@@ -40,7 +41,7 @@
     --data-dir {{ etcd_data_dir }}
     --backup-dir {{ etcd_backup_directory }}
   environment:
-    ETCDCTL_API: 2
+    ETCDCTL_API: "2"
   retries: 3
   register: backup_v2_command
   until: backup_v2_command.rc == 0
@@ -51,7 +52,7 @@
     {{ bin_dir }}/etcdctl
     snapshot save {{ etcd_backup_directory }}/snapshot.db
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses.split(',') | first }}"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"

View File

@@ -7,5 +7,6 @@
 - name: Remove old etcd backups
   shell:
     chdir: "{{ etcd_backup_prefix }}"
-    cmd: "find . -name 'etcd-*' -type d | sort -n | head -n -{{ etcd_backup_retention_count }} | xargs rm -rf"
+    cmd: "set -o pipefail && find . -name 'etcd-*' -type d | sort -n | head -n -{{ etcd_backup_retention_count }} | xargs rm -rf"
+    executable: /bin/bash
   when: etcd_backup_retention_count >= 0

View File

@@ -8,11 +8,13 @@
   changed_when: false
   check_mode: no
   run_once: yes
-  when: is_etcd_master and etcd_cluster_setup
+  when:
+    - is_etcd_master
+    - etcd_cluster_setup
   tags:
     - facts
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
@@ -27,11 +29,13 @@
   changed_when: false
   check_mode: no
   run_once: yes
-  when: is_etcd_master and etcd_events_cluster_setup
+  when:
+    - is_etcd_master
+    - etcd_events_cluster_setup
   tags:
     - facts
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
@@ -96,7 +100,7 @@
   tags:
     - facts
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
@@ -119,7 +123,7 @@
   tags:
     - facts
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
@@ -135,7 +139,7 @@
   tags:
     - facts
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
@@ -151,7 +155,7 @@
   tags:
     - facts
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"

View File

@@ -41,16 +41,18 @@
 - name: Gen_certs | run cert generation script for etcd and kube control plane nodes
   command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
   environment:
-    - MASTERS: "{% for m in groups['etcd'] %}
-        {% if gen_master_certs[m] %}
-          {{ m }}
-        {% endif %}
-      {% endfor %}"
-    - HOSTS: "{% for h in groups['kube_control_plane'] %}
-        {% if gen_node_certs[h] %}
-          {{ h }}
-        {% endif %}
-      {% endfor %}"
+    MASTERS: |-
+      {% for m in groups['etcd'] %}
+      {% if gen_master_certs[m] %}
+      {{ m }}
+      {% endif %}
+      {% endfor %}
+    HOSTS: |-
+      {% for h in groups['kube_control_plane'] %}
+      {% if gen_node_certs[h] %}
+      {{ h }}
+      {% endif %}
+      {% endfor %}
   run_once: yes
   delegate_to: "{{ groups['etcd'][0] }}"
   when: gen_certs|default(false)
@@ -59,11 +61,12 @@
 - name: Gen_certs | run cert generation script for all clients
   command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
   environment:
-    - HOSTS: "{% for h in groups['k8s_cluster'] %}
-        {% if gen_node_certs[h] %}
-          {{ h }}
-        {% endif %}
-      {% endfor %}"
+    HOSTS: |-
+      {% for h in groups['k8s_cluster'] %}
+      {% if gen_node_certs[h] %}
+      {{ h }}
+      {% endif %}
+      {% endfor %}
   run_once: yes
   delegate_to: "{{ groups['etcd'][0] }}"
   when:

View File

@@ -17,7 +17,6 @@
   shell: "set -o pipefail && tar cfz - -C {{ etcd_cert_dir }} {{ my_etcd_node_certs|join(' ') }} | base64 --wrap=0"
   args:
     executable: /bin/bash
-    warn: false
   no_log: "{{ not (unsafe_show_logs|bool) }}"
   register: etcd_node_certs
   check_mode: no

View File

@@ -1,12 +1,12 @@
 ---
 - name: Join Member | Add member to etcd-events cluster # noqa 301 305
-  shell: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_events_peer_url }}"
+  command: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_events_peer_url }}"
   register: member_add_result
   until: member_add_result.rc == 0
   retries: "{{ etcd_retries }}"
   delay: "{{ retry_stagger | random + 3 }}"
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
@@ -34,7 +34,7 @@
   tags:
     - facts
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"

View File

@@ -1,13 +1,13 @@
 ---
 - name: Join Member | Add member to etcd cluster # noqa 301 305
-  shell: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_peer_url }}"
+  command: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_peer_url }}"
   register: member_add_result
   until: member_add_result.rc == 0 or 'Peer URLs already exists' in member_add_result.stderr
   failed_when: member_add_result.rc != 0 and 'Peer URLs already exists' not in member_add_result.stderr
   retries: "{{ etcd_retries }}"
   delay: "{{ retry_stagger | random + 3 }}"
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
@@ -38,7 +38,7 @@
   tags:
     - facts
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"

View File

@@ -5,7 +5,7 @@
     download: "{{ download_defaults | combine(downloads.yq) }}"

 - name: Kubernetes Apps | Copy yq binary from download dir
-  synchronize:
+  ansible.posix.synchronize:
     src: "{{ downloads.yq.dest }}"
     dest: "{{ bin_dir }}/yq"
     compress: no
@@ -46,7 +46,7 @@
     - "inventory_hostname == groups['kube_control_plane'][0]"

 - name: Kubernetes Apps | Copy ArgoCD remote manifests from download dir
-  synchronize:
+  ansible.posix.synchronize:
     src: "{{ local_release_dir }}/{{ item.file }}"
     dest: "{{ kube_config_dir }}/{{ item.file }}"
     compress: no

View File

@@ -1,6 +1,6 @@
 ---
-- include: credentials-check.yml
+- import_tasks: credentials-check.yml

 - name: "OCI Cloud Controller | Generate Cloud Provider Configuration"
   template:

View File

@@ -66,7 +66,10 @@
     - cloud_provider == 'oci'

 - name: PriorityClass | Copy k8s-cluster-critical-pc.yml file
-  copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml mode=0640
+  copy:
+    src: k8s-cluster-critical-pc.yml
+    dest: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml"
+    mode: 0640
   when: inventory_hostname == groups['kube_control_plane']|last

 - name: PriorityClass | Create k8s-cluster-critical

View File

@@ -44,7 +44,9 @@
     state: restarted

 - name: Master | Remove apiserver container docker
-  shell: docker ps -af name=k8s_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f
+  shell: "set -o pipefail && docker ps -af name=k8s_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f"
+  args:
+    executable: /bin/bash
   register: remove_apiserver_container
   retries: 10
   until: remove_apiserver_container.rc == 0
@@ -52,7 +54,9 @@
   when: container_manager == "docker"

 - name: Master | Remove apiserver container containerd/crio
-  shell: "{{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
+  shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
+  args:
+    executable: /bin/bash
   register: remove_apiserver_container
   retries: 10
   until: remove_apiserver_container.rc == 0
@@ -60,7 +64,9 @@
   when: container_manager in ['containerd', 'crio']

 - name: Master | Remove scheduler container docker
-  shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_kube-scheduler* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
+  shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_kube-scheduler* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
+  args:
+    executable: /bin/bash
   register: remove_scheduler_container
   retries: 10
   until: remove_scheduler_container.rc == 0
@@ -68,7 +74,9 @@
   when: container_manager == "docker"

 - name: Master | Remove scheduler container containerd/crio
-  shell: "{{ bin_dir }}/crictl pods --name kube-scheduler* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
+  shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-scheduler* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
+  args:
+    executable: /bin/bash
   register: remove_scheduler_container
   retries: 10
   until: remove_scheduler_container.rc == 0
@@ -76,7 +84,9 @@
   when: container_manager in ['containerd', 'crio']

 - name: Master | Remove controller manager container docker
-  shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
+  shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
+  args:
+    executable: /bin/bash
   register: remove_cm_container
   retries: 10
   until: remove_cm_container.rc == 0
@@ -84,7 +94,9 @@
   when: container_manager == "docker"

 - name: Master | Remove controller manager container containerd/crio
-  shell: "{{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
+  shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
+  args:
+    executable: /bin/bash
   register: remove_cm_container
   retries: 10
   until: remove_cm_container.rc == 0

View File

@@ -1,7 +1,9 @@
 ---
 - block:
     - name: look up docker cgroup driver
-      shell: "docker info | grep 'Cgroup Driver' | awk -F': ' '{ print $2; }'"
+      shell: "set -o pipefail && docker info | grep 'Cgroup Driver' | awk -F': ' '{ print $2; }'"
+      args:
+        executable: /bin/bash
       register: docker_cgroup_driver_result
       changed_when: false
       check_mode: no
@@ -13,7 +15,9 @@
 - block:
     - name: look up crio cgroup driver
-      shell: "{{ bin_dir }}/crio-status info | grep 'cgroup driver' | awk -F': ' '{ print $2; }'"
+      shell: "set -o pipefail && {{ bin_dir }}/crio-status info | grep 'cgroup driver' | awk -F': ' '{ print $2; }'"
+      args:
+        executable: /bin/bash
       register: crio_cgroup_driver_result
       changed_when: false
@@ -40,7 +44,6 @@
   when: kubelet_cgroup_driver == 'cgroupfs'

 - name: set kubelet_config_extra_args options when cgroupfs is used
-  vars:
   set_fact:
     kubelet_config_extra_args: "{{ kubelet_config_extra_args | combine(kubelet_config_extra_args_cgroupfs) }}"
   when: kubelet_cgroup_driver == 'cgroupfs'

View File

@@ -41,7 +41,7 @@
     - haproxy

 - name: Ensure nodePort range is reserved
-  sysctl:
+  ansible.posix.sysctl:
     name: net.ipv4.ip_local_reserved_ports
     value: "{{ kube_apiserver_node_port_range }}"
     sysctl_set: yes
@@ -68,7 +68,7 @@
     mode: 0755

 - name: Enable br_netfilter module
-  modprobe:
+  community.general.modprobe:
     name: br_netfilter
     state: present
   when: modinfo_br_netfilter.rc == 0
@@ -89,7 +89,7 @@
   register: sysctl_bridge_nf_call_iptables

 - name: Enable bridge-nf-call tables
-  sysctl:
+  ansible.posix.sysctl:
     name: "{{ item }}"
     state: present
     sysctl_file: "{{ sysctl_file_path }}"
@@ -102,7 +102,7 @@
     - net.bridge.bridge-nf-call-ip6tables

 - name: Modprobe Kernel Module for IPVS
-  modprobe:
+  community.general.modprobe:
     name: "{{ item }}"
     state: present
   with_items:
@@ -115,7 +115,7 @@
     - kube-proxy

 - name: Modprobe nf_conntrack_ipv4
-  modprobe:
+  community.general.modprobe:
     name: nf_conntrack_ipv4
     state: present
   register: modprobe_nf_conntrack_ipv4

View File

@@ -68,7 +68,9 @@
   when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'

 - name: Preinstall | restart kube-controller-manager docker
-  shell: "{{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
+  shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
+  args:
+    executable: /bin/bash
   when:
     - container_manager == "docker"
     - inventory_hostname in groups['kube_control_plane']
@@ -77,7 +79,9 @@
     - kube_controller_set.stat.exists

 - name: Preinstall | restart kube-controller-manager crio/containerd
-  shell: "{{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
+  shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
+  args:
+    executable: /bin/bash
   register: preinstall_restart_controller_manager
   retries: 10
   delay: 1
@@ -90,7 +94,9 @@
     - kube_controller_set.stat.exists

 - name: Preinstall | restart kube-apiserver docker
-  shell: "{{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-apiserver* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
+  shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-apiserver* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
+  args:
+    executable: /bin/bash
   when:
     - container_manager == "docker"
     - inventory_hostname in groups['kube_control_plane']
@@ -99,7 +105,9 @@
     - kube_apiserver_set.stat.exists

 - name: Preinstall | restart kube-apiserver crio/containerd
-  shell: "{{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
+  shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
+  args:
+    executable: /bin/bash
   register: preinstall_restart_apiserver
   retries: 10
   until: preinstall_restart_apiserver.rc == 0

View File

@@ -1,6 +1,6 @@
 ---
 - name: Remove swapfile from /etc/fstab
-  mount:
+  ansible.posix.mount:
     name: "{{ item }}"
     fstype: swap
     state: absent

View File

@@ -1,6 +1,6 @@
 ---
 - name: NetworkManager | Add nameservers to NM configuration
-  ini_file:
+  community.general.ini_file:
     path: /etc/NetworkManager/conf.d/dns.conf
     section: global-dns-domain-*
     option: servers
@@ -15,7 +15,7 @@
   when: not remove_default_searchdomains|default()|bool or (remove_default_searchdomains|default()|bool and searchdomains|default([])|length==0)

 - name: NetworkManager | Add DNS search to NM configuration
-  ini_file:
+  community.general.ini_file:
     path: /etc/NetworkManager/conf.d/dns.conf
     section: global-dns
     option: searches
@@ -25,7 +25,7 @@
   notify: Preinstall | update resolvconf for networkmanager

 - name: NetworkManager | Add DNS options to NM configuration
-  ini_file:
+  community.general.ini_file:
     path: /etc/NetworkManager/conf.d/dns.conf
     section: global-dns
     option: options

View File

@@ -12,7 +12,7 @@
   register: slc

 - name: Set selinux policy
-  selinux:
+  ansible.posix.selinux:
     policy: targeted
     state: "{{ preinstall_selinux_state }}"
   when:
@@ -71,7 +71,7 @@
     mode: 0755

 - name: Enable ip forwarding
-  sysctl:
+  ansible.posix.sysctl:
     sysctl_file: "{{ sysctl_file_path }}"
     name: net.ipv4.ip_forward
     value: "1"
@@ -79,7 +79,7 @@
     reload: yes

 - name: Enable ipv6 forwarding
-  sysctl:
+  ansible.posix.sysctl:
     sysctl_file: "{{ sysctl_file_path }}"
     name: net.ipv6.conf.all.forwarding
     value: "1"
@@ -97,7 +97,7 @@
   ignore_errors: true # noqa ignore-errors

 - name: Set fs.may_detach_mounts if needed
-  sysctl:
+  ansible.posix.sysctl:
     sysctl_file: "{{ sysctl_file_path }}"
     name: fs.may_detach_mounts
     value: 1
@@ -106,7 +106,7 @@
   when: fs_may_detach_mounts.stat.exists | d(false)

 - name: Ensure kube-bench parameters are set
-  sysctl:
+  ansible.posix.sysctl:
     sysctl_file: "{{ sysctl_file_path }}"
     name: "{{ item.name }}"
     value: "{{ item.value }}"
@@ -122,14 +122,14 @@
   when: kubelet_protect_kernel_defaults|bool

 - name: Check dummy module
-  modprobe:
+  community.general.modprobe:
     name: dummy
     state: present
     params: 'numdummies=0'
   when: enable_nodelocaldns

 - name: Set additional sysctl variables
-  sysctl:
+  ansible.posix.sysctl:
     sysctl_file: "{{ sysctl_file_path }}"
     name: "{{ item.name }}"
     value: "{{ item.value }}"

View File

@@ -78,7 +78,7 @@
     - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]

 - name: Set timezone
-  timezone:
+  community.general.timezone:
     name: "{{ ntp_timezone }}"
   when:
     - ntp_timezone

View File

@@ -45,7 +45,6 @@
 - name: Gen_tokens | Gather tokens
   shell: "set -o pipefail && tar cfz - {{ tokens_list.stdout_lines | join(' ') }} | base64 --wrap=0"
   args:
-    warn: false
     executable: /bin/bash
   register: tokens_data
   check_mode: no

diff --git a/… b/…
@@ -33,13 +33,13 @@ kubeadm_init_timeout: 300s
 kubeadm_init_phases_skip_default: [ "addon/coredns" ]
 kubeadm_init_phases_skip: >-
   {%- if kube_network_plugin == 'kube-router' and (kube_router_run_service_proxy is defined and kube_router_run_service_proxy) -%}
-  {{ kubeadm_init_phases_skip_default }} + [ "addon/kube-proxy" ]
+  {{ kubeadm_init_phases_skip_default + [ "addon/kube-proxy" ] }}
   {%- elif kube_network_plugin == 'cilium' and (cilium_kube_proxy_replacement is defined and cilium_kube_proxy_replacement == 'strict') -%}
-  {{ kubeadm_init_phases_skip_default }} + [ "addon/kube-proxy" ]
+  {{ kubeadm_init_phases_skip_default + [ "addon/kube-proxy" ] }}
   {%- elif kube_network_plugin == 'calico' and (calico_bpf_enabled is defined and calico_bpf_enabled) -%}
-  {{ kubeadm_init_phases_skip_default }} + [ "addon/kube-proxy" ]
+  {{ kubeadm_init_phases_skip_default + [ "addon/kube-proxy" ] }}
   {%- elif kube_proxy_remove is defined and kube_proxy_remove -%}
-  {{ kubeadm_init_phases_skip_default }} + [ "addon/kube-proxy" ]
+  {{ kubeadm_init_phases_skip_default + [ "addon/kube-proxy" ] }}
   {%- else -%}
   {{ kubeadm_init_phases_skip_default }}
   {%- endif -%}
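The hunk above moves the list concatenation inside the Jinja expression: with the upgraded ansible-core, the old spelling renders kubeadm_init_phases_skip_default to its string form and then tacks on the literal text + [ "addon/kube-proxy" ], so the variable stops being a usable list. A minimal sketch of the difference, runnable against localhost; variable names are illustrative, not from the repository:

---
# jinja-arith-demo.yml (hypothetical): ansible-playbook jinja-arith-demo.yml
- hosts: localhost
  gather_facts: false
  vars:
    skip_default: ["addon/coredns"]
    broken: "{{ skip_default }} + ['addon/kube-proxy']"  # renders to plain text
    fixed: "{{ skip_default + ['addon/kube-proxy'] }}"   # genuine list concatenation
  tasks:
    - name: Compare the two forms
      debug:
        msg: "broken is {{ broken | type_debug }}, fixed is {{ fixed | type_debug }}"

On the ansible-core line targeted by this PR, fixed comes back as a real two-element list while broken stays a string.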

diff --git a/… b/…
@@ -13,14 +13,18 @@
     state: absent

 - name: Calico | delete calico-node docker containers
-  shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_POD_calico-node* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
+  shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_POD_calico-node* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
+  args:
+    executable: /bin/bash
   register: docker_calico_node_remove
   until: docker_calico_node_remove is succeeded
   retries: 5
   when: container_manager in ["docker"]

 - name: Calico | delete calico-node crio/containerd containers
-  shell: '{{ bin_dir }}/crictl pods --name calico-node-* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"'
+  shell: 'set -o pipefail && {{ bin_dir }}/crictl pods --name calico-node-* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"'
+  args:
+    executable: /bin/bash
   register: crictl_calico_node_remove
   until: crictl_calico_node_remove is succeeded
   retries: 5
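These hunks follow ansible-lint's risky-shell-pipe rule: a pipeline's exit status is normally that of its last command, so a failing docker ps or crictl pods would be masked by a succeeding xargs. Because pipefail is a bash option rather than POSIX sh, the tasks also pin the interpreter. A self-contained sketch in which the pipeline is deliberately broken:

---
# pipefail-demo.yml (hypothetical playbook)
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Fails under pipefail even though wc, the last command, succeeds
      shell: "set -o pipefail && cat /nonexistent | wc -l"
      args:
        executable: /bin/bash   # pipefail is a bash option, not POSIX sh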

diff --git a/… b/…
@@ -1,6 +1,6 @@
 ---
 - name: Cilium | Ensure BPFFS mounted
-  mount:
+  ansible.posix.mount:
     fstype: bpf
     path: /sys/fs/bpf
     src: bpffs

diff --git a/… b/…
@@ -6,14 +6,18 @@
     - Kube-router | delete kube-router crio/containerd containers

 - name: Kube-router | delete kube-router docker containers
-  shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_POD_kube-router* -q | xargs --no-run-if-empty docker rm -f"
+  shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_POD_kube-router* -q | xargs --no-run-if-empty docker rm -f"
+  args:
+    executable: /bin/bash
   register: docker_kube_router_remove
   until: docker_kube_router_remove is succeeded
   retries: 5
   when: container_manager in ["docker"]

 - name: Kube-router | delete kube-router crio/containerd containers
-  shell: '{{ bin_dir }}/crictl pods --name kube-router* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"'
+  shell: 'set -o pipefail && {{ bin_dir }}/crictl pods --name kube-router* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"'
+  args:
+    executable: /bin/bash
   register: crictl_kube_router_remove
   until: crictl_kube_router_remove is succeeded
   retries: 5

diff --git a/… b/…
@@ -1,6 +1,6 @@
 ---
 - name: kube-router | Create annotations
-  include: annotate.yml
+  import_tasks: annotate.yml
   tags: annotate

 - name: kube-router | Create config directory
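The bare include action has been deprecated since Ansible 2.4 and now trips the deprecated-module lint rule; the static import_tasks is the drop-in replacement here, with include_tasks as the dynamic variant when the file name is only known at runtime. A short sketch of both replacements (the templated file name is hypothetical):

- name: Static import, resolved when the playbook is parsed
  import_tasks: annotate.yml

- name: Dynamic include, resolved at runtime
  include_tasks: "{{ network_plugin }}-annotate.yml"   # hypothetical templated name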

diff --git a/… b/…
@@ -7,7 +7,7 @@
 - name: Macvlan | set node_pod_cidr
   set_fact:
-    node_pod_cidr={{ node_pod_cidr_cmd.stdout }}
+    node_pod_cidr: "{{ node_pod_cidr_cmd.stdout }}"

 - name: Macvlan | Retrieve default gateway network interface
   become: false
@@ -17,7 +17,7 @@
 - name: Macvlan | set node_default_gateway_interface
   set_fact:
-    node_default_gateway_interface={{ node_default_gateway_interface_cmd.stdout | trim }}
+    node_default_gateway_interface: "{{ node_default_gateway_interface_cmd.stdout | trim }}"

 - name: Macvlan | Install network gateway interface on debian
   template:
@@ -101,7 +101,7 @@
     mode: 0644

 - name: Enable net.ipv4.conf.all.arp_notify in sysctl
-  sysctl:
+  ansible.posix.sysctl:
     name: net.ipv4.conf.all.arp_notify
     value: 1
     sysctl_set: yes

diff --git a/… b/…
@@ -20,6 +20,7 @@
   template:
     src: multus-daemonset.yml.j2
     dest: "{{ kube_config_dir }}/{{ item.file }}"
+    mode: 0644
   with_items:
     - {name: multus-daemonset-containerd, file: multus-daemonset-containerd.yml, type: daemonset, engine: containerd }
     - {name: multus-daemonset-docker, file: multus-daemonset-docker.yml, type: daemonset, engine: docker }
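The mode: 0644 additions scattered through this commit resolve ansible-lint's risky-file-permissions rule: when template, copy, file, or get_url create a file without an explicit mode, the resulting permissions depend on the remote umask and can end up looser than intended. A minimal sketch (path and content are illustrative):

---
# mode-demo.yml (hypothetical playbook)
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Create a file with umask-independent permissions
      copy:
        content: "hello\n"
        dest: /tmp/mode-demo.txt
        mode: 0644   # without this, permissions depend on the remote umask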

diff --git a/… b/…
@@ -2,7 +2,7 @@
 - name: Wait for apiserver
   command: "{{ kubectl }} get nodes"
   environment:
-    - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
+    KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   register: apiserver_is_ready
   until: apiserver_is_ready.rc == 0
   retries: 6
@@ -13,7 +13,7 @@
 - name: Delete broken kube_control_plane nodes from cluster
   command: "{{ kubectl }} delete node {{ item }}"
   environment:
-    - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
+    KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   with_items: "{{ groups['broken_kube_control_plane'] }}"
   register: delete_broken_kube_masters
   failed_when: false
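These hunks change environment: from a one-element list of mappings to a single mapping, the shape the new ansible-lint schema checks expect and the simpler form the Ansible docs show. A sketch of the accepted spelling:

---
# env-demo.yml (hypothetical playbook)
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Keys of the mapping become environment variables
      command: env
      environment:
        KUBECONFIG: /root/.kube/config
      register: out
      changed_when: false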

diff --git a/… b/…
@@ -6,25 +6,25 @@
   changed_when: false
   check_mode: no
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
   when:
-    - groups['broken_etcd']
+    - inventory_hostname in groups['broken_etcd']

 - name: Set healthy fact
   set_fact:
     healthy: "{{ etcd_endpoint_health.stderr is match('Error: unhealthy cluster') }}"
   when:
-    - groups['broken_etcd']
+    - inventory_hostname in groups['broken_etcd']

 - name: Set has_quorum fact
   set_fact:
     has_quorum: "{{ etcd_endpoint_health.stdout_lines | select('match', '.*is healthy.*') | list | length >= etcd_endpoint_health.stderr_lines | select('match', '.*is unhealthy.*') | list | length }}"
   when:
-    - groups['broken_etcd']
+    - inventory_hostname in groups['broken_etcd']

 - include_tasks: recover_lost_quorum.yml
   when:
@@ -39,7 +39,7 @@
   with_items: "{{ groups['broken_etcd'] }}"
   ignore_errors: true  # noqa ignore-errors
   when:
-    - groups['broken_etcd']
+    - inventory_hostname in groups['broken_etcd']
     - has_quorum

 - name: Delete old certificates
@@ -56,7 +56,7 @@
   loop: "{{ delete_old_cerificates.results }}"
   changed_when: false
   when:
-    - groups['broken_etcd']
+    - inventory_hostname in groups['broken_etcd']
     - "item.rc != 0 and not 'No such file or directory' in item.stderr"

 - name: Get etcd cluster members
@@ -65,20 +65,20 @@
   changed_when: false
   check_mode: no
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
   when:
-    - groups['broken_etcd']
+    - inventory_hostname in groups['broken_etcd']
     - not healthy
     - has_quorum

 - name: Remove broken cluster members
   command: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ','').split(',')[0] }}"
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
@@ -87,7 +87,7 @@
     - "{{ groups['broken_etcd'] }}"
     - "{{ member_list.stdout_lines }}"
   when:
-    - groups['broken_etcd']
+    - inventory_hostname in groups['broken_etcd']
     - not healthy
     - has_quorum
     - hostvars[item[0]]['etcd_member_name'] == item[1].replace(' ','').split(',')[2]
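Besides scoping the when: conditions to the current host, the hunks above quote ETCDCTL_API: "3". Newer ansible-core validates that values under environment: are strings, so a bare integer is rejected or, at best, coerced with a warning, depending on the core version; the same reasoning turns CLOUDSDK_CORE_DISABLE_PROMPTS into "1" later in this diff. A task-level sketch (the etcdctl path is illustrative):

- name: Probe etcd health with string-typed environment values
  command: /usr/local/bin/etcdctl endpoint health   # hypothetical binary path
  environment:
    ETCDCTL_API: "3"   # must be a string; a bare 3 fails type validation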

diff --git a/… b/…
@@ -2,11 +2,11 @@
 - name: Save etcd snapshot
   command: "{{ bin_dir }}/etcdctl snapshot save /tmp/snapshot.db"
   environment:
-    - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
-    - ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
-    - ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
-    - ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses.split(',') | first }}"
-    - ETCDCTL_API: 3
+    ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
+    ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses.split(',') | first }}"
+    ETCDCTL_API: "3"
   when: etcd_snapshot is not defined

 - name: Transfer etcd snapshot to host
@@ -29,11 +29,11 @@
 - name: Restore etcd snapshot  # noqa 301 305
   shell: "{{ bin_dir }}/etcdctl snapshot restore /tmp/snapshot.db --name {{ etcd_member_name }} --initial-cluster {{ etcd_member_name }}={{ etcd_peer_url }} --initial-cluster-token k8s_etcd --initial-advertise-peer-urls {{ etcd_peer_url }} --data-dir {{ etcd_data_dir }}"
   environment:
-    - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
-    - ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
-    - ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
-    - ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
-    - ETCDCTL_API: 3
+    ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
+    ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
+    ETCDCTL_API: "3"

 - name: Remove etcd snapshot
   file:

diff --git a/… b/…
@@ -26,7 +26,9 @@
   - inventory_hostname in groups['etcd']

 - name: Lookup etcd member id
-  shell: "{{ bin_dir }}/etcdctl member list | grep {{ node_ip }} | cut -d, -f1"
+  shell: "set -o pipefail && {{ bin_dir }}/etcdctl member list | grep {{ node_ip }} | cut -d, -f1"
+  args:
+    executable: /bin/bash
   register: etcd_member_id
   ignore_errors: true  # noqa ignore-errors
   changed_when: false
@@ -34,7 +36,7 @@
   tags:
     - facts
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ kube_cert_dir + '/etcd/server.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '.pem' }}"
     ETCDCTL_KEY: "{{ kube_cert_dir + '/etcd/server.key' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '-key.pem' }}"
     ETCDCTL_CACERT: "{{ kube_cert_dir + '/etcd/ca.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/ca.pem' }}"
@@ -45,7 +47,7 @@
 - name: Remove etcd member from cluster
   command: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}"
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ kube_cert_dir + '/etcd/server.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '.pem' }}"
     ETCDCTL_KEY: "{{ kube_cert_dir + '/etcd/server.key' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '-key.pem' }}"
     ETCDCTL_CACERT: "{{ kube_cert_dir + '/etcd/ca.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/ca.pem' }}"

diff --git a/… b/…
@@ -178,7 +178,6 @@
   shell: set -o pipefail && mount | grep /var/lib/kubelet/ | awk '{print $3}' | tac
   args:
     executable: /bin/bash
-    warn: false
   check_mode: no
   register: mounted_dirs
   failed_when: false
@@ -279,6 +278,7 @@
     path: "{{ filedir_path }}"
     state: touch
     attributes: "-i"
+    mode: 0644
   loop: "{{ var_lib_kubelet_files_dirs_w_attrs.stdout_lines|select('search', 'Immutable')|list }}"
   loop_control:
     loop_var: file_dir_line

diff --git a/… b/…
@@ -7,6 +7,7 @@ pip install wheel
 pip install --upgrade setuptools
 pip install -r requirements.txt
 pip install -r tests/requirements.txt
+ansible-galaxy install -r tests/requirements.yml
 pre-commit install
 # prepare an inventory to test with
 INV=inventory/lab

diff --git a/… b/…
@@ -97,7 +97,7 @@
     - /var/log/dmesg
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
@@ -120,18 +120,22 @@
   no_log: True

 - name: Fetch results
-  fetch: src={{ item.name }} dest=/tmp/{{ archive_dirname }}/commands
+  fetch:
+    src: "{{ item.name }}"
+    dest: "/tmp/{{ archive_dirname }}/commands"
   with_items: "{{ commands }}"
   when: item.when | default(True)
   failed_when: false

 - name: Fetch logs
-  fetch: src={{ item }} dest=/tmp/{{ archive_dirname }}/logs
+  fetch:
+    src: "{{ item }}"
+    dest: "/tmp/{{ archive_dirname }}/logs"
   with_items: "{{ logs }}"
   failed_when: false

 - name: Pack results and logs
-  archive:
+  community.general.archive:
     path: "/tmp/{{ archive_dirname }}"
     dest: "{{ dir|default('.') }}/logs.tar.gz"
     remove: true
@@ -142,5 +146,7 @@
   run_once: true

 - name: Clean up collected command outputs
-  file: path={{ item.name }} state=absent
+  file:
+    path: "{{ item.name }}"
+    state: absent
   with_items: "{{ commands }}"
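The fetch and file rewrites above address ansible-lint's no-free-form rule: key=value strings are parsed leniently and make quoting mistakes easy, while explicit YAML arguments are validated per field. Both spellings below do the same thing; the second is the one the linter now requires (paths are illustrative):

# free-form, flagged by no-free-form:
- name: Fetch a log file
  fetch: src=/var/log/syslog dest=/tmp/collected/

# explicit YAML arguments:
- name: Fetch a log file
  fetch:
    src: /var/log/syslog
    dest: /tmp/collected/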

diff --git a/… b/…
@@ -11,6 +11,7 @@
     url: "{{ item.value.url }}"
     dest: "{{ images_dir }}/{{ item.value.filename }}"
     checksum: "{{ item.value.checksum }}"
+    mode: 0644
   loop: "{{ images|dict2items }}"

 - name: Unxz compressed images

diff --git a/… b/…
@@ -5,7 +5,7 @@
   tasks:
     - name: Provision a set of instances
-      ec2:
+      amazon.aws.ec2_instance:
         key_name: "{{ aws.key_name }}"
         aws_access_key: "{{ aws.access_key }}"
         aws_secret_key: "{{ aws.secret_key }}"

diff --git a/… b/…
@@ -52,7 +52,8 @@
     test_name: "{{ test_id |regex_replace('\\.', '-') }}"

 - name: show vars
-  debug: msg="{{ cloud_region }}, {{ cloud_image }}"
+  debug:
+    msg: "{{ cloud_region }}, {{ cloud_image }}"

 - name: set instance names
   set_fact:
@@ -64,7 +65,7 @@
     {%- endif -%}

 - name: Manage DO instances | {{ state }}
-  digital_ocean:
+  community.digitalocean.digital_ocean:
     unique_name: yes
     api_token: "{{ lookup('env','DO_API_TOKEN') }}"
     command: "droplet"

diff --git a/… b/…
@@ -46,7 +46,9 @@
   register: gce

 - name: Add instances to host group
-  add_host: hostname={{ item.public_ip }} groupname="waitfor_hosts"
+  add_host:
+    hostname: "{{ item.public_ip }}"
+    groupname: "waitfor_hosts"
   with_items: '{{ gce.instance_data }}'

 - name: Template the inventory  # noqa 404 CI inventory templates are not in role_path

diff --git a/… b/…
@@ -4,10 +4,10 @@
   tasks:
     - name: Gather EC2 facts
-      action: ec2_facts
+      amazon.aws.ec2_metadata_facts:

     - name: Terminate EC2 instances
-      ec2:
+      amazon.aws.ec2_instance:
         aws_access_key: "{{ aws_access_key }}"
         aws_secret_key: "{{ aws_secret_key }}"
         state: absent

diff --git a/… b/…
@@ -25,6 +25,6 @@
   changed_when:
     - delete_namespace.rc == 0
   retries: 12
-  delay: "10"
+  delay: 10
   until:
     - delete_namespace.rc != 0

diff --git a/… b/…
@@ -21,7 +21,7 @@
     file_name: "{{ ostype }}-{{ kube_network_plugin }}-{{ commit }}-logs.tar.gz"

 - name: Create a bucket
-  gc_storage:
+  community.google.gc_storage:
     bucket: "{{ test_name }}"
     mode: create
     permission: public-read
@@ -46,11 +46,12 @@
   get_url:
     url: https://dl.google.com/dl/cloudsdk/channels/rapid/install_google_cloud_sdk.bash
     dest: "{{ dir }}/gcp-installer.sh"
+    mode: 0644

 - name: Get gsutil tool
-  script: "{{ dir }}/gcp-installer.sh"
+  command: "{{ dir }}/gcp-installer.sh"
   environment:
-    CLOUDSDK_CORE_DISABLE_PROMPTS: 1
+    CLOUDSDK_CORE_DISABLE_PROMPTS: "1"
     CLOUDSDK_INSTALL_DIR: "{{ dir }}"
   no_log: True
   failed_when: false
@@ -63,7 +64,7 @@
   no_log: True

 - name: Upload collected diagnostic info
-  gc_storage:
+  community.google.gc_storage:
     bucket: "{{ test_name }}"
     mode: put
     permission: public-read

diff --git a/… b/…
@@ -1,11 +1,11 @@
 -r ../requirements.txt
-ansible-lint==5.4.0
+ansible-lint==6.16.2
 apache-libcloud==3.7.0
 ara[server]==1.6.1
 dopy==0.3.7
 molecule==5.0.1
 molecule-plugins[vagrant]==23.4.1
 python-vagrant==1.0.0
-pytest-testinfra==7.0.0
-tox==4.5.1
-yamllint==1.31.0
+pytest-testinfra==8.1.0
+tox==4.5.2
+yamllint==1.32.0

diff --git a/… b/…
@@ -0,0 +1,4 @@
+---
+collections:
+  - name: amazon.aws
+    version: 6.0.1

diff --git a/… b/…
@@ -3,6 +3,7 @@ set -euxo pipefail
 /usr/bin/python -m pip uninstall -y ansible ansible-base ansible-core
 /usr/bin/python -m pip install -r tests/requirements.txt
+ansible-galaxy install -r tests/requirements.yml
 mkdir -p /.ssh
 mkdir -p cluster-dump
 mkdir -p $HOME/.ssh

diff --git a/… b/…
@@ -32,7 +32,7 @@
     fail_msg: kubelet_rotate_server_certificates is {{ kubelet_rotate_server_certificates }} but no csr's found

 - name: Get Denied/Pending certificate signing requests
-  shell: "{{ bin_dir }}/kubectl get csr | grep -e Denied -e Pending || true"
+  shell: "set -o pipefail && {{ bin_dir }}/kubectl get csr | grep -e Denied -e Pending || true"
   register: get_csr_denied_pending
   changed_when: false
@@ -87,6 +87,7 @@
 - name: Run 2 agnhost pods in test ns
   shell:
     cmd: |
+      set -o pipefail
       cat <<EOF | {{ bin_dir }}/kubectl apply -f -
       apiVersion: v1
       kind: Pod
@@ -107,6 +108,7 @@
           seccompProfile:
             type: RuntimeDefault
       EOF
+    executable: /bin/bash
   changed_when: false
   loop:
     - agnhost1

diff --git a/… b/…
@@ -173,6 +173,7 @@
   # heuristics by using the cmd parameter like this:
   shell:
     cmd: |
+      set -o pipefail
       cat <<EOF | {{ bin_dir }}/kubectl create -f -
       apiVersion: "k8s.cni.cncf.io/v1"
       kind: NetworkAttachmentDefinition
@@ -196,6 +197,7 @@
         }
       }'
       EOF
+    executable: /bin/bash
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
     - kube_network_plugin_multus|default(false)|bool
@@ -207,6 +209,7 @@
   # heuristics by using the cmd parameter like this:
   shell:
     cmd: |
+      set -o pipefail
       cat <<EOF | {{ bin_dir }}/kubectl create -f -
       apiVersion: v1
       kind: Pod
@@ -220,6 +223,7 @@
       command: ["/bin/bash", "-c", "sleep 2000000000000"]
       image: dougbtv/centos-network
       EOF
+    executable: /bin/bash
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
     - kube_network_plugin_multus|default(false)|bool

diff --git a/… b/…
@@ -17,6 +17,7 @@
   get_url:
     url: "https://github.com/heptio/sonobuoy/releases/download/v{{ sonobuoy_version }}/sonobuoy_{{ sonobuoy_version }}_linux_{{ sonobuoy_arch }}.tar.gz"
     dest: /tmp/sonobuoy.tar.gz
+    mode: 0644

 - name: Extract sonobuoy
   unarchive:

diff --git a/… b/…
@@ -5,7 +5,7 @@
   when: inventory_hostname in groups['kube_control_plane']

 - name: Compress directory cluster-dump
-  archive:
+  community.general.archive:
     path: /tmp/cluster-dump
     dest: /tmp/cluster-dump.tgz
     mode: 0644