Do not use ‘yes/no’ for boolean values (#11472)

Use consistent boolean values (`true`/`false`) in Ansible playbooks.
pull/11477/head
Vlad Korolev 2024-08-28 01:30:56 -04:00 committed by GitHub
parent 5c5421e453
commit 9a7b021eb8
162 changed files with 507 additions and 508 deletions
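The change is mechanical and repeats across every file below: each YAML 1.1 truthy spelling (`yes`/`no`, `True`/`False`) becomes a canonical YAML 1.2 boolean (`true`/`false`). A minimal before/after sketch (a hypothetical play, not taken from the diff):

```yaml
# Before: YAML 1.1 truthy spellings, flagged by yamllint's default "truthy" rule
- name: Example play
  hosts: all
  gather_facts: False
  become: yes

# After: canonical YAML 1.2 booleans, accepted by the linter
- name: Example play
  hosts: all
  gather_facts: false
  become: true
```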

View File

@ -26,4 +26,3 @@ rules:
octal-values:
forbid-implicit-octal: true # yamllint defaults to false
forbid-explicit-octal: true # yamllint defaults to false
truthy: disable
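This first hunk removes `truthy: disable` from `.yamllint`, so the linter's default `truthy` rule takes effect and rejects `yes`/`no`/`True`/`False`. Written out explicitly, the default that now applies would look roughly like this (a sketch assuming yamllint's documented defaults, not part of the commit):

```yaml
rules:
  truthy:
    allowed-values: ['true', 'false']  # only canonical booleans pass
    check-keys: true                   # truthy keys (e.g. a bare "on:") are flagged too
```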

View File

@ -1,6 +1,6 @@
---
- name: Generate Azure inventory
hosts: localhost
gather_facts: False
gather_facts: false
roles:
- generate-inventory

View File

@ -1,6 +1,6 @@
---
- name: Generate Azure inventory
hosts: localhost
gather_facts: False
gather_facts: false
roles:
- generate-inventory_2

View File

@ -1,6 +1,6 @@
---
- name: Generate Azure templates
hosts: localhost
gather_facts: False
gather_facts: false
roles:
- generate-templates

View File

@ -1,7 +1,7 @@
---
- name: Create nodes as docker containers
hosts: localhost
gather_facts: False
gather_facts: false
roles:
- { role: dind-host }

View File

@ -15,7 +15,7 @@ docker_storage_options: -s overlay2 --storage-opt overlay2.override_kernel_check
dns_mode: coredns
deploy_netchecker: True
deploy_netchecker: true
netcheck_agent_image_repo: quay.io/l23network/k8s-netchecker-agent
netcheck_server_image_repo: quay.io/l23network/k8s-netchecker-server
netcheck_agent_image_tag: v1.0

View File

@ -14,7 +14,7 @@
src: "/bin/true"
dest: "{{ item }}"
state: link
force: yes
force: true
with_items:
# DIND box may have swap enabled, don't bother
- /sbin/swapoff
@ -58,7 +58,7 @@
name: "{{ distro_user }}"
uid: 1000
# groups: sudo
append: yes
append: true
- name: Allow password-less sudo to "{{ distro_user }}"
copy:

View File

@ -19,7 +19,7 @@
state: started
hostname: "{{ item }}"
command: "{{ distro_init }}"
# recreate: yes
# recreate: true
privileged: true
tmpfs:
- /sys/module/nf_conntrack/parameters

View File

@ -1,8 +1,8 @@
---
- name: Prepare Hypervisor to later install kubespray VMs
hosts: localhost
gather_facts: False
become: yes
gather_facts: false
become: true
vars:
bootstrap_os: none
roles:

View File

@ -11,12 +11,12 @@
- name: Install required packages
apt:
upgrade: yes
update_cache: yes
upgrade: true
update_cache: true
cache_valid_time: 3600
name: "{{ item }}"
state: present
install_recommends: no
install_recommends: false
with_items:
- dnsutils
- ntp

View File

@ -30,7 +30,7 @@
value: 1
sysctl_file: "{{ sysctl_file_path }}"
state: present
reload: yes
reload: true
- name: Set bridge-nf-call-{arptables,iptables} to 0
ansible.posix.sysctl:
@ -38,7 +38,7 @@
state: present
value: 0
sysctl_file: "{{ sysctl_file_path }}"
reload: yes
reload: true
with_items:
- net.bridge.bridge-nf-call-arptables
- net.bridge.bridge-nf-call-ip6tables

View File

@ -21,7 +21,7 @@ glusterfs_default_release: ""
You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).
```yaml
glusterfs_ppa_use: yes
glusterfs_ppa_use: true
glusterfs_ppa_version: "3.5"
```
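The README text above describes overriding `default_release`; a hypothetical override for the Debian Wheezy case it mentions (value assumed for illustration):

```yaml
glusterfs_default_release: wheezy-backports
```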

View File

@ -1,7 +1,7 @@
---
# For Ubuntu.
glusterfs_default_release: ""
glusterfs_ppa_use: yes
glusterfs_ppa_use: true
glusterfs_ppa_version: "4.1"
# Gluster configuration.

View File

@ -3,7 +3,7 @@
apt_repository:
repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
state: present
update_cache: yes
update_cache: true
register: glusterfs_ppa_added
when: glusterfs_ppa_use

View File

@ -1,7 +1,7 @@
---
# For Ubuntu.
glusterfs_default_release: ""
glusterfs_ppa_use: yes
glusterfs_ppa_use: true
glusterfs_ppa_version: "3.12"
# Gluster configuration.

View File

@ -43,7 +43,7 @@
service:
name: "{{ glusterfs_daemon }}"
state: started
enabled: yes
enabled: true
- name: Ensure Gluster brick and mount directories exist.
file:
@ -62,7 +62,7 @@
replicas: "{{ groups['gfs-cluster'] | length }}"
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
host: "{{ inventory_hostname }}"
force: yes
force: true
run_once: true
when: groups['gfs-cluster'] | length > 1
@ -73,7 +73,7 @@
brick: "{{ gluster_brick_dir }}"
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
host: "{{ inventory_hostname }}"
force: yes
force: true
run_once: true
when: groups['gfs-cluster'] | length <= 1

View File

@ -3,7 +3,7 @@
apt_repository:
repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
state: present
update_cache: yes
update_cache: true
register: glusterfs_ppa_added
when: glusterfs_ppa_use

View File

@ -6,6 +6,6 @@
- name: Teardown disks in heketi
hosts: heketi-node
become: yes
become: true
roles:
- { role: tear-down-disks }

View File

@ -1,7 +1,7 @@
---
- name: Collect container images for offline deployment
hosts: localhost
become: no
become: false
roles:
# Just load default variables from roles.

View File

@ -10,7 +10,7 @@
systemd_service:
name: firewalld
state: stopped
enabled: no
enabled: false
when:
"'firewalld.service' in services and services['firewalld.service'].status != 'not-found'"
@ -18,6 +18,6 @@
systemd_service:
name: ufw
state: stopped
enabled: no
enabled: false
when:
"'ufw.service' in services and services['ufw.service'].status != 'not-found'"

View File

@ -12,7 +12,7 @@
- name: Setup ssh config to use the bastion
hosts: localhost
gather_facts: False
gather_facts: false
roles:
- { role: kubespray-defaults}
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}

View File

@ -2,7 +2,7 @@
- name: Check Ansible version
hosts: all
gather_facts: false
become: no
become: false
run_once: true
vars:
minimal_ansible_version: 2.16.4

View File

@ -51,7 +51,7 @@
- name: Install bastion ssh config
hosts: bastion[0]
gather_facts: False
gather_facts: false
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }

View File

@ -7,7 +7,7 @@
- name: Prepare for etcd install
hosts: k8s_cluster:etcd
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@ -21,7 +21,7 @@
- name: Install Kubernetes nodes
hosts: k8s_cluster
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@ -30,7 +30,7 @@
- name: Install the control plane
hosts: kube_control_plane
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@ -41,7 +41,7 @@
- name: Invoke kubeadm and install a CNI
hosts: k8s_cluster
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@ -54,7 +54,7 @@
- name: Install Calico Route Reflector
hosts: calico_rr
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@ -63,7 +63,7 @@
- name: Patch Kubernetes for Windows
hosts: kube_control_plane[0]
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@ -72,7 +72,7 @@
- name: Install Kubernetes apps
hosts: kube_control_plane
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@ -86,7 +86,7 @@
- name: Apply resolv.conf changes now that cluster DNS is up
hosts: k8s_cluster
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:

View File

@ -15,7 +15,7 @@
- name: Gather facts
hosts: k8s_cluster:etcd:calico_rr
gather_facts: False
gather_facts: false
tags: always
tasks:
- name: Gather minimal facts

View File

@ -16,7 +16,7 @@
- name: Install etcd
hosts: etcd:kube_control_plane:_kubespray_needs_etcd
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:

View File

@ -4,13 +4,13 @@
- name: Confirm node removal
hosts: "{{ node | default('etcd:k8s_cluster:calico_rr') }}"
gather_facts: no
gather_facts: false
tasks:
- name: Confirm Execution
pause:
prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."
register: pause_result
run_once: True
run_once: true
when:
- not (skip_confirmation | default(false) | bool)
@ -25,7 +25,7 @@
- name: Reset node
hosts: "{{ node | default('kube_node') }}"
gather_facts: no
gather_facts: false
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults, when: reset_nodes | default(True) | bool }
@ -36,7 +36,7 @@
# Currently cannot remove first master or etcd
- name: Post node removal
hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
gather_facts: no
gather_facts: false
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults, when: reset_nodes | default(True) | bool }

View File

@ -7,13 +7,13 @@
- name: Reset cluster
hosts: etcd:k8s_cluster:calico_rr
gather_facts: False
gather_facts: false
pre_tasks:
- name: Reset Confirmation
pause:
prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster."
register: reset_confirmation_prompt
run_once: True
run_once: true
when:
- not (skip_confirmation | default(false) | bool)
- reset_confirmation is not defined

View File

@ -7,7 +7,7 @@
- name: Generate the etcd certificates beforehand
hosts: etcd:kube_control_plane
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@ -24,7 +24,7 @@
- name: Download images to ansible host cache via first kube_control_plane node
hosts: kube_control_plane[0]
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@ -34,7 +34,7 @@
- name: Target only workers to get kubelet installed and checking in on any new nodes (engine)
hosts: kube_node
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@ -53,7 +53,7 @@
- name: Target only workers to get kubelet installed and checking in on any new nodes (node)
hosts: kube_node
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@ -63,7 +63,7 @@
- name: Upload control plane certs and retrieve encryption key
hosts: kube_control_plane | first
environment: "{{ proxy_disable_env }}"
gather_facts: False
gather_facts: false
tags: kubeadm
roles:
- { role: kubespray-defaults }
@ -84,7 +84,7 @@
- name: Target only workers to get kubelet installed and checking in on any new nodes (network)
hosts: kube_node
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@ -96,7 +96,7 @@
- name: Apply resolv.conf changes now that cluster DNS is up
hosts: k8s_cluster
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:

View File

@ -7,7 +7,7 @@
- name: Download images to ansible host cache via first kube_control_plane node
hosts: kube_control_plane[0]
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@ -17,7 +17,7 @@
- name: Prepare nodes for upgrade
hosts: k8s_cluster:etcd:calico_rr
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@ -27,7 +27,7 @@
- name: Upgrade container engine on non-cluster nodes
hosts: etcd:calico_rr:!k8s_cluster
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
serial: "{{ serial | default('20%') }}"
@ -39,7 +39,7 @@
import_playbook: install_etcd.yml
- name: Handle upgrades to master components first to maintain backwards compat.
gather_facts: False
gather_facts: false
hosts: kube_control_plane
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
@ -62,7 +62,7 @@
- name: Upgrade calico and external cloud provider on all masters, calico-rrs, and nodes
hosts: kube_control_plane:calico_rr:kube_node
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: "{{ serial | default('20%') }}"
environment: "{{ proxy_disable_env }}"
@ -75,7 +75,7 @@
- name: Finally handle worker upgrades, based on given batch size
hosts: kube_node:calico_rr:!kube_control_plane
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
serial: "{{ serial | default('20%') }}"
@ -93,7 +93,7 @@
- name: Patch Kubernetes for Windows
hosts: kube_control_plane[0]
gather_facts: False
gather_facts: false
any_errors_fatal: true
environment: "{{ proxy_disable_env }}"
roles:
@ -102,7 +102,7 @@
- name: Install Calico Route Reflector
hosts: calico_rr
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@ -111,7 +111,7 @@
- name: Install Kubernetes apps
hosts: kube_control_plane
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@ -122,7 +122,7 @@
- name: Apply resolv.conf changes now that cluster DNS is up
hosts: k8s_cluster
gather_facts: False
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:

View File

@ -7,14 +7,14 @@ addusers:
etcd:
name: etcd
comment: "Etcd user"
create_home: no
system: yes
create_home: false
system: true
shell: /sbin/nologin
kube:
name: kube
comment: "Kubernetes user"
create_home: no
system: yes
create_home: false
system: true
shell: /sbin/nologin
group: "{{ kube_cert_group }}"

View File

@ -3,6 +3,6 @@ addusers:
- name: kube
comment: "Kubernetes user"
shell: /sbin/nologin
system: yes
system: true
group: "{{ kube_cert_group }}"
create_home: no
create_home: false

View File

@ -2,14 +2,14 @@
addusers:
- name: etcd
comment: "Etcd user"
create_home: yes
create_home: true
home: "{{ etcd_data_dir }}"
system: yes
system: true
shell: /sbin/nologin
- name: kube
comment: "Kubernetes user"
create_home: no
system: yes
create_home: false
system: true
shell: /sbin/nologin
group: "{{ kube_cert_group }}"

View File

@ -2,14 +2,14 @@
addusers:
- name: etcd
comment: "Etcd user"
create_home: yes
create_home: true
home: "{{ etcd_data_dir }}"
system: yes
system: true
shell: /sbin/nologin
- name: kube
comment: "Kubernetes user"
create_home: no
system: yes
create_home: false
system: true
shell: /sbin/nologin
group: "{{ kube_cert_group }}"

View File

@ -1,6 +1,6 @@
---
- name: Converge
hosts: all
gather_facts: no
gather_facts: false
roles:
- role: bootstrap-os

View File

@ -8,9 +8,9 @@
file: epel
description: Extra Packages for Enterprise Linux 7 - $basearch
baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
gpgcheck: yes
gpgcheck: true
gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
skip_if_unavailable: yes
enabled: yes
repo_gpgcheck: no
skip_if_unavailable: true
enabled: true
repo_gpgcheck: false
when: epel_enabled

View File

@ -119,9 +119,9 @@
- name: Check presence of fastestmirror.conf
stat:
path: /etc/yum/pluginconf.d/fastestmirror.conf
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: fastestmirror
# the fastestmirror plugin can actually slow down Ansible deployments

View File

@ -28,7 +28,7 @@
raw: "nohup bash -c 'sleep 5s && shutdown -r now'"
become: true
ignore_errors: true # noqa ignore-errors
ignore_unreachable: yes
ignore_unreachable: true
when: need_bootstrap.rc != 0
- name: Wait for the reboot to complete

View File

@ -22,7 +22,7 @@
- "{{ os_release_dict['ID'] }}.yml"
paths:
- vars/
skip: True
skip: true
- name: Include tasks
include_tasks: "{{ included_tasks_file }}"
with_first_found:

View File

@ -8,9 +8,9 @@
- name: Check that /etc/sysconfig/proxy file exists
stat:
path: /etc/sysconfig/proxy
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: stat_result
- name: Create the /etc/sysconfig/proxy empty file

View File

@ -87,9 +87,9 @@
- name: Check presence of fastestmirror.conf
stat:
path: /etc/yum/pluginconf.d/fastestmirror.conf
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: fastestmirror
# the fastestmirror plugin can actually slow down Ansible deployments

View File

@ -1,2 +1,2 @@
---
is_fedora_coreos: True
is_fedora_coreos: true

View File

@ -2,9 +2,9 @@
- name: Containerd-common | check if fedora coreos
stat:
path: /run/ostree-booted
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: ostree
- name: Containerd-common | set is_ostree

View File

@ -3,9 +3,9 @@
systemd_service:
name: containerd
state: restarted
enabled: yes
daemon-reload: yes
masked: no
enabled: true
daemon-reload: true
masked: false
listen: Restart containerd
- name: Containerd | wait for containerd

View File

@ -1,7 +1,7 @@
---
- name: Prepare
hosts: all
gather_facts: False
gather_facts: false
become: true
vars:
ignore_assert_errors: true
@ -19,7 +19,7 @@
- name: Prepare CNI
hosts: all
gather_facts: False
gather_facts: false
become: true
vars:
ignore_assert_errors: true

View File

@ -36,7 +36,7 @@
src: "{{ downloads.containerd.dest }}"
dest: "{{ containerd_bin_dir }}"
mode: "0755"
remote_src: yes
remote_src: true
extra_opts:
- --strip-components=1
notify: Restart containerd
@ -138,6 +138,6 @@
- name: Containerd | Ensure containerd is started and enabled
systemd_service:
name: containerd
daemon_reload: yes
enabled: yes
daemon_reload: true
enabled: true
state: started

View File

@ -3,7 +3,7 @@
systemd_service:
name: cri-dockerd
daemon_reload: true
masked: no
masked: false
listen: Restart and enable cri-dockerd
- name: Cri-dockerd | restart docker.service
@ -27,5 +27,5 @@
- name: Cri-dockerd | enable cri-dockerd service
service:
name: cri-dockerd.service
enabled: yes
enabled: true
listen: Restart and enable cri-dockerd

View File

@ -8,5 +8,5 @@
service:
name: crio
state: restarted
enabled: yes
enabled: true
listen: Restart crio

View File

@ -1,7 +1,7 @@
---
- name: Prepare
hosts: all
gather_facts: False
gather_facts: false
become: true
vars:
ignore_assert_errors: true
@ -19,7 +19,7 @@
- name: Prepare CNI
hosts: all
gather_facts: False
gather_facts: false
become: true
vars:
ignore_assert_errors: true

View File

@ -5,9 +5,9 @@
- name: Cri-o | check if fedora coreos
stat:
path: /run/ostree-booted
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: ostree
- name: Cri-o | set is_ostree

View File

@ -8,7 +8,7 @@
lineinfile:
dest: /etc/yum.repos.d/amzn2-extras.repo
line: "[amzn2extra-docker]"
check_mode: yes
check_mode: true
register: amzn2_extras_docker_repo
when:
- amzn2_extras_file_stat.stat.exists
@ -19,7 +19,7 @@
section: amzn2extra-docker
option: enabled
value: "0"
backup: yes
backup: true
mode: "0644"
when:
- amzn2_extras_file_stat.stat.exists

View File

@ -1,7 +1,7 @@
---
- name: Get crictl completion
command: "{{ bin_dir }}/crictl completion"
changed_when: False
changed_when: false
register: cri_completion
check_mode: false

View File

@ -39,7 +39,7 @@
state: present
- name: Docker-storage-setup | install and run container-storage-setup
become: yes
become: true
script: |
install_container_storage_setup.sh \
{{ docker_container_storage_setup_repository }} \

View File

@ -3,7 +3,7 @@
systemd_service:
name: docker
daemon_reload: true
masked: no
masked: false
listen: Restart docker
- name: Docker | reload docker.socket

View File

@ -2,9 +2,9 @@
- name: Check if fedora coreos
stat:
path: /run/ostree-booted
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: ostree
- name: Set is_ostree
@ -66,7 +66,7 @@
path: /etc/apt/sources.list
regexp: 'buster-backports'
state: absent
backup: yes
backup: true
when:
- ansible_os_family == 'Debian'
- ansible_distribution_release == "buster"
@ -183,7 +183,7 @@
- name: Ensure docker service is started and enabled
service:
name: "{{ item }}"
enabled: yes
enabled: true
state: started
with_items:
- docker

View File

@ -21,9 +21,9 @@
shell: set -o pipefail && grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/'
args:
executable: /bin/bash
changed_when: False
changed_when: false
register: system_nameservers
check_mode: no
check_mode: false
- name: Check system search domains
# noqa risky-shell-pipe - if resolv.conf has no search domain, grep will exit 1 which would force us to add failed_when: false
@ -31,9 +31,9 @@
shell: grep "^search" /etc/resolv.conf | sed -r 's/^search\s*([^#]+)\s*(#.*)?/\1/'
args:
executable: /bin/bash
changed_when: False
changed_when: false
register: system_search_domains
check_mode: no
check_mode: false
- name: Add system nameservers to docker options
set_fact:

View File

@ -14,7 +14,7 @@
src: "{{ item.src }}"
dest: "{{ bin_dir }}/{{ item.dest }}"
mode: "0755"
remote_src: yes
remote_src: true
with_items:
- { src: "{{ downloads.gvisor_runsc.dest }}", dest: "runsc" }
- { src: "{{ downloads.gvisor_containerd_shim.dest }}", dest: "containerd-shim-runsc-v1" }

View File

@ -11,7 +11,7 @@
mode: "0755"
owner: root
group: root
remote_src: yes
remote_src: true
- name: Kata-containers | Create config directory
file:

View File

@ -1,7 +1,7 @@
---
- name: Get nerdctl completion
command: "{{ bin_dir }}/nerdctl completion bash"
changed_when: False
changed_when: false
register: nerdctl_completion
check_mode: false

View File

@ -2,9 +2,9 @@
- name: Runc | check if fedora coreos
stat:
path: /run/ostree-booted
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: ostree
- name: Runc | set is_ostree

View File

@ -2,9 +2,9 @@
- name: Skopeo | check if fedora coreos
stat:
path: /run/ostree-booted
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: ostree
- name: Skopeo | set is_ostree

View File

@ -2,9 +2,9 @@
- name: Validate-container-engine | check if fedora coreos
stat:
path: /run/ostree-booted
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: ostree
tags:
- facts
@ -30,8 +30,8 @@
- name: Check if containerd is installed
find:
file_type: file
recurse: yes
use_regex: yes
recurse: true
use_regex: true
patterns:
- containerd.service$
paths:
@ -45,8 +45,8 @@
- name: Check if docker is installed
find:
file_type: file
recurse: yes
use_regex: yes
recurse: true
use_regex: true
patterns:
- docker.service$
paths:
@ -60,8 +60,8 @@
- name: Check if crio is installed
find:
file_type: file
recurse: yes
use_regex: yes
recurse: true
use_regex: true
patterns:
- crio.service$
paths:

View File

@ -5,7 +5,7 @@
shell: "{{ image_info_command }}"
register: docker_images
changed_when: false
check_mode: no
check_mode: false
when: not download_always_pull
- name: Check_pull_required | Set pull_required if the desired image is not yet loaded

View File

@ -26,12 +26,12 @@
- name: Download_container | Determine if image is in cache
stat:
path: "{{ image_path_cached }}"
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
delegate_to: localhost
connection: local
delegate_facts: no
delegate_facts: false
register: cache_image
changed_when: false
become: false
@ -57,7 +57,7 @@
- name: Download_container | Download image if required
command: "{{ image_pull_command_on_localhost if download_localhost else image_pull_command }} {{ image_reponame }}"
delegate_to: "{{ download_delegate if download_run_once else inventory_hostname }}"
delegate_facts: yes
delegate_facts: true
run_once: "{{ download_run_once }}"
register: pull_task_result
until: pull_task_result is succeeded
@ -72,7 +72,7 @@
- name: Download_container | Save and compress image
shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}" # noqa command-instead-of-shell - image_save_command_on_localhost contains a pipe, therefore requires shell
delegate_to: "{{ download_delegate }}"
delegate_facts: no
delegate_facts: false
register: container_save_status
failed_when: container_save_status.stderr
run_once: true
@ -99,7 +99,7 @@
dest: "{{ image_path_final }}"
use_ssh_args: true
mode: push
delegate_facts: no
delegate_facts: false
register: upload_image
failed_when: not upload_image
until: upload_image is succeeded

View File

@ -24,13 +24,13 @@
owner: "{{ download.owner | default(omit) }}"
mode: "0755"
state: directory
recurse: yes
recurse: true
- name: Download_file | Create local cache directory
file:
path: "{{ file_path_cached | dirname }}"
state: directory
recurse: yes
recurse: true
delegate_to: localhost
connection: local
delegate_facts: false
@ -45,7 +45,7 @@
file:
path: "{{ file_path_cached | dirname }}"
state: directory
recurse: yes
recurse: true
delegate_to: "{{ download_delegate }}"
delegate_facts: false
run_once: true

View File

@ -5,7 +5,7 @@
dest: "{{ download.dest | dirname }}"
owner: "{{ download.owner | default(omit) }}"
mode: "{{ download.mode | default(omit) }}"
copy: no
copy: false
extra_opts: "{{ download.unarchive_extra_opts | default(omit) }}"
when:
- download.unarchive | default(false)

View File

@ -62,7 +62,7 @@
register: docker_images
failed_when: false
changed_when: false
check_mode: no
check_mode: false
when: download_container
- name: Prep_download | Create staging directory on remote node
@ -81,7 +81,7 @@
mode: "0755"
delegate_to: localhost
connection: local
delegate_facts: no
delegate_facts: false
run_once: true
become: false
when:

View File

@ -23,9 +23,9 @@
- name: Stat etcd v2 data directory
stat:
path: "{{ etcd_data_dir }}/member"
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: etcd_data_dir_member
listen: Restart etcd
when: etcd_cluster_is_healthy.rc == 0

View File

@ -26,7 +26,7 @@
- name: Wait for etcd up
uri:
url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
validate_certs: no
validate_certs: false
client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem"
client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
register: result
@ -41,7 +41,7 @@
- name: Wait for etcd-events up
uri:
url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2383/health"
validate_certs: no
validate_certs: false
client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem"
client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
register: result

View File

@ -17,9 +17,9 @@
- name: "Check certs | Register ca and etcd admin/member certs on etcd hosts"
stat:
path: "{{ etcd_cert_dir }}/{{ item }}"
get_attributes: no
get_checksum: yes
get_mime: no
get_attributes: false
get_checksum: true
get_mime: false
register: etcd_member_certs
when: inventory_hostname in groups['etcd']
with_items:

View File

@ -6,8 +6,8 @@
register: etcd_cluster_is_healthy
failed_when: false
changed_when: false
check_mode: no
run_once: yes
check_mode: false
run_once: true
when:
- is_etcd_master
- etcd_cluster_setup
@ -27,8 +27,8 @@
register: etcd_events_cluster_is_healthy
failed_when: false
changed_when: false
check_mode: no
run_once: yes
check_mode: false
run_once: true
when:
- is_etcd_master
- etcd_events_cluster_setup
@ -49,7 +49,7 @@
template:
src: "etcd-{{ etcd_deployment_type }}.service.j2"
dest: /etc/systemd/system/etcd.service
backup: yes
backup: true
mode: "0644"
# FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release)
# Remove once we drop support for systemd < 250
@ -60,7 +60,7 @@
template:
src: "etcd-events-{{ etcd_deployment_type }}.service.j2"
dest: /etc/systemd/system/etcd-events.service
backup: yes
backup: true
mode: "0644"
validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:etcd-events-{{ etcd_deployment_type }}.service'"
# FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release)
@ -77,7 +77,7 @@
service:
name: etcd
state: started
enabled: yes
enabled: true
ignore_errors: "{{ etcd_cluster_is_healthy.rc == 0 }}" # noqa ignore-errors
when: is_etcd_master and etcd_cluster_setup
@ -86,7 +86,7 @@
service:
name: etcd-events
state: started
enabled: yes
enabled: true
ignore_errors: "{{ etcd_events_cluster_is_healthy.rc != 0 }}" # noqa ignore-errors
when: is_etcd_master and etcd_events_cluster_setup
@ -99,8 +99,8 @@
retries: "{{ etcd_retries }}"
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
check_mode: no
run_once: yes
check_mode: false
run_once: true
when:
- is_etcd_master
- etcd_cluster_setup
@ -122,8 +122,8 @@
retries: "{{ etcd_retries }}"
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
check_mode: no
run_once: yes
check_mode: false
run_once: true
when:
- is_etcd_master
- etcd_events_cluster_setup
@ -141,7 +141,7 @@
register: etcd_member_in_cluster
ignore_errors: true # noqa ignore-errors
changed_when: false
check_mode: no
check_mode: false
when: is_etcd_master and etcd_cluster_setup
tags:
- facts
@ -157,7 +157,7 @@
register: etcd_events_member_in_cluster
ignore_errors: true # noqa ignore-errors
changed_when: false
check_mode: no
check_mode: false
when: is_etcd_master and etcd_events_cluster_setup
tags:
- facts

View File

@ -6,7 +6,7 @@
state: directory
owner: "{{ etcd_owner }}"
mode: "{{ etcd_cert_dir_mode }}"
recurse: yes
recurse: true
- name: "Gen_certs | create etcd script dir (on {{ groups['etcd'][0] }})"
file:
@ -14,7 +14,7 @@
state: directory
owner: root
mode: "0700"
run_once: yes
run_once: true
when: inventory_hostname == groups['etcd'][0]
- name: Gen_certs | write openssl config
@ -22,7 +22,7 @@
src: "openssl.conf.j2"
dest: "{{ etcd_config_dir }}/openssl.conf"
mode: "0640"
run_once: yes
run_once: true
delegate_to: "{{ groups['etcd'][0] }}"
when:
- gen_certs | default(false)
@ -33,7 +33,7 @@
src: "make-ssl-etcd.sh.j2"
dest: "{{ etcd_script_dir }}/make-ssl-etcd.sh"
mode: "0700"
run_once: yes
run_once: true
when:
- gen_certs | default(false)
- inventory_hostname == groups['etcd'][0]
@ -43,7 +43,7 @@
environment:
MASTERS: "{{ groups['gen_master_certs_True'] | ansible.builtin.intersect(groups['etcd']) | join(' ') }}"
HOSTS: "{{ groups['gen_node_certs_True'] | ansible.builtin.intersect(groups['kube_control_plane']) | join(' ') }}"
run_once: yes
run_once: true
delegate_to: "{{ groups['etcd'][0] }}"
when: gen_certs | default(false)
notify: Set etcd_secret_changed
@ -52,7 +52,7 @@
command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
environment:
HOSTS: "{{ groups['gen_node_certs_True'] | ansible.builtin.intersect(groups['k8s_cluster']) | join(' ') }}"
run_once: yes
run_once: true
delegate_to: "{{ groups['etcd'][0] }}"
when:
- kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
@ -153,4 +153,4 @@
state: directory
owner: "{{ etcd_owner }}"
mode: "{{ etcd_cert_dir_mode }}"
recurse: yes
recurse: true

View File

@ -21,7 +21,7 @@
executable: /bin/bash
no_log: "{{ not (unsafe_show_logs | bool) }}"
register: etcd_node_certs
check_mode: no
check_mode: false
delegate_to: "{{ groups['etcd'][0] }}"
changed_when: false

View File

@ -29,7 +29,7 @@
dest: "{{ bin_dir }}/etcd"
owner: 'root'
mode: "0750"
backup: yes
backup: true
when: etcd_cluster_setup
- name: Install etcd-events launch script
@ -38,5 +38,5 @@
dest: "{{ bin_dir }}/etcd-events"
owner: 'root'
mode: "0750"
backup: yes
backup: true
when: etcd_events_cluster_setup

View File

@ -25,7 +25,7 @@
src: "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-{{ host_architecture }}/{{ item }}"
dest: "{{ bin_dir }}/{{ item }}"
mode: "0755"
remote_src: yes
remote_src: true
with_items:
- etcd
when: etcd_cluster_setup

View File

@ -32,7 +32,7 @@
executable: /bin/bash
register: etcd_events_member_in_cluster
changed_when: false
check_mode: no
check_mode: false
tags:
- facts
environment:
@ -46,4 +46,4 @@
service:
name: etcd-events
state: started
enabled: yes
enabled: true

View File

@ -33,7 +33,7 @@
executable: /bin/bash
register: etcd_member_in_cluster
changed_when: false
check_mode: no
check_mode: false
retries: "{{ etcd_retries }}"
delay: "{{ retry_stagger | random + 3 }}"
until: etcd_member_in_cluster.rc == 0
@ -50,4 +50,4 @@
service:
name: etcd
state: started
enabled: yes
enabled: true

View File

@ -33,7 +33,7 @@
command: "openssl x509 -in {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem -noout -serial"
register: "etcd_client_cert_serial_result"
changed_when: false
check_mode: no
check_mode: false
when:
- kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"

View File

@ -24,7 +24,7 @@
unarchive:
src: "{{ downloads.etcd.dest }}"
dest: "{{ local_release_dir }}/"
remote_src: yes
remote_src: true
when: container_manager in ['crio', 'containerd']
- name: Copy etcdctl and etcdutl binary from download dir
@ -32,7 +32,7 @@
src: "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-{{ host_architecture }}/{{ item }}"
dest: "{{ bin_dir }}/{{ item }}"
mode: "0755"
remote_src: yes
remote_src: true
with_items:
- etcdctl
- etcdutl

View File

@ -2,7 +2,7 @@
- name: Kubernetes Apps | Wait for kube-apiserver
uri:
url: "{{ kube_apiserver_endpoint }}/healthz"
validate_certs: no
validate_certs: false
client_cert: "{{ kube_apiserver_client_cert }}"
client_key: "{{ kube_apiserver_client_key }}"
register: result

View File

@ -8,10 +8,10 @@
ansible.posix.synchronize:
src: "{{ downloads.yq.dest }}"
dest: "{{ bin_dir }}/yq"
compress: no
perms: yes
owner: no
group: no
compress: false
perms: true
owner: false
group: false
delegate_to: "{{ inventory_hostname }}"
- name: Kubernetes Apps | Set ArgoCD template list
@ -49,17 +49,17 @@
ansible.posix.synchronize:
src: "{{ local_release_dir }}/{{ item.file }}"
dest: "{{ kube_config_dir }}/{{ item.file }}"
compress: no
perms: yes
owner: no
group: no
compress: false
perms: true
owner: false
group: false
delegate_to: "{{ inventory_hostname }}"
with_items: "{{ argocd_templates | selectattr('url', 'defined') | list }}"
when:
- "inventory_hostname == groups['kube_control_plane'][0]"
- name: Kubernetes Apps | Set ArgoCD namespace for remote manifests
become: yes
become: true
command: |
{{ bin_dir }}/yq eval-all -i '.metadata.namespace="{{ argocd_namespace }}"' {{ kube_config_dir }}/{{ item.file }}
with_items: "{{ argocd_templates | selectattr('url', 'defined') | list }}"
@ -69,7 +69,7 @@
- "inventory_hostname == groups['kube_control_plane'][0]"
- name: Kubernetes Apps | Create ArgoCD manifests from templates
become: yes
become: true
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
@ -81,7 +81,7 @@
- "inventory_hostname == groups['kube_control_plane'][0]"
- name: Kubernetes Apps | Install ArgoCD
become: yes
become: true
kube:
name: ArgoCD
kubectl: "{{ bin_dir }}/kubectl"
@ -93,7 +93,7 @@
# https://github.com/argoproj/argo-cd/blob/master/docs/faq.md#i-forgot-the-admin-password-how-do-i-reset-it
- name: Kubernetes Apps | Set ArgoCD custom admin password
become: yes
become: true
shell: |
{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n {{ argocd_namespace }} patch secret argocd-secret -p \
'{

View File

@ -2,7 +2,7 @@
- name: Kubernetes Apps | Wait for kube-apiserver
uri:
url: "{{ kube_apiserver_endpoint }}/healthz"
validate_certs: no
validate_certs: false
client_cert: "{{ kube_apiserver_client_cert }}"
client_key: "{{ kube_apiserver_client_key }}"
register: result

View File

@ -21,7 +21,7 @@ vsphere_csi_controller_replicas: 1
csi_endpoint: '{% if external_vsphere_version >= "7.0u1" %}/csi{% else %}/var/lib/csi/sockets/pluginproxy{% endif %}'
vsphere_csi_aggressive_node_drain: False
vsphere_csi_aggressive_node_drain: false
vsphere_csi_aggressive_node_unreachable_timeout: 300
vsphere_csi_aggressive_node_not_ready_timeout: 300

View File

@ -37,13 +37,13 @@
- name: Helm | Get helm completion
command: "{{ bin_dir }}/helm completion bash"
changed_when: False
changed_when: false
register: helm_completion
check_mode: False
check_mode: false
- name: Helm | Install helm completion
copy:
dest: /etc/bash_completion.d/helm.sh
content: "{{ helm_completion.stdout }}"
mode: "0755"
become: True
become: true

View File

@ -2,13 +2,13 @@
- name: Get installed pip version
command: "{{ ansible_python_interpreter if ansible_python_interpreter is defined else 'python' }} -m pip --version"
register: pip_version_output
ignore_errors: yes
ignore_errors: true
changed_when: false
- name: Get installed PyYAML version
command: "{{ ansible_python_interpreter if ansible_python_interpreter is defined else 'python' }} -m pip show PyYAML"
register: pyyaml_version_output
ignore_errors: yes
ignore_errors: true
changed_when: false
- name: Install pip

View File

@ -24,15 +24,15 @@
- name: Krew | Get krew completion
command: "{{ local_release_dir }}/krew-{{ host_os }}_{{ image_arch }} completion bash"
changed_when: False
changed_when: false
register: krew_completion
check_mode: False
ignore_errors: yes # noqa ignore-errors
check_mode: false
ignore_errors: true # noqa ignore-errors
- name: Krew | Install krew completion
copy:
dest: /etc/bash_completion.d/krew.sh
content: "{{ krew_completion.stdout }}"
mode: "0755"
become: True
become: true
when: krew_completion.rc == 0

View File

@ -13,7 +13,7 @@
- name: Weave | Wait for Weave to become available
uri:
url: http://127.0.0.1:6784/status
return_content: yes
return_content: true
register: weave_status
retries: 180
delay: 5

View File

@ -30,9 +30,9 @@
copy:
src: "{{ kube_config_dir }}/admin.conf"
dest: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
remote_src: yes
remote_src: true
mode: "0600"
backup: yes
backup: true
- name: Create kube artifacts dir
file:
@ -41,8 +41,8 @@
state: directory
delegate_to: localhost
connection: local
become: no
run_once: yes
become: false
run_once: true
when: kubeconfig_localhost
- name: Wait for k8s apiserver
@ -54,7 +54,7 @@
- name: Get admin kubeconfig from remote host
slurp:
src: "{{ kube_config_dir }}/admin.conf"
run_once: yes
run_once: true
register: raw_admin_kubeconfig
when: kubeconfig_localhost
@ -83,21 +83,21 @@
mode: "0600"
delegate_to: localhost
connection: local
become: no
run_once: yes
become: false
run_once: true
when: kubeconfig_localhost
- name: Copy kubectl binary to ansible host
fetch:
src: "{{ bin_dir }}/kubectl"
dest: "{{ artifacts_dir }}/kubectl"
flat: yes
validate_checksum: no
flat: true
validate_checksum: false
register: copy_binary_result
until: copy_binary_result is not failed
retries: 20
become: no
run_once: yes
become: false
run_once: true
when: kubectl_localhost
- name: Create helper script kubectl.sh on ansible host
@ -107,8 +107,8 @@
${BASH_SOURCE%/*}/kubectl --kubeconfig=${BASH_SOURCE%/*}/admin.conf "$@"
dest: "{{ artifacts_dir }}/kubectl.sh"
mode: "0755"
become: no
run_once: yes
become: false
run_once: true
delegate_to: localhost
connection: local
when: kubectl_localhost and kubeconfig_localhost

View File

@ -81,7 +81,7 @@
endpoint: "{{ kube_scheduler_bind_address if kube_scheduler_bind_address != '0.0.0.0' else 'localhost' }}"
uri:
url: https://{{ endpoint }}:10259/healthz
validate_certs: no
validate_certs: false
register: scheduler_result
until: scheduler_result.status == 200
retries: 60
@ -95,7 +95,7 @@
endpoint: "{{ kube_controller_manager_bind_address if kube_controller_manager_bind_address != '0.0.0.0' else 'localhost' }}"
uri:
url: https://{{ endpoint }}:10257/healthz
validate_certs: no
validate_certs: false
register: controller_manager_result
until: controller_manager_result.status == 200
retries: 60
@ -107,7 +107,7 @@
- name: Master | wait for the apiserver to be running
uri:
url: "{{ kube_apiserver_endpoint }}/healthz"
validate_certs: no
validate_certs: false
register: result
until: result.status == 200
retries: 60

View File

@ -3,7 +3,7 @@
- name: Check which kube-control nodes are already members of the cluster
command: "{{ bin_dir }}/kubectl get nodes --selector=node-role.kubernetes.io/control-plane -o json"
register: kube_control_planes_raw
ignore_errors: yes
ignore_errors: true
changed_when: false
- name: Set fact joined_control_planes
@ -12,7 +12,7 @@
delegate_to: "{{ item }}"
loop: "{{ groups['kube_control_plane'] }}"
when: kube_control_planes_raw is succeeded
run_once: yes
run_once: true
- name: Set fact first_kube_control_plane
set_fact:

View File

@ -2,9 +2,9 @@
- name: Check if secret for encrypting data at rest already exist
stat:
path: "{{ kube_cert_dir }}/secrets_encryption.yaml"
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: secrets_encryption_file
- name: Slurp secrets_encryption file if it exists

View File

@ -4,7 +4,7 @@
src: "{{ kube_cert_dir }}/{{ item }}"
dest: "{{ kube_cert_dir }}/{{ item }}.old"
mode: preserve
remote_src: yes
remote_src: true
with_items:
- apiserver.crt
- apiserver.key
@ -19,7 +19,7 @@
src: "{{ kube_config_dir }}/{{ item }}"
dest: "{{ kube_config_dir }}/{{ item }}.old"
mode: preserve
remote_src: yes
remote_src: true
with_items:
- admin.conf
- controller-manager.conf

View File

@ -5,7 +5,7 @@
dest: "{{ kube_config_dir }}/{{ item }}"
regexp: '^ server: https'
line: ' server: {{ kube_apiserver_endpoint }}'
backup: yes
backup: true
with_items:
- admin.conf
- controller-manager.conf

View File

@ -25,7 +25,7 @@
- name: Parse certificate key if not set
set_fact:
kubeadm_certificate_key: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'].stdout_lines[-1] | trim }}"
run_once: yes
run_once: true
when:
- hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is defined
- hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is not skipped
@ -35,7 +35,7 @@
src: "kubeadm-controlplane.{{ kubeadmConfig_api_version }}.yaml.j2"
dest: "{{ kube_config_dir }}/kubeadm-controlplane.yaml"
mode: "0640"
backup: yes
backup: true
when:
- inventory_hostname != first_kube_control_plane
- not kubeadm_already_run.stat.exists

View File

@ -13,9 +13,9 @@
- name: Kubeadm | Check if kubeadm has already run
stat:
path: "/var/lib/kubelet/config.yaml"
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: kubeadm_already_run
- name: Kubeadm | Backup kubeadm certs / kubeconfig

View File

@ -4,7 +4,7 @@
path: "{{ kube_config_dir }}/kubelet.conf"
regexp: '^ client-certificate-data: '
line: ' client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem'
backup: yes
backup: true
notify:
- "Master | reload kubelet"
@ -13,6 +13,6 @@
path: "{{ kube_config_dir }}/kubelet.conf"
regexp: '^ client-key-data: '
line: ' client-key: /var/lib/kubelet/pki/kubelet-client-current.pem'
backup: yes
backup: true
notify:
- "Master | reload kubelet"

View File

@ -120,7 +120,7 @@
- name: Renew K8S control plane certificates monthly 2/2
systemd_service:
name: k8s-certs-renew.timer
enabled: yes
enabled: true
state: started
daemon_reload: "{{ k8s_certs_units is changed }}"
when: auto_renew_certificates

View File

@ -14,17 +14,17 @@
- name: Check if kubelet.conf exists
stat:
path: "{{ kube_config_dir }}/kubelet.conf"
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: kubelet_conf
- name: Check if kubeadm CA cert is accessible
stat:
path: "{{ kube_cert_dir }}/ca.crt"
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: kubeadm_ca_stat
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true
@ -79,7 +79,7 @@
template:
src: "kubeadm-client.conf.{{ kubeadmConfig_api_version }}.j2"
dest: "{{ kube_config_dir }}/kubeadm-client.conf"
backup: yes
backup: true
mode: "0640"
when: not is_kube_master
@ -140,7 +140,7 @@
dest: "{{ kube_config_dir }}/kubelet.conf"
regexp: 'server:'
line: ' server: {{ kube_apiserver_endpoint }}'
backup: yes
backup: true
when:
- kubeadm_config_api_fqdn is not defined
- not is_kube_master
@ -152,7 +152,7 @@
dest: "{{ kube_config_dir }}/kubelet.conf"
regexp: '^ server: https'
line: ' server: {{ kube_apiserver_endpoint }}'
backup: yes
backup: true
when:
- not is_kube_master
- loadbalancer_apiserver is defined

View File

@ -2,7 +2,7 @@
- name: Kubernetes Apps | Wait for kube-apiserver
uri:
url: "{{ kube_apiserver_endpoint }}/healthz"
validate_certs: no
validate_certs: false
client_cert: "{{ kube_apiserver_client_cert }}"
client_key: "{{ kube_apiserver_client_key }}"
register: result

View File

@ -8,7 +8,7 @@
executable: /bin/bash
register: docker_cgroup_driver_result
changed_when: false
check_mode: no
check_mode: false
- name: Set kubelet_cgroup_driver_detected fact for docker
set_fact:

Some files were not shown because too many files have changed in this diff.