Move to Ansible 3.4.0 (#7672)
* Ansible: move to Ansible 3.4.0, which uses ansible-base 2.10.10
* Docs: add a note about the ansible upgrade path past 2.9.x
* CI: ensure ansible is removed before ansible 3.x is installed, to avoid pip failures
* Ansible: use newer ansible-lint
* Fix issues found by ansible-lint 5.0.11:
  * syntax issues
  * risky-file-permissions
  * var-naming
  * role-name
  * molecule tests
* Mitogen: use 0.3.0rc1, which adds support for ansible 2.10+
* Pin ansible-base to 2.10.11 to get the package fix on RHEL8
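For reviewers, a minimal sketch of the remove-then-install sequence that the CI jobs and the docs note rely on; the requirements file path is the one CI uses, everything else is illustrative:

```ShellSession
# Pre-2.10 ansible must be uninstalled first: upgrading in place fails because
# the new ansible 3.x / ansible-base split conflicts with the old 2.9 package layout.
python -m pip uninstall -y ansible
python -m pip install -r tests/requirements.txt   # pins ansible==3.4.0 and ansible-base==2.10.11
```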
parent b0e4c375a7, commit 7516fe142f
@ -18,3 +18,13 @@ skip_list:
  # While it can be useful to have these metadata available, they are also available in the existing documentation.
  # (Disabled in May 2019)
  - '701'

  # [role-name] "meta/main.yml" Role name role-name does not match ``^+$`` pattern
  # Meta roles in Kubespray don't need proper names
  # (Disabled in June 2021)
  - 'role-name'

  # [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
  # In Kubespray we use variables that use camelCase to match their k8s counterparts
  # (Disabled in June 2021)
  - 'var-naming'
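To reproduce the lint run locally, something along these lines should work; ansible-lint picks up the `.ansible-lint` config from the repository root, and the target playbook here is just an example:

```ShellSession
pip install ansible-lint==5.0.11
cd kubespray/
ansible-lint -v cluster.yml   # honours the skip_list above
```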
@ -37,6 +37,7 @@ variables:
before_script:
  - ./tests/scripts/rebase.sh
  - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
  - python -m pip uninstall -y ansible
  - python -m pip install -r tests/requirements.txt
  - mkdir -p /.ssh
@ -53,6 +53,7 @@ tox-inventory-builder:
    - ./tests/scripts/rebase.sh
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip uninstall -y ansible
    - python -m pip install -r tests/requirements.txt
  script:
    - pip3 install tox
@ -11,6 +11,7 @@ molecule_tests:
    - tests/scripts/rebase.sh
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip uninstall -y ansible
    - python -m pip install -r tests/requirements.txt
    - ./tests/scripts/vagrant_clean.sh
  script:

@ -31,6 +32,7 @@ molecule_tests:
  before_script:
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip uninstall -y ansible
    - python -m pip install -r tests/requirements.txt
    - ./tests/scripts/vagrant_clean.sh
  script:
@ -4,6 +4,7 @@
  become: no
  vars:
    minimal_ansible_version: 2.9.0
    minimal_ansible_version_2_10: 2.10.11
    maximal_ansible_version: 2.11.0
    ansible_connection: local
  tasks:

@ -16,6 +17,17 @@
      tags:
        - check

    - name: "Check Ansible version > {{ minimal_ansible_version_2_10 }} when using ansible 2.10"
      assert:
        msg: "When using Ansible 2.10, the minimum supported version is {{ minimal_ansible_version_2_10 }}"
        that:
          - ansible_version.string is version(minimal_ansible_version_2_10, ">=")
          - ansible_version.string is version(maximal_ansible_version, "<")
      when:
        - ansible_version.string is version('2.10.0', ">=")
      tags:
        - check

    - name: "Check that python netaddr is installed"
      assert:
        msg: "Python netaddr is not present"
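As a quick sanity check after upgrading, the reported core version should now fall inside the new bounds; the output below is what an ansible-base 2.10.11 install typically prints:

```ShellSession
$ ansible --version | head -1
ansible 2.10.11
```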
@ -12,3 +12,4 @@
|
|||
template:
|
||||
src: inventory.j2
|
||||
dest: "{{ playbook_dir }}/inventory"
|
||||
mode: 0644
|
||||
|
|
|
@ -22,8 +22,10 @@
|
|||
template:
|
||||
src: inventory.j2
|
||||
dest: "{{ playbook_dir }}/inventory"
|
||||
mode: 0644
|
||||
|
||||
- name: Generate Load Balancer variables
|
||||
template:
|
||||
src: loadbalancer_vars.j2
|
||||
dest: "{{ playbook_dir }}/loadbalancer_vars.yml"
|
||||
mode: 0644
|
||||
|
|
|
@ -8,11 +8,13 @@
|
|||
path: "{{ base_dir }}"
|
||||
state: directory
|
||||
recurse: true
|
||||
mode: 0755
|
||||
|
||||
- name: Store json files in base_dir
|
||||
template:
|
||||
src: "{{ item }}"
|
||||
dest: "{{ base_dir }}/{{ item }}"
|
||||
mode: 0644
|
||||
with_items:
|
||||
- network.json
|
||||
- storage.json
|
||||
|
|
|
@ -35,6 +35,7 @@
|
|||
path-exclude=/usr/share/doc/*
|
||||
path-include=/usr/share/doc/*/copyright
|
||||
dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
|
||||
mode: 0644
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
|
||||
|
@ -63,6 +64,7 @@
|
|||
copy:
|
||||
content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL"
|
||||
dest: "/etc/sudoers.d/{{ distro_user }}"
|
||||
mode: 0640
|
||||
|
||||
- name: Add my pubkey to "{{ distro_user }}" user authorized keys
|
||||
authorized_key:
|
||||
|
|
|
@ -11,6 +11,7 @@
|
|||
state: directory
|
||||
owner: "{{ k8s_deployment_user }}"
|
||||
group: "{{ k8s_deployment_user }}"
|
||||
mode: 0700
|
||||
|
||||
- name: Configure sudo for deployment user
|
||||
copy:
|
||||
|
|
|
@ -82,6 +82,7 @@
|
|||
template:
|
||||
dest: "{{ gluster_mount_dir }}/.test-file.txt"
|
||||
src: test-file.txt
|
||||
mode: 0644
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
- name: Unmount glusterfs
|
||||
|
|
|
@ -1,5 +0,0 @@
|
|||
---
|
||||
- hosts: all
|
||||
|
||||
roles:
|
||||
- role_under_test
|
|
@ -1,7 +1,10 @@
|
|||
---
|
||||
- name: "Kubernetes Apps | Lay Down Heketi Bootstrap"
|
||||
become: true
|
||||
template: { src: "heketi-bootstrap.json.j2", dest: "{{ kube_config_dir }}/heketi-bootstrap.json" }
|
||||
template:
|
||||
src: "heketi-bootstrap.json.j2"
|
||||
dest: "{{ kube_config_dir }}/heketi-bootstrap.json"
|
||||
mode: 0640
|
||||
register: "rendering"
|
||||
- name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
|
||||
kube:
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
template:
|
||||
src: "topology.json.j2"
|
||||
dest: "{{ kube_config_dir }}/topology.json"
|
||||
mode: 0644
|
||||
- name: "Copy topology configuration into container."
|
||||
changed_when: false
|
||||
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
|
||||
|
|
|
@ -1,6 +1,9 @@
|
|||
---
|
||||
- name: "Kubernetes Apps | Lay Down GlusterFS Daemonset"
|
||||
template: { src: "glusterfs-daemonset.json.j2", dest: "{{ kube_config_dir }}/glusterfs-daemonset.json" }
|
||||
template:
|
||||
src: "glusterfs-daemonset.json.j2"
|
||||
dest: "{{ kube_config_dir }}/glusterfs-daemonset.json"
|
||||
mode: 0644
|
||||
become: true
|
||||
register: "rendering"
|
||||
- name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
|
||||
|
@ -27,7 +30,10 @@
|
|||
delay: 5
|
||||
|
||||
- name: "Kubernetes Apps | Lay Down Heketi Service Account"
|
||||
template: { src: "heketi-service-account.json.j2", dest: "{{ kube_config_dir }}/heketi-service-account.json" }
|
||||
template:
|
||||
src: "heketi-service-account.json.j2"
|
||||
dest: "{{ kube_config_dir }}/heketi-service-account.json"
|
||||
mode: 0644
|
||||
become: true
|
||||
register: "rendering"
|
||||
- name: "Kubernetes Apps | Install and configure Heketi Service Account"
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
template:
|
||||
src: "heketi-deployment.json.j2"
|
||||
dest: "{{ kube_config_dir }}/heketi-deployment.json"
|
||||
mode: 0644
|
||||
register: "rendering"
|
||||
|
||||
- name: "Kubernetes Apps | Install and configure Heketi"
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
changed_when: false
|
||||
|
||||
- name: "Kubernetes Apps | Deploy cluster role binding."
|
||||
when: "clusterrolebinding_state.stdout == \"\""
|
||||
when: "clusterrolebinding_state.stdout | length > 0"
|
||||
command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
|
||||
|
||||
- name: Get clusterrolebindings again
|
||||
|
@ -15,7 +15,7 @@
|
|||
|
||||
- name: Make sure that clusterrolebindings are present now
|
||||
assert:
|
||||
that: "clusterrolebinding_state.stdout != \"\""
|
||||
that: "clusterrolebinding_state.stdout | length > 0"
|
||||
msg: "Cluster role binding is not present."
|
||||
|
||||
- name: Get the heketi-config-secret secret
|
||||
|
@ -28,9 +28,10 @@
|
|||
template:
|
||||
src: "heketi.json.j2"
|
||||
dest: "{{ kube_config_dir }}/heketi.json"
|
||||
mode: 0644
|
||||
|
||||
- name: "Deploy Heketi config secret"
|
||||
when: "secret_state.stdout == \"\""
|
||||
when: "secret_state.stdout | length > 0"
|
||||
command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
|
||||
|
||||
- name: Get the heketi-config-secret secret again
|
||||
|
|
|
@ -2,7 +2,10 @@
|
|||
- name: "Kubernetes Apps | Lay Down Heketi Storage"
|
||||
become: true
|
||||
vars: { nodes: "{{ groups['heketi-node'] }}" }
|
||||
template: { src: "heketi-storage.json.j2", dest: "{{ kube_config_dir }}/heketi-storage.json" }
|
||||
template:
|
||||
src: "heketi-storage.json.j2"
|
||||
dest: "{{ kube_config_dir }}/heketi-storage.json"
|
||||
mode: 0644
|
||||
register: "rendering"
|
||||
- name: "Kubernetes Apps | Install and configure Heketi Storage"
|
||||
kube:
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
template:
|
||||
src: "storageclass.yml.j2"
|
||||
dest: "{{ kube_config_dir }}/storageclass.yml"
|
||||
mode: 0644
|
||||
register: "rendering"
|
||||
- name: "Kubernetes Apps | Install and configure Storace Class"
|
||||
kube:
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
template:
|
||||
src: "topology.json.j2"
|
||||
dest: "{{ kube_config_dir }}/topology.json"
|
||||
mode: 0644
|
||||
- name: "Copy topology configuration into container." # noqa 503
|
||||
when: "rendering.changed"
|
||||
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
become: true
|
||||
shell: "pvs {{ disk_volume_device_1 }} --option vg_name | tail -n+2"
|
||||
register: "volume_groups"
|
||||
ignore_errors: true
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
changed_when: false
|
||||
|
||||
- name: "Remove volume groups." # noqa 301
|
||||
|
@ -35,7 +35,7 @@
|
|||
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
|
||||
become: true
|
||||
command: "pvremove {{ disk_volume_device_1 }} --yes"
|
||||
ignore_errors: true
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
|
||||
- name: "Remove lvm utils (RedHat)"
|
||||
become: true
|
||||
|
|
|
@ -1,51 +1,51 @@
|
|||
---
|
||||
- name: "Remove storage class." # noqa 301
|
||||
- name: Remove storage class. # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete storageclass gluster"
|
||||
ignore_errors: true
|
||||
- name: "Tear down heketi." # noqa 301
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Tear down heketi. # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
|
||||
ignore_errors: true
|
||||
- name: "Tear down heketi." # noqa 301
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Tear down heketi. # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
|
||||
ignore_errors: true
|
||||
- name: "Tear down bootstrap."
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Tear down bootstrap.
|
||||
include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
|
||||
- name: "Ensure there is nothing left over." # noqa 301
|
||||
- name: Ensure there is nothing left over. # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
|
||||
register: "heketi_result"
|
||||
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
|
||||
retries: 60
|
||||
delay: 5
|
||||
- name: "Ensure there is nothing left over." # noqa 301
|
||||
- name: Ensure there is nothing left over. # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
|
||||
register: "heketi_result"
|
||||
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
|
||||
retries: 60
|
||||
delay: 5
|
||||
- name: "Tear down glusterfs." # noqa 301
|
||||
- name: Tear down glusterfs. # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
|
||||
ignore_errors: true
|
||||
- name: "Remove heketi storage service." # noqa 301
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi storage service. # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
|
||||
ignore_errors: true
|
||||
- name: "Remove heketi gluster role binding" # noqa 301
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi gluster role binding # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
|
||||
ignore_errors: true
|
||||
- name: "Remove heketi config secret" # noqa 301
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi config secret # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
|
||||
ignore_errors: true
|
||||
- name: "Remove heketi db backup" # noqa 301
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi db backup # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
|
||||
ignore_errors: true
|
||||
- name: "Remove heketi service account" # noqa 301
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi service account # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
|
||||
ignore_errors: true
|
||||
- name: "Get secrets"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Get secrets
|
||||
command: "{{ bin_dir }}/kubectl get secrets --output=\"json\""
|
||||
register: "secrets"
|
||||
changed_when: false
|
||||
- name: "Remove heketi storage secret"
|
||||
- name: Remove heketi storage secret
|
||||
vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
|
||||
command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}"
|
||||
when: "storage_query is defined"
|
||||
ignore_errors: true
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
|
|
|
@ -187,3 +187,28 @@ For more information about Ansible and bastion hosts, read
## Mitogen

You can use [mitogen](mitogen.md) to speed up kubespray.

## Beyond ansible 2.9

To ease its maintenance burden, the Ansible project has decided to split into
two projects, which are now joined under the Ansible umbrella.

Ansible-base (the 2.10.x branch) contains just the ansible language implementation, while
the modules that were previously bundled into a single repository are now part of the
ansible 3.x package. Please see [this blog post](https://blog.while-true-do.io/ansible-release-3-0-0/)
that explains the need and the evolution plan in detail.

**Note:** this change means that ansible virtual envs cannot be upgraded with `pip install -U`.
You first need to uninstall your old ansible (pre 2.10) version and install the new one.

```ShellSession
pip uninstall ansible
cd kubespray/
pip install -U .
```

**Note:** some changes needed to support ansible 2.10+ are not backwards compatible with 2.9.
Kubespray needs to evolve and keep pace with upstream ansible, and will eventually be forced to
drop 2.9 support. Kubespray CI uses only the ansible version specified in `requirements.txt`,
and while `ansible_version.yml` may allow older versions to be used, these are not
exercised in the CI and compatibility is not guaranteed.
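After reinstalling, one way to confirm what actually ended up in the virtualenv (package names as published on PyPI; the versions shown should match the pins added by this PR):

```ShellSession
pip show ansible ansible-base | grep -E '^(Name|Version)'
```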
@ -5,7 +5,7 @@
- hosts: localhost
  strategy: linear
  vars:
    mitogen_version: 0.2.9
    mitogen_version: 0.3.0rc1
    mitogen_url: https://github.com/dw/mitogen/archive/v{{ mitogen_version }}.tar.gz
    ansible_connection: local
  tasks:

@ -13,6 +13,7 @@
      file:
        path: "{{ item }}"
        state: directory
        mode: 0755
      become: false
      loop:
        - "{{ playbook_dir }}/plugins/mitogen"

@ -40,3 +41,4 @@
        section: defaults
        option: strategy
        value: mitogen_linear
        mode: 0644
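For anyone trying the bumped mitogen locally, the plugin is installed via the playbook touched above; the usual invocation looks like this (flags are illustrative):

```ShellSession
ansible-playbook -c local mitogen.yml -vv
```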
@ -12,13 +12,13 @@
    - { role: kubespray-defaults}
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}

- hosts: "{{ groups['etcd'] | first }}"
- hosts: etcd[0]
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults}
    - { role: recover_control_plane/etcd }

- hosts: "{{ groups['kube_control_plane'] | first }}"
- hosts: kube_control_plane[0]
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults}

@ -26,7 +26,7 @@

- include: cluster.yml

- hosts: "{{ groups['kube_control_plane'] }}"
- hosts: kube_control_plane
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults}
@ -1,4 +1,5 @@
ansible==2.9.20
ansible==3.4.0
ansible-base==2.10.11
cryptography==2.8
jinja2==2.11.3
netaddr==0.7.19
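Since in-place upgrades of an existing ansible 2.9 virtualenv are ruled out (see the docs note above), the simplest path is a fresh virtualenv against the new pins; directory and venv names are illustrative:

```ShellSession
python3 -m venv ~/.venvs/kubespray
source ~/.venvs/kubespray/bin/activate
cd kubespray/
pip install -r requirements.txt   # installs ansible==3.4.0 and ansible-base==2.10.11
```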
@ -19,3 +19,4 @@
|
|||
template:
|
||||
src: ssh-bastion.conf
|
||||
dest: "{{ playbook_dir }}/ssh-bastion.conf"
|
||||
mode: 0640
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
value: "{{ http_proxy | default(omit) }}"
|
||||
state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}"
|
||||
no_extra_spaces: true
|
||||
mode: 0644
|
||||
become: true
|
||||
when: not skip_http_proxy_on_os_packages
|
||||
|
||||
|
@ -32,6 +33,7 @@
|
|||
section: "{{ item }}"
|
||||
option: enabled
|
||||
value: "1"
|
||||
mode: 0644
|
||||
with_items:
|
||||
- ol7_latest
|
||||
- ol7_addons
|
||||
|
@ -56,6 +58,7 @@
|
|||
section: "ol{{ ansible_distribution_major_version }}_addons"
|
||||
option: "{{ item.option }}"
|
||||
value: "{{ item.value }}"
|
||||
mode: 0644
|
||||
with_items:
|
||||
- { option: "name", value: "ol{{ ansible_distribution_major_version }}_addons" }
|
||||
- { option: "enabled", value: "1" }
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
- name: Remove podman network cni
|
||||
raw: "podman network rm podman"
|
||||
become: true
|
||||
ignore_errors: yes
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
when: need_bootstrap.rc != 0
|
||||
|
||||
- name: Clean up possible pending packages on fedora coreos
|
||||
|
@ -43,7 +43,7 @@
|
|||
- name: Reboot immediately for updated ostree, please run playbook again if failed first time.
|
||||
raw: "nohup bash -c 'sleep 5s && shutdown -r now'"
|
||||
become: true
|
||||
ignore_errors: yes
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
ignore_unreachable: yes
|
||||
when: need_bootstrap.rc != 0
|
||||
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
value: "{{ http_proxy | default(omit) }}"
|
||||
state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}"
|
||||
no_extra_spaces: true
|
||||
mode: 0644
|
||||
become: true
|
||||
when: not skip_http_proxy_on_os_packages
|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
register: stat_result
|
||||
|
||||
- name: Create the /etc/sysconfig/proxy empty file
|
||||
file:
|
||||
file: # noqa risky-file-permissions
|
||||
path: /etc/sysconfig/proxy
|
||||
state: touch
|
||||
when:
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
value: "{{ http_proxy | default(omit) }}"
|
||||
state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}"
|
||||
no_extra_spaces: true
|
||||
mode: 0644
|
||||
become: true
|
||||
when: not skip_http_proxy_on_os_packages
|
||||
|
||||
|
@ -19,7 +20,7 @@
|
|||
command: /sbin/subscription-manager status
|
||||
register: rh_subscription_status
|
||||
changed_when: "rh_subscription_status != 0"
|
||||
ignore_errors: true
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
become: true
|
||||
|
||||
- name: RHEL subscription Organization ID/Activation Key registration
|
||||
|
@ -35,12 +36,13 @@
|
|||
service_level_agreement: "{{ rh_subscription_sla }}"
|
||||
sync: true
|
||||
notify: RHEL auto-attach subscription
|
||||
ignore_errors: true
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
become: true
|
||||
when:
|
||||
- rh_subscription_org_id is defined
|
||||
- rh_subscription_status.changed
|
||||
|
||||
# this task has no_log set to prevent logging security sensitive information such as subscription passwords
|
||||
- name: RHEL subscription Username/Password registration
|
||||
redhat_subscription:
|
||||
state: present
|
||||
|
@ -54,8 +56,9 @@
|
|||
service_level_agreement: "{{ rh_subscription_sla }}"
|
||||
sync: true
|
||||
notify: RHEL auto-attach subscription
|
||||
ignore_errors: true
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
become: true
|
||||
no_log: true
|
||||
when:
|
||||
- rh_subscription_username is defined
|
||||
- rh_subscription_status.changed
|
||||
|
|
|
@ -4,4 +4,4 @@
|
|||
become: true
|
||||
roles:
|
||||
- role: kubespray-defaults
|
||||
- role: containerd
|
||||
- role: container-engine/containerd
|
||||
|
|
|
@ -23,12 +23,14 @@
|
|||
template:
|
||||
src: "fedora_containerd.repo.j2"
|
||||
dest: "{{ yum_repo_dir }}/containerd.repo"
|
||||
mode: 0644
|
||||
when: ansible_distribution == "Fedora"
|
||||
|
||||
- name: Configure containerd repository on RedHat/OracleLinux/CentOS/AlmaLinux
|
||||
template:
|
||||
src: "rh_containerd.repo.j2"
|
||||
dest: "{{ yum_repo_dir }}/containerd.repo"
|
||||
mode: 0644
|
||||
when:
|
||||
- ansible_os_family == "RedHat"
|
||||
- ansible_distribution not in ["Fedora", "Amazon"]
|
||||
|
|
|
@ -58,11 +58,13 @@
|
|||
file:
|
||||
path: /etc/systemd/system/containerd.service.d
|
||||
state: directory
|
||||
mode: 0755
|
||||
|
||||
- name: Write containerd proxy drop-in
|
||||
template:
|
||||
src: http-proxy.conf.j2
|
||||
dest: /etc/systemd/system/containerd.service.d/http-proxy.conf
|
||||
mode: 0644
|
||||
notify: restart containerd
|
||||
when: http_proxy is defined or https_proxy is defined
|
||||
|
||||
|
@ -116,7 +118,7 @@
|
|||
- not is_ostree
|
||||
- containerd_package_info.pkgs|length > 0
|
||||
|
||||
- include_role:
|
||||
- include_role: # noqa unnamed-task
|
||||
name: container-engine/crictl
|
||||
|
||||
# you can sometimes end up in a state where everything is installed
|
||||
|
|
|
@ -4,4 +4,4 @@
|
|||
become: true
|
||||
roles:
|
||||
- role: kubespray-defaults
|
||||
- role: cri-o
|
||||
- role: container-engine/cri-o
|
||||
|
|
|
@ -53,6 +53,7 @@
|
|||
option: enabled
|
||||
value: "0"
|
||||
backup: yes
|
||||
mode: 0644
|
||||
when:
|
||||
- ansible_distribution in ["Amazon"]
|
||||
- amzn2_extras_file_stat.stat.exists
|
||||
|
@ -119,6 +120,7 @@
|
|||
section: "{{ item.section }}"
|
||||
option: enabled
|
||||
value: 1
|
||||
mode: 0644
|
||||
become: true
|
||||
when: is_ostree
|
||||
loop:
|
||||
|
|
|
@ -46,7 +46,7 @@
|
|||
import_tasks: "crio_repo.yml"
|
||||
when: crio_add_repos
|
||||
|
||||
- include_role:
|
||||
- include_role: # noqa unnamed-task
|
||||
name: container-engine/crictl
|
||||
|
||||
- name: Build a list of crio runtimes with Katacontainers runtimes
|
||||
|
@ -69,11 +69,13 @@
|
|||
file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
mode: 0755
|
||||
|
||||
- name: Install cri-o config
|
||||
template:
|
||||
src: crio.conf.j2
|
||||
dest: /etc/crio/crio.conf
|
||||
mode: 0644
|
||||
register: config_install
|
||||
|
||||
- name: Add skopeo pkg to install
|
||||
|
@ -129,6 +131,7 @@
|
|||
copy:
|
||||
src: mounts.conf
|
||||
dest: /etc/containers/mounts.conf
|
||||
mode: 0644
|
||||
when:
|
||||
- ansible_os_family == 'RedHat'
|
||||
notify: restart crio
|
||||
|
@ -147,6 +150,7 @@
|
|||
section: storage.options.overlay
|
||||
option: mountopt
|
||||
value: '{{ ''"nodev"'' if ansible_kernel is version_compare(("4.18" if ansible_os_family == "RedHat" else "4.19"), "<") else ''"nodev,metacopy=on"'' }}'
|
||||
mode: 0644
|
||||
|
||||
- name: Create directory registries configs
|
||||
file:
|
||||
|
@ -159,6 +163,7 @@
|
|||
template:
|
||||
src: registry-mirror.conf.j2
|
||||
dest: "/etc/containers/registries.conf.d/{{ item.prefix }}.conf"
|
||||
mode: 0644
|
||||
loop: "{{ crio_registries_mirrors }}"
|
||||
notify: restart crio
|
||||
|
||||
|
@ -166,6 +171,7 @@
|
|||
template:
|
||||
src: http-proxy.conf.j2
|
||||
dest: /etc/systemd/system/crio.service.d/http-proxy.conf
|
||||
mode: 0644
|
||||
notify: restart crio
|
||||
when: http_proxy is defined or https_proxy is defined
|
||||
|
||||
|
|
|
@ -4,4 +4,4 @@
|
|||
become: true
|
||||
roles:
|
||||
- role: kubespray-defaults
|
||||
- role: docker
|
||||
- role: container-engine/docker
|
||||
|
|
|
@ -80,12 +80,14 @@
|
|||
template:
|
||||
src: "fedora_docker.repo.j2"
|
||||
dest: "{{ yum_repo_dir }}/docker.repo"
|
||||
mode: 0644
|
||||
when: ansible_distribution == "Fedora" and not is_ostree
|
||||
|
||||
- name: Configure docker repository on RedHat/CentOS/Oracle/AlmaLinux Linux
|
||||
template:
|
||||
src: "rh_docker.repo.j2"
|
||||
dest: "{{ yum_repo_dir }}/docker-ce.repo"
|
||||
mode: 0644
|
||||
when:
|
||||
- ansible_os_family == "RedHat"
|
||||
- ansible_distribution != "Fedora"
|
||||
|
@ -145,7 +147,7 @@
|
|||
state: started
|
||||
when: docker_task_result is not changed
|
||||
rescue:
|
||||
- debug:
|
||||
- debug: # noqa unnamed-task
|
||||
msg: "Docker start failed. Try to remove our config"
|
||||
- name: remove kubespray generated config
|
||||
file:
|
||||
|
|
|
@ -3,11 +3,13 @@
|
|||
file:
|
||||
path: /etc/systemd/system/docker.service.d
|
||||
state: directory
|
||||
mode: 0755
|
||||
|
||||
- name: Write docker proxy drop-in
|
||||
template:
|
||||
src: http-proxy.conf.j2
|
||||
dest: /etc/systemd/system/docker.service.d/http-proxy.conf
|
||||
mode: 0644
|
||||
notify: restart docker
|
||||
when: http_proxy is defined or https_proxy is defined
|
||||
|
||||
|
@ -25,6 +27,7 @@
|
|||
template:
|
||||
src: docker.service.j2
|
||||
dest: /etc/systemd/system/docker.service
|
||||
mode: 0644
|
||||
register: docker_service_file
|
||||
notify: restart docker
|
||||
when:
|
||||
|
@ -35,12 +38,14 @@
|
|||
template:
|
||||
src: docker-options.conf.j2
|
||||
dest: "/etc/systemd/system/docker.service.d/docker-options.conf"
|
||||
mode: 0644
|
||||
notify: restart docker
|
||||
|
||||
- name: Write docker dns systemd drop-in
|
||||
template:
|
||||
src: docker-dns.conf.j2
|
||||
dest: "/etc/systemd/system/docker.service.d/docker-dns.conf"
|
||||
mode: 0644
|
||||
notify: restart docker
|
||||
when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'
|
||||
|
||||
|
@ -55,7 +60,9 @@
|
|||
template:
|
||||
src: docker-orphan-cleanup.conf.j2
|
||||
dest: "/etc/systemd/system/docker.service.d/docker-orphan-cleanup.conf"
|
||||
mode: 0644
|
||||
notify: restart docker
|
||||
when: docker_orphan_clean_up | bool
|
||||
|
||||
- meta: flush_handlers
|
||||
- name: Flush handlers
|
||||
meta: flush_handlers
|
||||
|
|
|
@ -7,5 +7,5 @@
|
|||
container_manager: containerd
|
||||
roles:
|
||||
- role: kubespray-defaults
|
||||
- role: containerd
|
||||
- role: gvisor
|
||||
- role: container-engine/containerd
|
||||
- role: container-engine/gvisor
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
roles:
|
||||
- role: kubespray-defaults
|
||||
- role: bootstrap-os
|
||||
- role: ../adduser
|
||||
- role: adduser
|
||||
user: "{{ addusers.kube }}"
|
||||
tasks:
|
||||
- include_tasks: "../../../../download/tasks/download_file.yml"
|
||||
|
@ -20,8 +20,8 @@
|
|||
kube_network_plugin: cni
|
||||
roles:
|
||||
- role: kubespray-defaults
|
||||
- role: ../network_plugin/cni
|
||||
- role: crictl
|
||||
- role: network_plugin/cni
|
||||
- role: container-engine/crictl
|
||||
tasks:
|
||||
- name: Copy test container files
|
||||
copy:
|
||||
|
|
|
@ -6,5 +6,5 @@
|
|||
kata_containers_enabled: true
|
||||
roles:
|
||||
- role: kubespray-defaults
|
||||
- role: containerd
|
||||
- role: kata-containers
|
||||
- role: container-engine/containerd
|
||||
- role: container-engine/kata-containers
|
||||
|
|
|
@ -15,11 +15,13 @@
|
|||
file:
|
||||
path: "{{ kata_containers_config_dir }}"
|
||||
state: directory
|
||||
mode: 0755
|
||||
|
||||
- name: kata-containers | Set configuration
|
||||
template:
|
||||
src: "{{ item }}.j2"
|
||||
dest: "{{ kata_containers_config_dir }}/{{ item }}"
|
||||
mode: 0644
|
||||
with_items:
|
||||
- configuration-qemu.toml
|
||||
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
# noqa role-name - this is a meta role that doesn't need a name
|
||||
---
|
||||
dependencies:
|
||||
- role: container-engine/kata-containers
|
||||
|
|
|
@ -18,7 +18,7 @@
|
|||
when:
|
||||
- not download_always_pull
|
||||
|
||||
- debug:
|
||||
- debug: # noqa unnamed-task
|
||||
msg: "Pull {{ image_reponame }} required is: {{ pull_required }}"
|
||||
|
||||
- name: download_container | Determine if image is in cache
|
||||
|
|
|
@ -48,6 +48,7 @@
|
|||
- not download_localhost
|
||||
|
||||
# This must always be called, to check if the checksum matches. On no-match the file is re-downloaded.
|
||||
# This task will avoid logging it's parameters to not leak environment passwords in the log
|
||||
- name: download_file | Download item
|
||||
get_url:
|
||||
url: "{{ download.url }}"
|
||||
|
@ -67,6 +68,7 @@
|
|||
retries: 4
|
||||
delay: "{{ retry_stagger | default(5) }}"
|
||||
environment: "{{ proxy_env }}"
|
||||
no_log: true
|
||||
|
||||
- name: download_file | Copy file back to ansible host file cache
|
||||
synchronize:
|
||||
|
|
|
@ -38,7 +38,7 @@
|
|||
run_once: true
|
||||
register: test_become
|
||||
changed_when: false
|
||||
ignore_errors: true
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
become: true
|
||||
when:
|
||||
- download_localhost
|
||||
|
@ -53,7 +53,7 @@
|
|||
run_once: true
|
||||
register: test_docker
|
||||
changed_when: false
|
||||
ignore_errors: true
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
become: false
|
||||
when:
|
||||
- download_localhost
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
template:
|
||||
src: "kubeadm-images.yaml.j2"
|
||||
dest: "{{ kube_config_dir }}/kubeadm-images.yaml"
|
||||
mode: 0644
|
||||
when:
|
||||
- not skip_kubeadm_images|default(false)
|
||||
|
||||
|
|
|
@ -45,6 +45,7 @@
|
|||
src: "etcd-{{ etcd_deployment_type }}.service.j2"
|
||||
dest: /etc/systemd/system/etcd.service
|
||||
backup: yes
|
||||
mode: 0644
|
||||
when: is_etcd_master and etcd_cluster_setup
|
||||
|
||||
- name: Configure | Copy etcd-events.service systemd file
|
||||
|
@ -52,6 +53,7 @@
|
|||
src: "etcd-events-{{ etcd_deployment_type }}.service.j2"
|
||||
dest: /etc/systemd/system/etcd-events.service
|
||||
backup: yes
|
||||
mode: 0644
|
||||
when: is_etcd_master and etcd_events_cluster_setup
|
||||
|
||||
- name: Configure | reload systemd
|
||||
|
@ -65,7 +67,7 @@
|
|||
name: etcd
|
||||
state: started
|
||||
enabled: yes
|
||||
ignore_errors: "{{ etcd_cluster_is_healthy.rc == 0 }}"
|
||||
ignore_errors: "{{ etcd_cluster_is_healthy.rc == 0 }}" # noqa ignore-errors
|
||||
when: is_etcd_master and etcd_cluster_setup
|
||||
|
||||
# when scaling new etcd will fail to start
|
||||
|
@ -74,7 +76,7 @@
|
|||
name: etcd-events
|
||||
state: started
|
||||
enabled: yes
|
||||
ignore_errors: "{{ etcd_events_cluster_is_healthy.rc == 0 }}"
|
||||
ignore_errors: "{{ etcd_events_cluster_is_healthy.rc != 0 }}" # noqa ignore-errors
|
||||
when: is_etcd_master and etcd_events_cluster_setup
|
||||
|
||||
- name: Configure | Wait for etcd cluster to be healthy
|
||||
|
@ -126,7 +128,7 @@
|
|||
- name: Configure | Check if member is in etcd cluster
|
||||
shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_access_address }}"
|
||||
register: etcd_member_in_cluster
|
||||
ignore_errors: true
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
changed_when: false
|
||||
check_mode: no
|
||||
when: is_etcd_master and etcd_cluster_setup
|
||||
|
@ -142,7 +144,7 @@
|
|||
- name: Configure | Check if member is in etcd-events cluster
|
||||
shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_access_address }}"
|
||||
register: etcd_events_member_in_cluster
|
||||
ignore_errors: true
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
changed_when: false
|
||||
check_mode: no
|
||||
when: is_etcd_master and etcd_events_cluster_setup
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
template:
|
||||
src: "openssl.conf.j2"
|
||||
dest: "{{ etcd_config_dir }}/openssl.conf"
|
||||
mode: 0640
|
||||
run_once: yes
|
||||
delegate_to: "{{ groups['etcd'][0] }}"
|
||||
when:
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
template:
|
||||
src: etcd.env.j2
|
||||
dest: /etc/etcd.env
|
||||
mode: 0640
|
||||
notify: restart etcd
|
||||
when: is_etcd_master and etcd_cluster_setup
|
||||
|
||||
|
@ -10,5 +11,6 @@
|
|||
template:
|
||||
src: etcd-events.env.j2
|
||||
dest: /etc/etcd-events.env
|
||||
mode: 0640
|
||||
notify: restart etcd-events
|
||||
when: is_etcd_master and etcd_events_cluster_setup
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
src: "{{ etcd_cert_dir }}/ca.pem"
|
||||
dest: "{{ ca_cert_path }}"
|
||||
remote_src: true
|
||||
mode: 0640
|
||||
register: etcd_ca_cert
|
||||
|
||||
- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Flatcar) # noqa 503
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
shell: "{{ bin_dir }}/kubectl get deploy -n kube-system coredns -o jsonpath='{ .spec.template.metadata.annotations.createdby }'"
|
||||
register: createdby_annotation
|
||||
changed_when: false
|
||||
ignore_errors: true
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
when:
|
||||
- dns_mode in ['coredns', 'coredns_dual']
|
||||
- inventory_hostname == groups['kube_control_plane'][0]
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
template:
|
||||
src: "{{ item.file }}.j2"
|
||||
dest: "{{ kube_config_dir }}/{{ item.file }}"
|
||||
mode: 0640
|
||||
register: psp_manifests
|
||||
with_items:
|
||||
- {file: psp.yml, type: psp, name: psp}
|
||||
|
@ -61,6 +62,7 @@
|
|||
template:
|
||||
src: "node-crb.yml.j2"
|
||||
dest: "{{ kube_config_dir }}/node-crb.yml"
|
||||
mode: 0640
|
||||
register: node_crb_manifest
|
||||
when:
|
||||
- rbac_enabled
|
||||
|
@ -86,6 +88,7 @@
|
|||
template:
|
||||
src: "node-webhook-cr.yml.j2"
|
||||
dest: "{{ kube_config_dir }}/node-webhook-cr.yml"
|
||||
mode: 0640
|
||||
register: node_webhook_cr_manifest
|
||||
when:
|
||||
- rbac_enabled
|
||||
|
@ -111,6 +114,7 @@
|
|||
template:
|
||||
src: "node-webhook-crb.yml.j2"
|
||||
dest: "{{ kube_config_dir }}/node-webhook-crb.yml"
|
||||
mode: 0640
|
||||
register: node_webhook_crb_manifest
|
||||
when:
|
||||
- rbac_enabled
|
||||
|
@ -139,7 +143,7 @@
|
|||
- cloud_provider == 'oci'
|
||||
|
||||
- name: PriorityClass | Copy k8s-cluster-critical-pc.yml file
|
||||
copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml
|
||||
copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml mode=0640
|
||||
when: inventory_hostname == groups['kube_control_plane']|last
|
||||
|
||||
- name: PriorityClass | Create k8s-cluster-critical
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
copy:
|
||||
src: "oci-rbac.yml"
|
||||
dest: "{{ kube_config_dir }}/oci-rbac.yml"
|
||||
mode: 0640
|
||||
when:
|
||||
- cloud_provider is defined
|
||||
- cloud_provider == 'oci'
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
- name: CephFS Provisioner | Remove legacy namespace
|
||||
shell: |
|
||||
{{ bin_dir }}/kubectl delete namespace {{ cephfs_provisioner_namespace }}
|
||||
ignore_errors: yes
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
when:
|
||||
- inventory_hostname == groups['kube_control_plane'][0]
|
||||
tags:
|
||||
|
@ -21,7 +21,7 @@
|
|||
- name: CephFS Provisioner | Remove legacy storageclass
|
||||
shell: |
|
||||
{{ bin_dir }}/kubectl delete storageclass {{ cephfs_provisioner_storage_class }}
|
||||
ignore_errors: yes
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
when:
|
||||
- inventory_hostname == groups['kube_control_plane'][0]
|
||||
tags:
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
- name: RBD Provisioner | Remove legacy namespace
|
||||
shell: |
|
||||
{{ bin_dir }}/kubectl delete namespace {{ rbd_provisioner_namespace }}
|
||||
ignore_errors: yes
|
||||
ignore_errors: true # noqa ignore-errrors
|
||||
when:
|
||||
- inventory_hostname == groups['kube_control_plane'][0]
|
||||
tags:
|
||||
|
@ -21,7 +21,7 @@
|
|||
- name: RBD Provisioner | Remove legacy storageclass
|
||||
shell: |
|
||||
{{ bin_dir }}/kubectl delete storageclass {{ rbd_provisioner_storage_class }}
|
||||
ignore_errors: yes
|
||||
ignore_errors: true # noqa ignore-errrors
|
||||
when:
|
||||
- inventory_hostname == groups['kube_control_plane'][0]
|
||||
tags:
|
||||
|
@ -63,6 +63,7 @@
|
|||
template:
|
||||
src: "{{ item.file }}.j2"
|
||||
dest: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.file }}"
|
||||
mode: 0644
|
||||
with_items: "{{ rbd_provisioner_templates }}"
|
||||
register: rbd_provisioner_manifests
|
||||
when: inventory_hostname == groups['kube_control_plane'][0]
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
- name: Cert Manager | Remove legacy namespace
|
||||
shell: |
|
||||
{{ bin_dir }}/kubectl delete namespace {{ cert_manager_namespace }}
|
||||
ignore_errors: yes
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
when:
|
||||
- inventory_hostname == groups['kube_control_plane'][0]
|
||||
tags:
|
||||
|
|
|
@ -55,7 +55,7 @@
|
|||
command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n metallb-system get secret memberlist"
|
||||
register: metallb_secret
|
||||
become: true
|
||||
ignore_errors: yes
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
when:
|
||||
- inventory_hostname == groups['kube_control_plane'][0]
|
||||
|
||||
|
|
|
@ -12,12 +12,12 @@
|
|||
run_once: true
|
||||
|
||||
- name: kube-router | Wait for kube-router pods to be ready
|
||||
command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601
|
||||
command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601 ignore-errors
|
||||
register: pods_not_ready
|
||||
until: pods_not_ready.stdout.find("kube-router")==-1
|
||||
retries: 30
|
||||
delay: 10
|
||||
ignore_errors: yes
|
||||
ignore_errors: true
|
||||
delegate_to: "{{ groups['kube_control_plane'] | first }}"
|
||||
run_once: true
|
||||
changed_when: false
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
- apiserver-kubelet-client.key
|
||||
- front-proxy-client.crt
|
||||
- front-proxy-client.key
|
||||
ignore_errors: yes
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
|
||||
- name: Backup old confs
|
||||
copy:
|
||||
|
@ -25,4 +25,4 @@
|
|||
- controller-manager.conf
|
||||
- kubelet.conf
|
||||
- scheduler.conf
|
||||
ignore_errors: yes
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
|
|
|
@ -50,18 +50,21 @@
|
|||
file:
|
||||
path: "{{ audit_policy_file | dirname }}"
|
||||
state: directory
|
||||
mode: 0640
|
||||
when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false)
|
||||
|
||||
- name: Write api audit policy yaml
|
||||
template:
|
||||
src: apiserver-audit-policy.yaml.j2
|
||||
dest: "{{ audit_policy_file }}"
|
||||
mode: 0640
|
||||
when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false)
|
||||
|
||||
- name: Write api audit webhook config yaml
|
||||
template:
|
||||
src: apiserver-audit-webhook-config.yaml.j2
|
||||
dest: "{{ audit_webhook_config_file }}"
|
||||
mode: 0640
|
||||
when: kubernetes_audit_webhook|default(false)
|
||||
|
||||
# Nginx LB(default), If kubeadm_config_api_fqdn is defined, use other LB by kubeadm controlPlaneEndpoint.
|
||||
|
|
|
@ -7,12 +7,14 @@
|
|||
template:
|
||||
src: webhook-token-auth-config.yaml.j2
|
||||
dest: "{{ kube_config_dir }}/webhook-token-auth-config.yaml"
|
||||
mode: 0640
|
||||
when: kube_webhook_token_auth|default(false)
|
||||
|
||||
- name: Create webhook authorization config
|
||||
template:
|
||||
src: webhook-authorization-config.yaml.j2
|
||||
dest: "{{ kube_config_dir }}/webhook-authorization-config.yaml"
|
||||
mode: 0640
|
||||
when: kube_webhook_authorization|default(false)
|
||||
|
||||
- name: Create kube-scheduler config
|
||||
|
@ -40,7 +42,7 @@
|
|||
when: ansible_os_family in ["Debian","RedHat"]
|
||||
tags:
|
||||
- kubectl
|
||||
ignore_errors: True
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
|
||||
- name: Set kubectl bash completion file permissions
|
||||
file:
|
||||
|
@ -52,7 +54,7 @@
|
|||
tags:
|
||||
- kubectl
|
||||
- upgrade
|
||||
ignore_errors: True
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
|
||||
- name: Disable SecurityContextDeny admission-controller and enable PodSecurityPolicy
|
||||
set_fact:
|
||||
|
@ -77,12 +79,13 @@
|
|||
template:
|
||||
src: k8s-certs-renew.sh.j2
|
||||
dest: "{{ bin_dir }}/k8s-certs-renew.sh"
|
||||
mode: '755'
|
||||
mode: 0755
|
||||
|
||||
- name: Renew K8S control plane certificates monthly 1/2
|
||||
template:
|
||||
src: "{{ item }}.j2"
|
||||
dest: "/etc/systemd/system/{{ item }}"
|
||||
mode: 0644
|
||||
with_items:
|
||||
- k8s-certs-renew.service
|
||||
- k8s-certs-renew.timer
|
||||
|
|
|
@ -61,6 +61,7 @@
|
|||
src: "kubeadm-client.conf.{{ kubeadmConfig_api_version }}.j2"
|
||||
dest: "{{ kube_config_dir }}/kubeadm-client.conf"
|
||||
backup: yes
|
||||
mode: 0640
|
||||
when: not is_kube_master
|
||||
|
||||
- name: Join to cluster if needed
|
||||
|
|
|
@ -35,8 +35,10 @@
|
|||
- node_labels is defined
|
||||
- node_labels is mapping
|
||||
|
||||
- debug: var=role_node_labels
|
||||
- debug: var=inventory_node_labels
|
||||
- debug: # noqa unnamed-task
|
||||
var: role_node_labels
|
||||
- debug: # noqa unnamed-task
|
||||
var: inventory_node_labels
|
||||
|
||||
- name: Set label to node
|
||||
command: >-
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
src: "kubelet.env.{{ kubeletConfig_api_version }}.j2"
|
||||
dest: "{{ kube_config_dir }}/kubelet.env"
|
||||
backup: yes
|
||||
mode: 0640
|
||||
notify: Node | restart kubelet
|
||||
tags:
|
||||
- kubelet
|
||||
|
@ -27,6 +28,7 @@
|
|||
template:
|
||||
src: "kubelet-config.{{ kubeletConfig_api_version }}.yaml.j2"
|
||||
dest: "{{ kube_config_dir }}/kubelet-config.yaml"
|
||||
mode: 0640
|
||||
notify: Kubelet | restart kubelet
|
||||
tags:
|
||||
- kubelet
|
||||
|
@ -37,6 +39,7 @@
|
|||
src: "kubelet.service.j2"
|
||||
dest: "/etc/systemd/system/kubelet.service"
|
||||
backup: "yes"
|
||||
mode: 0644
|
||||
notify: Node | restart kubelet
|
||||
tags:
|
||||
- kubelet
|
||||
|
|
|
@ -31,3 +31,4 @@
|
|||
template:
|
||||
src: manifests/haproxy.manifest.j2
|
||||
dest: "{{ kube_manifest_dir }}/haproxy.yml"
|
||||
mode: 0640
|
||||
|
|
|
@ -31,3 +31,4 @@
|
|||
template:
|
||||
src: manifests/nginx-proxy.manifest.j2
|
||||
dest: "{{ kube_manifest_dir }}/nginx-proxy.yml"
|
||||
mode: 0640
|
||||
|
|
|
@ -57,6 +57,7 @@
|
|||
file:
|
||||
path: /etc/modules-load.d
|
||||
state: directory
|
||||
mode: 0755
|
||||
|
||||
- name: Enable br_netfilter module
|
||||
modprobe:
|
||||
|
@ -68,6 +69,7 @@
|
|||
copy:
|
||||
dest: /etc/modules-load.d/kubespray-br_netfilter.conf
|
||||
content: br_netfilter
|
||||
mode: 0644
|
||||
when: modinfo_br_netfilter.rc == 0
|
||||
|
||||
# kube-proxy needs net.bridge.bridge-nf-call-iptables enabled when found if br_netfilter is not a module
|
||||
|
@ -108,7 +110,7 @@
|
|||
name: nf_conntrack_ipv4
|
||||
state: present
|
||||
register: modprobe_nf_conntrack_ipv4
|
||||
ignore_errors: yes
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
when:
|
||||
- kube_proxy_mode == 'ipvs'
|
||||
tags:
|
||||
|
@ -117,6 +119,7 @@
|
|||
- name: Persist ip_vs modules
|
||||
copy:
|
||||
dest: /etc/modules-load.d/kube_proxy-ipvs.conf
|
||||
mode: 0644
|
||||
content: |
|
||||
ip_vs
|
||||
ip_vs_rr
|
||||
|
|
|
@ -16,4 +16,4 @@
|
|||
- name: Disable swap
|
||||
command: /sbin/swapoff -a
|
||||
when: swapon.stdout
|
||||
ignore_errors: "{{ ansible_check_mode }}"
|
||||
ignore_errors: "{{ ansible_check_mode }}" # noqa ignore-errors
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
path: "{{ item }}"
|
||||
state: directory
|
||||
owner: kube
|
||||
mode: 0755
|
||||
when: inventory_hostname in groups['k8s_cluster']
|
||||
become: true
|
||||
tags:
|
||||
|
@ -28,6 +29,7 @@
|
|||
path: "{{ item }}"
|
||||
state: directory
|
||||
owner: root
|
||||
mode: 0755
|
||||
when: inventory_hostname in groups['k8s_cluster']
|
||||
become: true
|
||||
tags:
|
||||
|
@ -59,6 +61,7 @@
|
|||
src: "{{ kube_cert_dir }}"
|
||||
dest: "{{ kube_cert_compat_dir }}"
|
||||
state: link
|
||||
mode: 0755
|
||||
when:
|
||||
- inventory_hostname in groups['k8s_cluster']
|
||||
- kube_cert_dir != kube_cert_compat_dir
|
||||
|
@ -69,6 +72,7 @@
|
|||
path: "{{ item }}"
|
||||
state: directory
|
||||
owner: kube
|
||||
mode: 0755
|
||||
with_items:
|
||||
- "/etc/cni/net.d"
|
||||
- "/opt/cni/bin"
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
create: yes
|
||||
backup: yes
|
||||
marker: "# Ansible entries {mark}"
|
||||
mode: 0644
|
||||
notify: Preinstall | propagate resolvconf to k8s components
|
||||
|
||||
- name: Remove search/domain/nameserver options before block
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
[keyfile]
|
||||
unmanaged-devices+=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico
|
||||
dest: /etc/NetworkManager/conf.d/calico.conf
|
||||
mode: 0644
|
||||
when:
|
||||
- nm_check.rc == 0
|
||||
- kube_network_plugin == "calico"
|
||||
|
@ -32,5 +33,6 @@
|
|||
[keyfile]
|
||||
unmanaged-devices+=interface-name:kube-ipvs0;interface-name:nodelocaldns
|
||||
dest: /etc/NetworkManager/conf.d/k8s.conf
|
||||
mode: 0644
|
||||
when: nm_check.rc == 0
|
||||
notify: Preinstall | reload NetworkManager
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
state: present
|
||||
create: yes
|
||||
backup: yes
|
||||
mode: 0644
|
||||
when:
|
||||
- disable_ipv6_dns
|
||||
- not ansible_os_family in ["Flatcar Container Linux by Kinvolk"]
|
||||
|
@ -59,6 +60,7 @@
|
|||
file:
|
||||
name: "{{ sysctl_file_path | dirname }}"
|
||||
state: directory
|
||||
mode: 0755
|
||||
|
||||
- name: Enable ip forwarding
|
||||
sysctl:
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
backup: yes
|
||||
unsafe_writes: yes
|
||||
marker: "# Ansible inventory hosts {mark}"
|
||||
mode: 0644
|
||||
when: populate_inventory_to_hosts_file
|
||||
|
||||
- name: Hosts | populate kubernetes loadbalancer address into hosts file
|
||||
|
|
|
@ -11,6 +11,7 @@
|
|||
insertbefore: BOF
|
||||
backup: yes
|
||||
marker: "# Ansible entries {mark}"
|
||||
mode: 0644
|
||||
notify: Preinstall | propagate resolvconf to k8s components
|
||||
when: dhclientconffile is defined
|
||||
|
||||
|
|
|
@ -91,7 +91,8 @@
|
|||
|
||||
# We need to make sure the network is restarted early enough so that docker can later pick up the correct system
|
||||
# nameservers and search domains
|
||||
- meta: flush_handlers
|
||||
- name: Flush handlers
|
||||
meta: flush_handlers
|
||||
|
||||
- name: Check if we are running inside a Azure VM
|
||||
stat:
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
until: pods_not_ready.stdout.find("cilium")==-1
|
||||
retries: 30
|
||||
delay: 10
|
||||
ignore_errors: yes
|
||||
fail_when: false
|
||||
when: inventory_hostname == groups['kube_control_plane'][0]
|
||||
|
||||
- name: Cilium | Hubble install
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
slurp:
|
||||
src: /etc/cni/net.d/10-kuberouter.conflist
|
||||
register: cni_config_slurp
|
||||
ignore_errors: true
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
|
||||
- name: kube-router | Set cni_config variable
|
||||
set_fact:
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
- name: Get etcd endpoint health
|
||||
command: "{{ bin_dir }}/etcdctl endpoint health"
|
||||
register: etcd_endpoint_health
|
||||
ignore_errors: true
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
changed_when: false
|
||||
check_mode: no
|
||||
environment:
|
||||
|
@ -38,13 +38,13 @@
|
|||
state: absent
|
||||
delegate_to: "{{ item }}"
|
||||
with_items: "{{ groups['broken_etcd'] }}"
|
||||
ignore_errors: true
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
when:
|
||||
- groups['broken_etcd']
|
||||
- has_quorum
|
||||
|
||||
- name: Delete old certificates
|
||||
# noqa 302 - rm is ok here for now
|
||||
# noqa 302 ignore-error - rm is ok here for now
|
||||
shell: "rm {{ etcd_cert_dir }}/*{{ item }}*"
|
||||
with_items: "{{ groups['broken_etcd'] }}"
|
||||
register: delete_old_cerificates
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
copy:
|
||||
src: "{{ etcd_snapshot }}"
|
||||
dest: /tmp/snapshot.db
|
||||
mode: 0640
|
||||
when: etcd_snapshot is defined
|
||||
|
||||
- name: Stop etcd
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
---
|
||||
- name: Delete node # noqa 301
|
||||
- name: Delete node # noqa 301 ignore-errors
|
||||
command: "{{ bin_dir }}/kubectl delete node {{ kube_override_hostname|default(inventory_hostname) }}"
|
||||
delegate_to: "{{ groups['kube_control_plane']|first }}"
|
||||
ignore_errors: yes
|
||||
ignore_errors: true
|
||||
|
|
|
@ -27,7 +27,7 @@
|
|||
- name: Lookup etcd member id
|
||||
shell: "{{ bin_dir }}/etcdctl member list | grep {{ node_ip }} | cut -d, -f1"
|
||||
register: etcd_member_id
|
||||
ignore_errors: true
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
changed_when: false
|
||||
check_mode: no
|
||||
tags:
|
||||
|
|
|
@ -86,7 +86,7 @@
|
|||
when:
|
||||
- crictl.stat.exists
|
||||
- container_manager in ["crio", "containerd"]
|
||||
ignore_errors: true
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
|
||||
- name: reset | force remove all cri containers
|
||||
command: "{{ bin_dir }}/crictl rm -a -f"
|
||||
|
@ -129,7 +129,7 @@
|
|||
when:
|
||||
- crictl.stat.exists
|
||||
- container_manager == "containerd"
|
||||
ignore_errors: true
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
|
||||
- block:
|
||||
- name: reset | force remove all cri pods
|
||||
|
@ -206,7 +206,7 @@
|
|||
|
||||
- name: Clear IPVS virtual server table
|
||||
command: "ipvsadm -C"
|
||||
ignore_errors: yes
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
when:
|
||||
- kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s_cluster']
|
||||
|
||||
|
@ -306,7 +306,7 @@
|
|||
- /etc/modules-load.d/kube_proxy-ipvs.conf
|
||||
- /etc/modules-load.d/kubespray-br_netfilter.conf
|
||||
- /usr/libexec/kubernetes
|
||||
ignore_errors: yes
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
tags:
|
||||
- files
|
||||
|
||||
|
@ -333,7 +333,7 @@
|
|||
- dns
|
||||
|
||||
- name: reset | include file with reset tasks specific to the network_plugin if exists
|
||||
include_tasks: "{{ (role_path + '/../network_plugin/' + kube_network_plugin + '/tasks/reset.yml') | realpath }}"
|
||||
include_tasks: "{{ (role_path,'../network_plugin',kube_network_plugin,'tasks/reset.yml') | path_join | realpath }}"
|
||||
when:
|
||||
- kube_network_plugin in ['flannel', 'cilium', 'kube-router', 'calico']
|
||||
tags:
|
||||
|
|
|
@ -29,10 +29,12 @@
|
|||
register: patch_kube_proxy_state
|
||||
when: current_kube_proxy_state.stdout | trim | lower != "linux"
|
||||
|
||||
- debug: msg={{ patch_kube_proxy_state.stdout_lines }}
|
||||
- debug: # noqa unnamed-task
|
||||
msg: "{{ patch_kube_proxy_state.stdout_lines }}"
|
||||
when: patch_kube_proxy_state is not skipped
|
||||
|
||||
- debug: msg={{ patch_kube_proxy_state.stderr_lines }}
|
||||
- debug: # noqa unnamed-task
|
||||
msg: "{{ patch_kube_proxy_state.stderr_lines }}"
|
||||
when: patch_kube_proxy_state is not skipped
|
||||
tags: init
|
||||
when:
|
||||
|
|
|
@ -135,6 +135,7 @@
|
|||
path: "/tmp/{{ archive_dirname }}"
|
||||
dest: "{{ dir|default('.') }}/logs.tar.gz"
|
||||
remove: true
|
||||
mode: 0640
|
||||
delegate_to: localhost
|
||||
connection: local
|
||||
become: false
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
file:
|
||||
state: directory
|
||||
path: "{{ images_dir }}"
|
||||
mode: 0755
|
||||
|
||||
- name: Download images files
|
||||
get_url:
|
||||
|
@ -39,6 +40,7 @@
|
|||
template:
|
||||
src: Dockerfile
|
||||
dest: "{{ images_dir }}/Dockerfile"
|
||||
mode: 0644
|
||||
|
||||
- name: Create docker images for each OS # noqa 301
|
||||
command: docker build -t {{ registry }}/vm-{{ item.key }}:{{ item.value.tag }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }}
|
||||
|
|
|
@ -22,3 +22,4 @@
|
|||
template:
|
||||
src: ../templates/inventory-aws.j2 # noqa 404 CI inventory templates are not in role_path
|
||||
dest: "{{ inventory_path }}"
|
||||
mode: 0644
|
||||
|
|
|
@ -79,7 +79,7 @@
|
|||
register: droplets
|
||||
with_items: "{{ instance_names }}"
|
||||
|
||||
- debug:
|
||||
- debug: # noqa unnamed-task
|
||||
msg: "{{ droplets }}, {{ inventory_path }}"
|
||||
when: state == 'present'
|
||||
|
||||
|
@ -87,4 +87,5 @@
|
|||
template:
|
||||
src: ../templates/inventory-do.j2 # noqa 404 CI templates are not in role_path
|
||||
dest: "{{ inventory_path }}"
|
||||
mode: 0644
|
||||
when: state == 'present'
|
||||
|
|
|
@ -28,7 +28,7 @@
|
|||
{%- endif -%}
|
||||
|
||||
- name: Create gce instances
|
||||
gce:
|
||||
google.cloud.gcp_compute_instance:
|
||||
instance_names: "{{ instance_names }}"
|
||||
machine_type: "{{ cloud_machine_type }}"
|
||||
image: "{{ cloud_image | default(omit) }}"
|
||||
|
@ -53,17 +53,20 @@
|
|||
template:
|
||||
src: ../templates/inventory-gce.j2
|
||||
dest: "{{ inventory_path }}"
|
||||
mode: 0644
|
||||
|
||||
- name: Make group_vars directory
|
||||
file:
|
||||
path: "{{ inventory_path|dirname }}/group_vars"
|
||||
state: directory
|
||||
mode: 0755
|
||||
when: mode in ['scale', 'separate-scale', 'ha-scale']
|
||||
|
||||
- name: Template fake hosts group vars # noqa 404 CI templates are not in role_path
|
||||
template:
|
||||
src: ../templates/fake_hosts.yml.j2
|
||||
dest: "{{ inventory_path|dirname }}/group_vars/fake_hosts.yml"
|
||||
mode: 0644
|
||||
when: mode in ['scale', 'separate-scale', 'ha-scale']
|
||||
|
||||
- name: Delete group_vars directory
|
||||
|
|
|
@ -20,7 +20,7 @@
|
|||
{%- endif -%}
|
||||
|
||||
- name: stop gce instances
|
||||
gce:
|
||||
google.cloud.gcp_compute_instance:
|
||||
instance_names: "{{ instance_names }}"
|
||||
image: "{{ cloud_image | default(omit) }}"
|
||||
service_account_email: "{{ gce_service_account_email }}"
|
||||
|
@ -34,7 +34,7 @@
|
|||
register: gce
|
||||
|
||||
- name: delete gce instances
|
||||
gce:
|
||||
google.cloud.gcp_compute_instance:
|
||||
instance_names: "{{ instance_names }}"
|
||||
image: "{{ cloud_image | default(omit) }}"
|
||||
service_account_email: "{{ gce_service_account_email }}"
|
||||
|
|
|
@ -12,11 +12,13 @@
|
|||
file:
|
||||
path: "/tmp/{{ test_name }}"
|
||||
state: directory
|
||||
mode: 0755
|
||||
|
||||
- name: Template vm files for CI job
|
||||
template:
|
||||
src: "vm.yml.j2"
|
||||
dest: "/tmp/{{ test_name }}/instance-{{ vm_id }}.yml"
|
||||
mode: 0644
|
||||
loop: "{{ range(1, vm_count|int + 1, 1) | list }}"
|
||||
loop_control:
|
||||
index_var: vm_id
|
||||
|
@ -47,5 +49,6 @@
|
|||
template:
|
||||
src: "inventory.j2"
|
||||
dest: "{{ inventory_path }}"
|
||||
mode: 0644
|
||||
vars:
|
||||
vms: "{{ vm_ips }}"
|
||||
|
|
|
@ -33,11 +33,13 @@
|
|||
template:
|
||||
src: gcs_life.json.j2
|
||||
dest: "{{ dir }}/gcs_life.json"
|
||||
mode: 0644
|
||||
|
||||
- name: Create a boto config to access GCS
|
||||
template:
|
||||
src: boto.j2
|
||||
dest: "{{ dir }}/.boto"
|
||||
mode: 0640
|
||||
no_log: True
|
||||
|
||||
- name: Download gsutil cp installer
|
||||
|
@ -74,5 +76,5 @@
|
|||
failed_when: false
|
||||
no_log: True
|
||||
|
||||
- debug:
|
||||
- debug: # noqa unnamed-task
|
||||
msg: "A public url https://storage.googleapis.com/{{ test_name }}/{{ file_name }}"
|
||||
|
|
|
@ -4,7 +4,7 @@ apache-libcloud==2.2.1
tox==3.11.1
dopy==0.3.7
cryptography==2.8
ansible-lint==4.2.0
ansible-lint==5.0.11
openshift==0.8.8
molecule==3.0.6
molecule-vagrant==0.3
@ -1,6 +1,7 @@
#!/bin/bash
set -euxo pipefail

/usr/bin/python -m pip uninstall -y ansible
/usr/bin/python -m pip install -r tests/requirements.txt
mkdir -p /.ssh
mkdir -p cluster-dump
@ -9,7 +9,7 @@
|
|||
status_code: 200
|
||||
register: apiserver_response
|
||||
|
||||
- debug:
|
||||
- debug: # noqa unnamed-task
|
||||
msg: "{{ apiserver_response.json }}"
|
||||
|
||||
- name: Check API servers version
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
bin_dir: "/usr/local/bin"
|
||||
when: not ansible_os_family in ["Flatcar Container Linux by Kinvolk"]
|
||||
|
||||
- import_role:
|
||||
- import_role: # noqa unnamed-task
|
||||
name: cluster-dump
|
||||
|
||||
- name: Check kubectl output
|
||||
|
@ -21,7 +21,7 @@
|
|||
register: get_nodes
|
||||
no_log: true
|
||||
|
||||
- debug:
|
||||
- debug: # noqa unnamed-task
|
||||
msg: "{{ get_nodes.stdout.split('\n') }}"
|
||||
|
||||
- name: Check that all nodes are running and ready
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
bin_dir: "/usr/local/bin"
|
||||
when: not ansible_os_family in ["Flatcar Container Linux by Kinvolk"]
|
||||
|
||||
- import_role:
|
||||
- import_role: # noqa unnamed-task
|
||||
name: cluster-dump
|
||||
|
||||
- name: Check kubectl output
|
||||
|
@ -21,7 +21,7 @@
|
|||
register: get_pods
|
||||
no_log: true
|
||||
|
||||
- debug:
|
||||
- debug: # noqa unnamed-task
|
||||
msg: "{{ get_pods.stdout.split('\n') }}"
|
||||
|
||||
- name: Check that all pods are running and ready
|
||||
|
@ -44,6 +44,6 @@
|
|||
register: get_pods
|
||||
no_log: true
|
||||
|
||||
- debug:
|
||||
- debug: # noqa unnamed-task
|
||||
msg: "{{ get_pods.stdout.split('\n') }}"
|
||||
failed_when: not run_pods_log is success
|
||||
|
|