Move to Ansible 3.4.0 (#7672)
* Ansible: move to Ansible 3.4.0 which uses ansible-base 2.10.10
* Docs: add a note about ansible upgrade post 2.9.x
* CI: ensure ansible is removed before ansible 3.x is installed to avoid pip failures
* Ansible: use newer ansible-lint
* Fix ansible-lint 5.0.11 found issues:
  * syntax issues
  * risky-file-permissions
  * var-naming
  * role-name
  * molecule tests
* Mitogen: use 0.3.0rc1 which adds support for ansible 2.10+
* Pin ansible-base to 2.10.11 to get package fix on RHEL8
parent b0e4c375a7
commit 7516fe142f
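Two themes run through the hunks below: every CI job now uninstalls the old ansible package before installing the pinned one, and task files gain explicit `mode:` settings plus `# noqa` markers to satisfy ansible-lint 5.0.11. A minimal sketch of the uninstall-then-reinstall sequence the CI hunks encode (assuming a pip-managed install and the repository's tests/requirements.txt):

```ShellSession
# ansible 3.x is a meta-package layered on ansible-base, so an in-place
# "pip install -U" over a 2.9 install fails; remove the old package first.
python -m pip uninstall -y ansible
python -m pip install -r tests/requirements.txt
```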
@@ -18,3 +18,13 @@ skip_list:
   # While it can be useful to have these metadata available, they are also available in the existing documentation.
   # (Disabled in May 2019)
   - '701'
+
+  # [role-name] "meta/main.yml" Role name role-name does not match ``^+$`` pattern
+  # Meta roles in Kubespray don't need proper names
+  # (Disabled in June 2021)
+  - 'role-name'
+
+  # [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
+  # In Kubespray we use variables that use camelCase to match their k8s counterparts
+  # (Disabled in June 2021)
+  - 'var-naming'
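With `role-name` and `var-naming` skipped, a lint run should pass even though Kubespray keeps camelCase variables (matching their k8s counterparts) and unnamed meta roles. A hedged invocation, using the ansible-lint version named in the commit message (the target path is illustrative):

```ShellSession
pip install 'ansible-lint==5.0.11'
ansible-lint roles/
```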
@@ -37,6 +37,7 @@ variables:
 before_script:
   - ./tests/scripts/rebase.sh
   - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
+  - python -m pip uninstall -y ansible
   - python -m pip install -r tests/requirements.txt
   - mkdir -p /.ssh
@@ -53,6 +53,7 @@ tox-inventory-builder:
     - ./tests/scripts/rebase.sh
     - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
+    - python -m pip uninstall -y ansible
     - python -m pip install -r tests/requirements.txt
   script:
     - pip3 install tox
@@ -11,6 +11,7 @@ molecule_tests:
     - tests/scripts/rebase.sh
     - apt-get update && apt-get install -y python3-pip
     - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
+    - python -m pip uninstall -y ansible
     - python -m pip install -r tests/requirements.txt
     - ./tests/scripts/vagrant_clean.sh
   script:
@@ -31,6 +32,7 @@ molecule_tests:
   before_script:
     - apt-get update && apt-get install -y python3-pip
     - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
+    - python -m pip uninstall -y ansible
     - python -m pip install -r tests/requirements.txt
     - ./tests/scripts/vagrant_clean.sh
   script:
@@ -4,6 +4,7 @@
   become: no
   vars:
     minimal_ansible_version: 2.9.0
+    minimal_ansible_version_2_10: 2.10.11
     maximal_ansible_version: 2.11.0
   ansible_connection: local
   tasks:
@@ -16,6 +17,17 @@
       tags:
         - check

+    - name: "Check Ansible version > {{ minimal_ansible_version_2_10 }} when using ansible 2.10"
+      assert:
+        msg: "When using Ansible 2.10, the minimum supported version is {{ minimal_ansible_version_2_10 }}"
+        that:
+          - ansible_version.string is version(minimal_ansible_version_2_10, ">=")
+          - ansible_version.string is version(maximal_ansible_version, "<")
+      when:
+        - ansible_version.string is version('2.10.0', ">=")
+      tags:
+        - check
+
     - name: "Check that python netaddr is installed"
       assert:
         msg: "Python netaddr is not present"
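The new assertion gates on `ansible_version.string`, which on an Ansible 3.x install reports the underlying ansible-base version, so it must land in the [2.10.11, 2.11.0) window. A quick way to see the value the check evaluates (assuming `ansible` is on PATH):

```ShellSession
# The first line shows the ansible-base version that ansible_version.string
# resolves to, e.g. "ansible 2.10.11"
ansible --version | head -n1
```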
@@ -12,3 +12,4 @@
   template:
     src: inventory.j2
     dest: "{{ playbook_dir }}/inventory"
+    mode: 0644
@@ -22,8 +22,10 @@
   template:
     src: inventory.j2
     dest: "{{ playbook_dir }}/inventory"
+    mode: 0644

 - name: Generate Load Balancer variables
   template:
     src: loadbalancer_vars.j2
     dest: "{{ playbook_dir }}/loadbalancer_vars.yml"
+    mode: 0644
@@ -8,11 +8,13 @@
     path: "{{ base_dir }}"
     state: directory
     recurse: true
+    mode: 0755

 - name: Store json files in base_dir
   template:
     src: "{{ item }}"
     dest: "{{ base_dir }}/{{ item }}"
+    mode: 0644
   with_items:
     - network.json
     - storage.json
@@ -35,6 +35,7 @@
       path-exclude=/usr/share/doc/*
       path-include=/usr/share/doc/*/copyright
     dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
+    mode: 0644
   when:
     - ansible_os_family == 'Debian'

@@ -63,6 +64,7 @@
   copy:
     content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL"
     dest: "/etc/sudoers.d/{{ distro_user }}"
+    mode: 0640

 - name: Add my pubkey to "{{ distro_user }}" user authorized keys
   authorized_key:
@@ -11,6 +11,7 @@
     state: directory
     owner: "{{ k8s_deployment_user }}"
     group: "{{ k8s_deployment_user }}"
+    mode: 0700

 - name: Configure sudo for deployment user
   copy:
@@ -82,6 +82,7 @@
   template:
     dest: "{{ gluster_mount_dir }}/.test-file.txt"
     src: test-file.txt
+    mode: 0644
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

 - name: Unmount glusterfs
@@ -1,5 +0,0 @@
----
-- hosts: all
-
-  roles:
-    - role_under_test
@@ -1,7 +1,10 @@
 ---
 - name: "Kubernetes Apps | Lay Down Heketi Bootstrap"
   become: true
-  template: { src: "heketi-bootstrap.json.j2", dest: "{{ kube_config_dir }}/heketi-bootstrap.json" }
+  template:
+    src: "heketi-bootstrap.json.j2"
+    dest: "{{ kube_config_dir }}/heketi-bootstrap.json"
+    mode: 0640
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
   kube:
@@ -10,6 +10,7 @@
   template:
     src: "topology.json.j2"
     dest: "{{ kube_config_dir }}/topology.json"
+    mode: 0644
 - name: "Copy topology configuration into container."
   changed_when: false
   command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
@@ -1,6 +1,9 @@
 ---
 - name: "Kubernetes Apps | Lay Down GlusterFS Daemonset"
-  template: { src: "glusterfs-daemonset.json.j2", dest: "{{ kube_config_dir }}/glusterfs-daemonset.json" }
+  template:
+    src: "glusterfs-daemonset.json.j2"
+    dest: "{{ kube_config_dir }}/glusterfs-daemonset.json"
+    mode: 0644
   become: true
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
@@ -27,7 +30,10 @@
   delay: 5

 - name: "Kubernetes Apps | Lay Down Heketi Service Account"
-  template: { src: "heketi-service-account.json.j2", dest: "{{ kube_config_dir }}/heketi-service-account.json" }
+  template:
+    src: "heketi-service-account.json.j2"
+    dest: "{{ kube_config_dir }}/heketi-service-account.json"
+    mode: 0644
   become: true
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure Heketi Service Account"
@@ -4,6 +4,7 @@
   template:
     src: "heketi-deployment.json.j2"
     dest: "{{ kube_config_dir }}/heketi-deployment.json"
+    mode: 0644
   register: "rendering"

 - name: "Kubernetes Apps | Install and configure Heketi"
@@ -5,7 +5,7 @@
   changed_when: false

 - name: "Kubernetes Apps | Deploy cluster role binding."
-  when: "clusterrolebinding_state.stdout == \"\""
+  when: "clusterrolebinding_state.stdout | length == 0"
   command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"

 - name: Get clusterrolebindings again
@@ -15,7 +15,7 @@

 - name: Make sure that clusterrolebindings are present now
   assert:
-    that: "clusterrolebinding_state.stdout != \"\""
+    that: "clusterrolebinding_state.stdout | length > 0"
     msg: "Cluster role binding is not present."

 - name: Get the heketi-config-secret secret
@@ -28,9 +28,10 @@
   template:
     src: "heketi.json.j2"
     dest: "{{ kube_config_dir }}/heketi.json"
+    mode: 0644

 - name: "Deploy Heketi config secret"
-  when: "secret_state.stdout == \"\""
+  when: "secret_state.stdout | length == 0"
   command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"

 - name: Get the heketi-config-secret secret again
@@ -2,7 +2,10 @@
 - name: "Kubernetes Apps | Lay Down Heketi Storage"
   become: true
   vars: { nodes: "{{ groups['heketi-node'] }}" }
-  template: { src: "heketi-storage.json.j2", dest: "{{ kube_config_dir }}/heketi-storage.json" }
+  template:
+    src: "heketi-storage.json.j2"
+    dest: "{{ kube_config_dir }}/heketi-storage.json"
+    mode: 0644
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure Heketi Storage"
   kube:
@@ -16,6 +16,7 @@
   template:
     src: "storageclass.yml.j2"
     dest: "{{ kube_config_dir }}/storageclass.yml"
+    mode: 0644
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure Storace Class"
   kube:
@@ -10,6 +10,7 @@
   template:
     src: "topology.json.j2"
     dest: "{{ kube_config_dir }}/topology.json"
+    mode: 0644
 - name: "Copy topology configuration into container." # noqa 503
   when: "rendering.changed"
   command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
@@ -19,7 +19,7 @@
   become: true
   shell: "pvs {{ disk_volume_device_1 }} --option vg_name | tail -n+2"
   register: "volume_groups"
-  ignore_errors: true
+  ignore_errors: true # noqa ignore-errors
   changed_when: false

 - name: "Remove volume groups." # noqa 301
@@ -35,7 +35,7 @@
     PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
   become: true
   command: "pvremove {{ disk_volume_device_1 }} --yes"
-  ignore_errors: true
+  ignore_errors: true # noqa ignore-errors

 - name: "Remove lvm utils (RedHat)"
   become: true
@@ -1,51 +1,51 @@
 ---
-- name: "Remove storage class." # noqa 301
+- name: Remove storage class. # noqa 301
   command: "{{ bin_dir }}/kubectl delete storageclass gluster"
-  ignore_errors: true
+  ignore_errors: true # noqa ignore-errors
-- name: "Tear down heketi." # noqa 301
+- name: Tear down heketi. # noqa 301
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
-  ignore_errors: true
+  ignore_errors: true # noqa ignore-errors
-- name: "Tear down heketi." # noqa 301
+- name: Tear down heketi. # noqa 301
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
-  ignore_errors: true
+  ignore_errors: true # noqa ignore-errors
-- name: "Tear down bootstrap."
+- name: Tear down bootstrap.
   include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
-- name: "Ensure there is nothing left over." # noqa 301
+- name: Ensure there is nothing left over. # noqa 301
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
   retries: 60
   delay: 5
-- name: "Ensure there is nothing left over." # noqa 301
+- name: Ensure there is nothing left over. # noqa 301
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
   retries: 60
   delay: 5
-- name: "Tear down glusterfs." # noqa 301
+- name: Tear down glusterfs. # noqa 301
   command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
-  ignore_errors: true
+  ignore_errors: true # noqa ignore-errors
-- name: "Remove heketi storage service." # noqa 301
+- name: Remove heketi storage service. # noqa 301
   command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
-  ignore_errors: true
+  ignore_errors: true # noqa ignore-errors
-- name: "Remove heketi gluster role binding" # noqa 301
+- name: Remove heketi gluster role binding # noqa 301
   command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
-  ignore_errors: true
+  ignore_errors: true # noqa ignore-errors
-- name: "Remove heketi config secret" # noqa 301
+- name: Remove heketi config secret # noqa 301
   command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
-  ignore_errors: true
+  ignore_errors: true # noqa ignore-errors
-- name: "Remove heketi db backup" # noqa 301
+- name: Remove heketi db backup # noqa 301
   command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
-  ignore_errors: true
+  ignore_errors: true # noqa ignore-errors
-- name: "Remove heketi service account" # noqa 301
+- name: Remove heketi service account # noqa 301
   command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
-  ignore_errors: true
+  ignore_errors: true # noqa ignore-errors
-- name: "Get secrets"
+- name: Get secrets
   command: "{{ bin_dir }}/kubectl get secrets --output=\"json\""
   register: "secrets"
   changed_when: false
-- name: "Remove heketi storage secret"
+- name: Remove heketi storage secret
   vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
   command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}"
   when: "storage_query is defined"
-  ignore_errors: true
+  ignore_errors: true # noqa ignore-errors
@@ -187,3 +187,28 @@ For more information about Ansible and bastion hosts, read
 ## Mitogen

 You can use [mitogen](mitogen.md) to speed up kubespray.
+
+## Beyond ansible 2.9
+
+The Ansible project has decided, in order to ease their maintenance burden, to split into
+two projects which are now joined under the Ansible umbrella.
+
+Ansible-base (2.10.x branch) will contain just the ansible language implementation, while
+the ansible modules that were previously bundled into a single repository will be part of the
+ansible 3.x package. Please see [this blog post](https://blog.while-true-do.io/ansible-release-3-0-0/)
+that explains in detail the need and the evolution plan.
+
+**Note:** this change means that ansible virtual envs cannot be upgraded with `pip install -U`.
+You first need to uninstall your old ansible (pre 2.10) version and install the new one.
+
+```ShellSession
+pip uninstall ansible
+cd kubespray/
+pip install -U .
+```
+
+**Note:** some changes needed to support ansible 2.10+ are not backwards compatible with 2.9.
+Kubespray needs to evolve and keep pace with upstream ansible and will eventually be forced to
+drop 2.9 support. Kubespray CIs use only the ansible version specified in `requirements.txt`,
+and while `ansible_version.yml` may allow older versions to be used, these are not
+exercised in the CI and compatibility is not guaranteed.
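Following the documented uninstall/reinstall, a hedged way to confirm the virtualenv ended up with the expected split packages (names are the PyPI distributions `ansible` and `ansible-base`):

```ShellSession
pip list 2>/dev/null | grep -i '^ansible'   # expect ansible 3.4.0 and ansible-base 2.10.11
ansible --version | head -n1                # the CLI reports the ansible-base version
```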
@@ -5,7 +5,7 @@
 - hosts: localhost
   strategy: linear
   vars:
-    mitogen_version: 0.2.9
+    mitogen_version: 0.3.0rc1
     mitogen_url: https://github.com/dw/mitogen/archive/v{{ mitogen_version }}.tar.gz
   ansible_connection: local
   tasks:
@@ -13,6 +13,7 @@
       file:
         path: "{{ item }}"
         state: directory
+        mode: 0755
       become: false
       loop:
         - "{{ playbook_dir }}/plugins/mitogen"
@@ -40,3 +41,4 @@
         section: defaults
         option: strategy
         value: mitogen_linear
+        mode: 0644
@@ -12,13 +12,13 @@
     - { role: kubespray-defaults}
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}

-- hosts: "{{ groups['etcd'] | first }}"
+- hosts: etcd[0]
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}
     - { role: recover_control_plane/etcd }

-- hosts: "{{ groups['kube_control_plane'] | first }}"
+- hosts: kube_control_plane[0]
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}
@@ -26,7 +26,7 @@

 - include: cluster.yml

-- hosts: "{{ groups['kube_control_plane'] }}"
+- hosts: kube_control_plane
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}
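The `etcd[0]` and `kube_control_plane[0]` forms are Ansible host-pattern subscripts: they select the first host of a group when the pattern is resolved, replacing the templated `groups[...] | first` expressions that newer ansible flags, since `groups` is not available when the play is parsed. A sketch for previewing what such a pattern matches (the inventory path is hypothetical):

```ShellSession
# List matched hosts without running any tasks
ansible -i inventory/mycluster/hosts.yaml 'etcd[0]' --list-hosts
ansible -i inventory/mycluster/hosts.yaml 'kube_control_plane' --list-hosts
```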
@@ -1,4 +1,5 @@
-ansible==2.9.20
+ansible==3.4.0
+ansible-base==2.10.11
 cryptography==2.8
 jinja2==2.11.3
 netaddr==0.7.19
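The pin works at two levels: the `ansible` 3.4.0 meta-package declares a dependency on ansible-base, and the explicit `ansible-base==2.10.11` line overrides that floating dependency to pick up the RHEL8 package fix named in the commit message. A hedged way to inspect the relationship after install:

```ShellSession
pip show ansible | grep -i '^requires'       # ansible-base should be listed
pip show ansible-base | grep -i '^version'   # expect 2.10.11 per the pin
```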
@@ -19,3 +19,4 @@
   template:
     src: ssh-bastion.conf
     dest: "{{ playbook_dir }}/ssh-bastion.conf"
+    mode: 0640
@@ -12,6 +12,7 @@
     value: "{{ http_proxy | default(omit) }}"
     state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}"
     no_extra_spaces: true
+    mode: 0644
   become: true
   when: not skip_http_proxy_on_os_packages

@@ -32,6 +33,7 @@
     section: "{{ item }}"
     option: enabled
     value: "1"
+    mode: 0644
   with_items:
     - ol7_latest
     - ol7_addons
@@ -56,6 +58,7 @@
     section: "ol{{ ansible_distribution_major_version }}_addons"
     option: "{{ item.option }}"
     value: "{{ item.value }}"
+    mode: 0644
   with_items:
     - { option: "name", value: "ol{{ ansible_distribution_major_version }}_addons" }
     - { option: "enabled", value: "1" }
@@ -11,7 +11,7 @@
 - name: Remove podman network cni
   raw: "podman network rm podman"
   become: true
-  ignore_errors: yes
+  ignore_errors: true # noqa ignore-errors
   when: need_bootstrap.rc != 0

 - name: Clean up possible pending packages on fedora coreos
|
||||||
- name: Reboot immediately for updated ostree, please run playbook again if failed first time.
|
- name: Reboot immediately for updated ostree, please run playbook again if failed first time.
|
||||||
raw: "nohup bash -c 'sleep 5s && shutdown -r now'"
|
raw: "nohup bash -c 'sleep 5s && shutdown -r now'"
|
||||||
become: true
|
become: true
|
||||||
ignore_errors: yes
|
ignore_errors: true # noqa ignore-errors
|
||||||
ignore_unreachable: yes
|
ignore_unreachable: yes
|
||||||
when: need_bootstrap.rc != 0
|
when: need_bootstrap.rc != 0
|
||||||
|
|
||||||
|
|
|
@@ -17,6 +17,7 @@
     value: "{{ http_proxy | default(omit) }}"
     state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}"
     no_extra_spaces: true
+    mode: 0644
   become: true
   when: not skip_http_proxy_on_os_packages

@@ -10,7 +10,7 @@
   register: stat_result

 - name: Create the /etc/sysconfig/proxy empty file
-  file:
+  file: # noqa risky-file-permissions
     path: /etc/sysconfig/proxy
     state: touch
   when:
@@ -12,6 +12,7 @@
     value: "{{ http_proxy | default(omit) }}"
     state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}"
     no_extra_spaces: true
+    mode: 0644
   become: true
   when: not skip_http_proxy_on_os_packages

@@ -19,7 +20,7 @@
   command: /sbin/subscription-manager status
   register: rh_subscription_status
   changed_when: "rh_subscription_status != 0"
-  ignore_errors: true
+  ignore_errors: true # noqa ignore-errors
   become: true

 - name: RHEL subscription Organization ID/Activation Key registration
@@ -35,12 +36,13 @@
     service_level_agreement: "{{ rh_subscription_sla }}"
     sync: true
   notify: RHEL auto-attach subscription
-  ignore_errors: true
+  ignore_errors: true # noqa ignore-errors
   become: true
   when:
     - rh_subscription_org_id is defined
     - rh_subscription_status.changed

+# this task has no_log set to prevent logging security sensitive information such as subscription passwords
 - name: RHEL subscription Username/Password registration
   redhat_subscription:
     state: present
@@ -54,8 +56,9 @@
     service_level_agreement: "{{ rh_subscription_sla }}"
     sync: true
   notify: RHEL auto-attach subscription
-  ignore_errors: true
+  ignore_errors: true # noqa ignore-errors
   become: true
+  no_log: true
   when:
     - rh_subscription_username is defined
     - rh_subscription_status.changed
@@ -4,4 +4,4 @@
   become: true
   roles:
     - role: kubespray-defaults
-    - role: containerd
+    - role: container-engine/containerd
@@ -23,12 +23,14 @@
   template:
     src: "fedora_containerd.repo.j2"
     dest: "{{ yum_repo_dir }}/containerd.repo"
+    mode: 0644
   when: ansible_distribution == "Fedora"

 - name: Configure containerd repository on RedHat/OracleLinux/CentOS/AlmaLinux
   template:
     src: "rh_containerd.repo.j2"
     dest: "{{ yum_repo_dir }}/containerd.repo"
+    mode: 0644
   when:
     - ansible_os_family == "RedHat"
     - ansible_distribution not in ["Fedora", "Amazon"]
@@ -58,11 +58,13 @@
   file:
     path: /etc/systemd/system/containerd.service.d
     state: directory
+    mode: 0755

 - name: Write containerd proxy drop-in
   template:
     src: http-proxy.conf.j2
     dest: /etc/systemd/system/containerd.service.d/http-proxy.conf
+    mode: 0644
   notify: restart containerd
   when: http_proxy is defined or https_proxy is defined

@@ -116,7 +118,7 @@
     - not is_ostree
     - containerd_package_info.pkgs|length > 0

-- include_role:
+- include_role: # noqa unnamed-task
     name: container-engine/crictl

 # you can sometimes end up in a state where everything is installed
@@ -4,4 +4,4 @@
   become: true
   roles:
     - role: kubespray-defaults
-    - role: cri-o
+    - role: container-engine/cri-o
@@ -53,6 +53,7 @@
     option: enabled
     value: "0"
     backup: yes
+    mode: 0644
   when:
     - ansible_distribution in ["Amazon"]
     - amzn2_extras_file_stat.stat.exists
@@ -119,6 +120,7 @@
     section: "{{ item.section }}"
     option: enabled
     value: 1
+    mode: 0644
   become: true
   when: is_ostree
   loop:
@@ -46,7 +46,7 @@
   import_tasks: "crio_repo.yml"
   when: crio_add_repos

-- include_role:
+- include_role: # noqa unnamed-task
     name: container-engine/crictl

 - name: Build a list of crio runtimes with Katacontainers runtimes
@@ -69,11 +69,13 @@
   file:
     path: "{{ item }}"
     state: directory
+    mode: 0755

 - name: Install cri-o config
   template:
     src: crio.conf.j2
     dest: /etc/crio/crio.conf
+    mode: 0644
   register: config_install

 - name: Add skopeo pkg to install
@@ -129,6 +131,7 @@
   copy:
     src: mounts.conf
     dest: /etc/containers/mounts.conf
+    mode: 0644
   when:
     - ansible_os_family == 'RedHat'
   notify: restart crio
@@ -147,6 +150,7 @@
     section: storage.options.overlay
     option: mountopt
     value: '{{ ''"nodev"'' if ansible_kernel is version_compare(("4.18" if ansible_os_family == "RedHat" else "4.19"), "<") else ''"nodev,metacopy=on"'' }}'
+    mode: 0644

 - name: Create directory registries configs
   file:
@@ -159,6 +163,7 @@
   template:
     src: registry-mirror.conf.j2
     dest: "/etc/containers/registries.conf.d/{{ item.prefix }}.conf"
+    mode: 0644
   loop: "{{ crio_registries_mirrors }}"
   notify: restart crio

@@ -166,6 +171,7 @@
   template:
     src: http-proxy.conf.j2
     dest: /etc/systemd/system/crio.service.d/http-proxy.conf
+    mode: 0644
   notify: restart crio
   when: http_proxy is defined or https_proxy is defined

@@ -4,4 +4,4 @@
   become: true
   roles:
     - role: kubespray-defaults
-    - role: docker
+    - role: container-engine/docker
@@ -80,12 +80,14 @@
   template:
     src: "fedora_docker.repo.j2"
     dest: "{{ yum_repo_dir }}/docker.repo"
+    mode: 0644
   when: ansible_distribution == "Fedora" and not is_ostree

 - name: Configure docker repository on RedHat/CentOS/Oracle/AlmaLinux Linux
   template:
     src: "rh_docker.repo.j2"
     dest: "{{ yum_repo_dir }}/docker-ce.repo"
+    mode: 0644
   when:
     - ansible_os_family == "RedHat"
     - ansible_distribution != "Fedora"
@@ -145,7 +147,7 @@
         state: started
       when: docker_task_result is not changed
   rescue:
-    - debug:
+    - debug: # noqa unnamed-task
         msg: "Docker start failed. Try to remove our config"
     - name: remove kubespray generated config
       file:
@@ -3,11 +3,13 @@
   file:
     path: /etc/systemd/system/docker.service.d
     state: directory
+    mode: 0755

 - name: Write docker proxy drop-in
   template:
     src: http-proxy.conf.j2
     dest: /etc/systemd/system/docker.service.d/http-proxy.conf
+    mode: 0644
   notify: restart docker
   when: http_proxy is defined or https_proxy is defined

@@ -25,6 +27,7 @@
   template:
     src: docker.service.j2
     dest: /etc/systemd/system/docker.service
+    mode: 0644
   register: docker_service_file
   notify: restart docker
   when:
@@ -35,12 +38,14 @@
   template:
     src: docker-options.conf.j2
     dest: "/etc/systemd/system/docker.service.d/docker-options.conf"
+    mode: 0644
   notify: restart docker

 - name: Write docker dns systemd drop-in
   template:
     src: docker-dns.conf.j2
     dest: "/etc/systemd/system/docker.service.d/docker-dns.conf"
+    mode: 0644
   notify: restart docker
   when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'

@@ -55,7 +60,9 @@
   template:
     src: docker-orphan-cleanup.conf.j2
     dest: "/etc/systemd/system/docker.service.d/docker-orphan-cleanup.conf"
+    mode: 0644
   notify: restart docker
   when: docker_orphan_clean_up | bool

-- meta: flush_handlers
+- name: Flush handlers
+  meta: flush_handlers
@@ -7,5 +7,5 @@
     container_manager: containerd
   roles:
     - role: kubespray-defaults
-    - role: containerd
-    - role: gvisor
+    - role: container-engine/containerd
+    - role: container-engine/gvisor
@@ -5,7 +5,7 @@
   roles:
     - role: kubespray-defaults
     - role: bootstrap-os
-    - role: ../adduser
+    - role: adduser
       user: "{{ addusers.kube }}"
   tasks:
     - include_tasks: "../../../../download/tasks/download_file.yml"
@@ -20,8 +20,8 @@
     kube_network_plugin: cni
   roles:
     - role: kubespray-defaults
-    - role: ../network_plugin/cni
-    - role: crictl
+    - role: network_plugin/cni
+    - role: container-engine/crictl
   tasks:
     - name: Copy test container files
       copy:
@@ -6,5 +6,5 @@
     kata_containers_enabled: true
   roles:
     - role: kubespray-defaults
-    - role: containerd
-    - role: kata-containers
+    - role: container-engine/containerd
+    - role: container-engine/kata-containers
@@ -15,11 +15,13 @@
   file:
     path: "{{ kata_containers_config_dir }}"
     state: directory
+    mode: 0755

 - name: kata-containers | Set configuration
   template:
     src: "{{ item }}.j2"
     dest: "{{ kata_containers_config_dir }}/{{ item }}"
+    mode: 0644
   with_items:
     - configuration-qemu.toml
@@ -1,3 +1,4 @@
+# noqa role-name - this is a meta role that doesn't need a name
 ---
 dependencies:
   - role: container-engine/kata-containers
@@ -18,7 +18,7 @@
   when:
     - not download_always_pull

-- debug:
+- debug: # noqa unnamed-task
     msg: "Pull {{ image_reponame }} required is: {{ pull_required }}"

 - name: download_container | Determine if image is in cache
@@ -48,6 +48,7 @@
     - not download_localhost

 # This must always be called, to check if the checksum matches. On no-match the file is re-downloaded.
+# This task will avoid logging its parameters to not leak environment passwords in the log
 - name: download_file | Download item
   get_url:
     url: "{{ download.url }}"
@@ -67,6 +68,7 @@
   retries: 4
   delay: "{{ retry_stagger | default(5) }}"
   environment: "{{ proxy_env }}"
+  no_log: true

 - name: download_file | Copy file back to ansible host file cache
   synchronize:
@@ -38,7 +38,7 @@
   run_once: true
   register: test_become
   changed_when: false
-  ignore_errors: true
+  ignore_errors: true # noqa ignore-errors
   become: true
   when:
     - download_localhost
@@ -53,7 +53,7 @@
   run_once: true
   register: test_docker
   changed_when: false
-  ignore_errors: true
+  ignore_errors: true # noqa ignore-errors
   become: false
   when:
     - download_localhost
@@ -18,6 +18,7 @@
   template:
     src: "kubeadm-images.yaml.j2"
     dest: "{{ kube_config_dir }}/kubeadm-images.yaml"
+    mode: 0644
   when:
     - not skip_kubeadm_images|default(false)

@@ -45,6 +45,7 @@
     src: "etcd-{{ etcd_deployment_type }}.service.j2"
     dest: /etc/systemd/system/etcd.service
     backup: yes
+    mode: 0644
   when: is_etcd_master and etcd_cluster_setup

 - name: Configure | Copy etcd-events.service systemd file
@@ -52,6 +53,7 @@
     src: "etcd-events-{{ etcd_deployment_type }}.service.j2"
     dest: /etc/systemd/system/etcd-events.service
     backup: yes
+    mode: 0644
   when: is_etcd_master and etcd_events_cluster_setup

 - name: Configure | reload systemd
@@ -65,7 +67,7 @@
     name: etcd
     state: started
     enabled: yes
-  ignore_errors: "{{ etcd_cluster_is_healthy.rc == 0 }}"
+  ignore_errors: "{{ etcd_cluster_is_healthy.rc == 0 }}" # noqa ignore-errors
   when: is_etcd_master and etcd_cluster_setup

 # when scaling new etcd will fail to start
@@ -74,7 +76,7 @@
     name: etcd-events
     state: started
     enabled: yes
-  ignore_errors: "{{ etcd_events_cluster_is_healthy.rc == 0 }}"
+  ignore_errors: "{{ etcd_events_cluster_is_healthy.rc != 0 }}" # noqa ignore-errors
   when: is_etcd_master and etcd_events_cluster_setup

 - name: Configure | Wait for etcd cluster to be healthy
@@ -126,7 +128,7 @@
 - name: Configure | Check if member is in etcd cluster
   shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_access_address }}"
   register: etcd_member_in_cluster
-  ignore_errors: true
+  ignore_errors: true # noqa ignore-errors
   changed_when: false
   check_mode: no
   when: is_etcd_master and etcd_cluster_setup
@@ -142,7 +144,7 @@
 - name: Configure | Check if member is in etcd-events cluster
   shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_access_address }}"
   register: etcd_events_member_in_cluster
-  ignore_errors: true
+  ignore_errors: true # noqa ignore-errors
   changed_when: false
   check_mode: no
   when: is_etcd_master and etcd_events_cluster_setup
@@ -21,6 +21,7 @@
   template:
     src: "openssl.conf.j2"
     dest: "{{ etcd_config_dir }}/openssl.conf"
+    mode: 0640
   run_once: yes
   delegate_to: "{{ groups['etcd'][0] }}"
   when:
@@ -3,6 +3,7 @@
   template:
     src: etcd.env.j2
     dest: /etc/etcd.env
+    mode: 0640
   notify: restart etcd
   when: is_etcd_master and etcd_cluster_setup

@@ -10,5 +11,6 @@
   template:
     src: etcd-events.env.j2
     dest: /etc/etcd-events.env
+    mode: 0640
   notify: restart etcd-events
   when: is_etcd_master and etcd_events_cluster_setup
@@ -21,6 +21,7 @@
     src: "{{ etcd_cert_dir }}/ca.pem"
     dest: "{{ ca_cert_path }}"
     remote_src: true
+    mode: 0640
   register: etcd_ca_cert

 - name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Flatcar) # noqa 503
@@ -3,7 +3,7 @@
   shell: "{{ bin_dir }}/kubectl get deploy -n kube-system coredns -o jsonpath='{ .spec.template.metadata.annotations.createdby }'"
   register: createdby_annotation
   changed_when: false
-  ignore_errors: true
+  ignore_errors: true # noqa ignore-errors
   when:
     - dns_mode in ['coredns', 'coredns_dual']
     - inventory_hostname == groups['kube_control_plane'][0]
@@ -30,6 +30,7 @@
   template:
     src: "{{ item.file }}.j2"
     dest: "{{ kube_config_dir }}/{{ item.file }}"
+    mode: 0640
   register: psp_manifests
   with_items:
     - {file: psp.yml, type: psp, name: psp}
@@ -61,6 +62,7 @@
   template:
     src: "node-crb.yml.j2"
     dest: "{{ kube_config_dir }}/node-crb.yml"
+    mode: 0640
   register: node_crb_manifest
   when:
     - rbac_enabled
@@ -86,6 +88,7 @@
   template:
     src: "node-webhook-cr.yml.j2"
     dest: "{{ kube_config_dir }}/node-webhook-cr.yml"
+    mode: 0640
   register: node_webhook_cr_manifest
   when:
     - rbac_enabled
@@ -111,6 +114,7 @@
   template:
     src: "node-webhook-crb.yml.j2"
     dest: "{{ kube_config_dir }}/node-webhook-crb.yml"
+    mode: 0640
   register: node_webhook_crb_manifest
   when:
     - rbac_enabled
@@ -139,7 +143,7 @@
     - cloud_provider == 'oci'

 - name: PriorityClass | Copy k8s-cluster-critical-pc.yml file
-  copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml
+  copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml mode=0640
   when: inventory_hostname == groups['kube_control_plane']|last

 - name: PriorityClass | Create k8s-cluster-critical
@@ -3,6 +3,7 @@
   copy:
     src: "oci-rbac.yml"
     dest: "{{ kube_config_dir }}/oci-rbac.yml"
+    mode: 0640
   when:
     - cloud_provider is defined
     - cloud_provider == 'oci'
@@ -12,7 +12,7 @@
 - name: CephFS Provisioner | Remove legacy namespace
   shell: |
     {{ bin_dir }}/kubectl delete namespace {{ cephfs_provisioner_namespace }}
-  ignore_errors: yes
+  ignore_errors: true # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
   tags:
@ -21,7 +21,7 @@
|
||||||
- name: CephFS Provisioner | Remove legacy storageclass
|
- name: CephFS Provisioner | Remove legacy storageclass
|
||||||
shell: |
|
shell: |
|
||||||
{{ bin_dir }}/kubectl delete storageclass {{ cephfs_provisioner_storage_class }}
|
{{ bin_dir }}/kubectl delete storageclass {{ cephfs_provisioner_storage_class }}
|
||||||
ignore_errors: yes
|
ignore_errors: true # noqa ignore-errors
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube_control_plane'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
tags:
|
tags:
|
||||||
|
|
|
@@ -12,7 +12,7 @@
 - name: RBD Provisioner | Remove legacy namespace
   shell: |
     {{ bin_dir }}/kubectl delete namespace {{ rbd_provisioner_namespace }}
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
   tags:

@@ -21,7 +21,7 @@
 - name: RBD Provisioner | Remove legacy storageclass
   shell: |
     {{ bin_dir }}/kubectl delete storageclass {{ rbd_provisioner_storage_class }}
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
   tags:

@@ -63,6 +63,7 @@
   template:
     src: "{{ item.file }}.j2"
     dest: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.file }}"
+    mode: 0644
   with_items: "{{ rbd_provisioner_templates }}"
   register: rbd_provisioner_manifests
   when: inventory_hostname == groups['kube_control_plane'][0]

@@ -12,7 +12,7 @@
 - name: Cert Manager | Remove legacy namespace
   shell: |
     {{ bin_dir }}/kubectl delete namespace {{ cert_manager_namespace }}
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
   tags:

@@ -55,7 +55,7 @@
   command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n metallb-system get secret memberlist"
   register: metallb_secret
   become: true
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]

@@ -12,12 +12,12 @@
   run_once: true

 - name: kube-router | Wait for kube-router pods to be ready
-  command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa 601
+  command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa 601 ignore-errors
   register: pods_not_ready
   until: pods_not_ready.stdout.find("kube-router")==-1
   retries: 30
   delay: 10
-  ignore_errors: yes
+  ignore_errors: true
   delegate_to: "{{ groups['kube_control_plane'] | first }}"
   run_once: true
   changed_when: false

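The wait task above is the standard Ansible polling idiom: re-run a read-only kubectl query with until/retries/delay until the jsonpath expression stops returning not-ready pods. A generic sketch of the idiom (the label and binary path are illustrative):

- name: Wait until all pods of an app report ready
  command: kubectl get pods -l k8s-app=example -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'
  register: pods_not_ready
  until: pods_not_ready.stdout | length == 0   # empty output means nothing is unready
  retries: 30
  delay: 10
  changed_when: false
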
@@ -12,7 +12,7 @@
     - apiserver-kubelet-client.key
     - front-proxy-client.crt
     - front-proxy-client.key
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors

 - name: Backup old confs
   copy:

@@ -25,4 +25,4 @@
     - controller-manager.conf
     - kubelet.conf
     - scheduler.conf
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors

@@ -50,18 +50,21 @@
   file:
     path: "{{ audit_policy_file | dirname }}"
     state: directory
+    mode: 0640
   when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false)

 - name: Write api audit policy yaml
   template:
     src: apiserver-audit-policy.yaml.j2
     dest: "{{ audit_policy_file }}"
+    mode: 0640
   when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false)

 - name: Write api audit webhook config yaml
   template:
     src: apiserver-audit-webhook-config.yaml.j2
     dest: "{{ audit_webhook_config_file }}"
+    mode: 0640
   when: kubernetes_audit_webhook|default(false)

 # Nginx LB(default), If kubeadm_config_api_fqdn is defined, use other LB by kubeadm controlPlaneEndpoint.

@@ -7,12 +7,14 @@
   template:
     src: webhook-token-auth-config.yaml.j2
     dest: "{{ kube_config_dir }}/webhook-token-auth-config.yaml"
+    mode: 0640
   when: kube_webhook_token_auth|default(false)

 - name: Create webhook authorization config
   template:
     src: webhook-authorization-config.yaml.j2
     dest: "{{ kube_config_dir }}/webhook-authorization-config.yaml"
+    mode: 0640
   when: kube_webhook_authorization|default(false)

 - name: Create kube-scheduler config

@@ -40,7 +42,7 @@
   when: ansible_os_family in ["Debian","RedHat"]
   tags:
     - kubectl
-  ignore_errors: True
+  ignore_errors: true  # noqa ignore-errors

 - name: Set kubectl bash completion file permissions
   file:

@@ -52,7 +54,7 @@
   tags:
     - kubectl
     - upgrade
-  ignore_errors: True
+  ignore_errors: true  # noqa ignore-errors

 - name: Disable SecurityContextDeny admission-controller and enable PodSecurityPolicy
   set_fact:

@@ -77,12 +79,13 @@
   template:
     src: k8s-certs-renew.sh.j2
     dest: "{{ bin_dir }}/k8s-certs-renew.sh"
-    mode: '755'
+    mode: 0755

 - name: Renew K8S control plane certificates monthly 1/2
   template:
     src: "{{ item }}.j2"
     dest: "/etc/systemd/system/{{ item }}"
+    mode: 0644
   with_items:
     - k8s-certs-renew.service
     - k8s-certs-renew.timer

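The `mode: '755'` to `mode: 0755` swap in the k8s-certs-renew hunk is about octal notation, not the resulting permissions: a quoted string is interpreted as octal by the module, and a zero-prefixed literal is parsed as octal by YAML itself, so both yield rwxr-xr-x. The spelling to avoid is an unquoted, unprefixed integer, which YAML reads as decimal. A sketch of the three forms on a hypothetical file:

- name: Mode spellings compared
  file:
    path: /tmp/example          # hypothetical path
    state: touch
    mode: 0755                  # YAML octal literal - fine
    # mode: '0755'              # quoted octal string - also fine
    # mode: 755                 # decimal 755 = octal 1363 - almost never what you want
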
@@ -61,6 +61,7 @@
     src: "kubeadm-client.conf.{{ kubeadmConfig_api_version }}.j2"
     dest: "{{ kube_config_dir }}/kubeadm-client.conf"
     backup: yes
+    mode: 0640
   when: not is_kube_master

 - name: Join to cluster if needed

@@ -35,8 +35,10 @@
     - node_labels is defined
     - node_labels is mapping

-- debug: var=role_node_labels
-- debug: var=inventory_node_labels
+- debug:  # noqa unnamed-task
+    var: role_node_labels
+- debug:  # noqa unnamed-task
+    var: inventory_node_labels

 - name: Set label to node
   command: >-

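ansible-lint 5's unnamed-task rule requires a name on every task; trivial debug tasks are conventionally waived inline instead, and the key=value shorthand is expanded to proper YAML dict syntax at the same time. The resulting shape, with a hypothetical variable:

- debug:  # noqa unnamed-task
    var: some_registered_result   # hypothetical variable name
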
@@ -18,6 +18,7 @@
     src: "kubelet.env.{{ kubeletConfig_api_version }}.j2"
     dest: "{{ kube_config_dir }}/kubelet.env"
     backup: yes
+    mode: 0640
   notify: Node | restart kubelet
   tags:
     - kubelet

@@ -27,6 +28,7 @@
   template:
     src: "kubelet-config.{{ kubeletConfig_api_version }}.yaml.j2"
     dest: "{{ kube_config_dir }}/kubelet-config.yaml"
+    mode: 0640
   notify: Kubelet | restart kubelet
   tags:
     - kubelet

@@ -37,6 +39,7 @@
     src: "kubelet.service.j2"
     dest: "/etc/systemd/system/kubelet.service"
     backup: "yes"
+    mode: 0644
   notify: Node | restart kubelet
   tags:
     - kubelet

@@ -31,3 +31,4 @@
   template:
     src: manifests/haproxy.manifest.j2
     dest: "{{ kube_manifest_dir }}/haproxy.yml"
+    mode: 0640

@@ -31,3 +31,4 @@
   template:
     src: manifests/nginx-proxy.manifest.j2
     dest: "{{ kube_manifest_dir }}/nginx-proxy.yml"
+    mode: 0640

@@ -57,6 +57,7 @@
   file:
     path: /etc/modules-load.d
     state: directory
+    mode: 0755

 - name: Enable br_netfilter module
   modprobe:

@@ -68,6 +69,7 @@
   copy:
     dest: /etc/modules-load.d/kubespray-br_netfilter.conf
     content: br_netfilter
+    mode: 0644
   when: modinfo_br_netfilter.rc == 0

 # kube-proxy needs net.bridge.bridge-nf-call-iptables enabled when found if br_netfilter is not a module

@@ -108,7 +110,7 @@
     name: nf_conntrack_ipv4
     state: present
   register: modprobe_nf_conntrack_ipv4
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - kube_proxy_mode == 'ipvs'
   tags:

@@ -117,6 +119,7 @@
 - name: Persist ip_vs modules
   copy:
     dest: /etc/modules-load.d/kube_proxy-ipvs.conf
+    mode: 0644
     content: |
       ip_vs
       ip_vs_rr

@@ -16,4 +16,4 @@
 - name: Disable swap
   command: /sbin/swapoff -a
   when: swapon.stdout
-  ignore_errors: "{{ ansible_check_mode }}"
+  ignore_errors: "{{ ansible_check_mode }}"  # noqa ignore-errors

@@ -4,6 +4,7 @@
     path: "{{ item }}"
     state: directory
     owner: kube
+    mode: 0755
   when: inventory_hostname in groups['k8s_cluster']
   become: true
   tags:

@@ -28,6 +29,7 @@
     path: "{{ item }}"
     state: directory
     owner: root
+    mode: 0755
   when: inventory_hostname in groups['k8s_cluster']
   become: true
   tags:

@@ -59,6 +61,7 @@
     src: "{{ kube_cert_dir }}"
     dest: "{{ kube_cert_compat_dir }}"
     state: link
+    mode: 0755
   when:
     - inventory_hostname in groups['k8s_cluster']
     - kube_cert_dir != kube_cert_compat_dir

@@ -69,6 +72,7 @@
     path: "{{ item }}"
     state: directory
     owner: kube
+    mode: 0755
   with_items:
     - "/etc/cni/net.d"
     - "/opt/cni/bin"

@@ -18,6 +18,7 @@
     create: yes
     backup: yes
     marker: "# Ansible entries {mark}"
+    mode: 0644
   notify: Preinstall | propagate resolvconf to k8s components

 - name: Remove search/domain/nameserver options before block

@@ -19,6 +19,7 @@
       [keyfile]
       unmanaged-devices+=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico
     dest: /etc/NetworkManager/conf.d/calico.conf
+    mode: 0644
   when:
     - nm_check.rc == 0
     - kube_network_plugin == "calico"

@@ -32,5 +33,6 @@
       [keyfile]
       unmanaged-devices+=interface-name:kube-ipvs0;interface-name:nodelocaldns
     dest: /etc/NetworkManager/conf.d/k8s.conf
+    mode: 0644
   when: nm_check.rc == 0
   notify: Preinstall | reload NetworkManager

@@ -30,6 +30,7 @@
     state: present
     create: yes
     backup: yes
+    mode: 0644
   when:
     - disable_ipv6_dns
     - not ansible_os_family in ["Flatcar Container Linux by Kinvolk"]

@@ -59,6 +60,7 @@
   file:
     name: "{{ sysctl_file_path | dirname }}"
     state: directory
+    mode: 0755

 - name: Enable ip forwarding
   sysctl:

@@ -22,6 +22,7 @@
     backup: yes
     unsafe_writes: yes
     marker: "# Ansible inventory hosts {mark}"
+    mode: 0644
   when: populate_inventory_to_hosts_file

 - name: Hosts | populate kubernetes loadbalancer address into hosts file

@@ -11,6 +11,7 @@
     insertbefore: BOF
     backup: yes
     marker: "# Ansible entries {mark}"
+    mode: 0644
   notify: Preinstall | propagate resolvconf to k8s components
   when: dhclientconffile is defined

@@ -91,7 +91,8 @@

 # We need to make sure the network is restarted early enough so that docker can later pick up the correct system
 # nameservers and search domains
-- meta: flush_handlers
+- name: Flush handlers
+  meta: flush_handlers

 - name: Check if we are running inside a Azure VM
   stat:

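Bare `- meta: flush_handlers` entries now carry a name so lint and playbook output can identify them. flush_handlers itself forces any handlers notified so far to run at that exact point rather than at the end of the play, which is what lets a network restart land before later tasks depend on it. The pattern in isolation:

- name: Flush handlers
  meta: flush_handlers   # run all pending notified handlers now, not at end of play
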
@@ -16,7 +16,7 @@
   until: pods_not_ready.stdout.find("cilium")==-1
   retries: 30
   delay: 10
-  ignore_errors: yes
+  failed_when: false
   when: inventory_hostname == groups['kube_control_plane'][0]

 - name: Cilium | Hubble install

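The cilium hunk trades ignore_errors for failed_when: false, and the two are not interchangeable. failed_when: false rewrites the task's failure condition, so the task is reported as ok; ignore_errors: true leaves the task reported as failed and merely keeps the play moving. A sketch contrasting the two on a hypothetical always-failing command:

- name: Probe that should never count as a failure
  command: /bin/false
  register: probe
  failed_when: false     # task shows ok; check probe.rc yourself if needed

- name: Probe whose failure stays visible
  command: /bin/false
  ignore_errors: true    # task shows failed (ignored), play continues
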
@@ -23,7 +23,7 @@
   slurp:
     src: /etc/cni/net.d/10-kuberouter.conflist
   register: cni_config_slurp
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors

 - name: kube-router | Set cni_config variable
   set_fact:

@@ -2,7 +2,7 @@
 - name: Get etcd endpoint health
   command: "{{ bin_dir }}/etcdctl endpoint health"
   register: etcd_endpoint_health
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   changed_when: false
   check_mode: no
   environment:

@@ -38,13 +38,13 @@
     state: absent
   delegate_to: "{{ item }}"
   with_items: "{{ groups['broken_etcd'] }}"
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   when:
     - groups['broken_etcd']
     - has_quorum

 - name: Delete old certificates
-  # noqa 302 - rm is ok here for now
+  # noqa 302 ignore-error - rm is ok here for now
   shell: "rm {{ etcd_cert_dir }}/*{{ item }}*"
   with_items: "{{ groups['broken_etcd'] }}"
   register: delete_old_cerificates

@@ -13,6 +13,7 @@
   copy:
     src: "{{ etcd_snapshot }}"
     dest: /tmp/snapshot.db
+    mode: 0640
   when: etcd_snapshot is defined

 - name: Stop etcd

@@ -1,5 +1,5 @@
 ---
-- name: Delete node  # noqa 301
+- name: Delete node  # noqa 301 ignore-errors
   command: "{{ bin_dir }}/kubectl delete node {{ kube_override_hostname|default(inventory_hostname) }}"
   delegate_to: "{{ groups['kube_control_plane']|first }}"
-  ignore_errors: yes
+  ignore_errors: true

@@ -27,7 +27,7 @@
 - name: Lookup etcd member id
   shell: "{{ bin_dir }}/etcdctl member list | grep {{ node_ip }} | cut -d, -f1"
   register: etcd_member_id
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   changed_when: false
   check_mode: no
   tags:

@@ -86,7 +86,7 @@
   when:
     - crictl.stat.exists
     - container_manager in ["crio", "containerd"]
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors

 - name: reset | force remove all cri containers
   command: "{{ bin_dir }}/crictl rm -a -f"

@@ -129,7 +129,7 @@
   when:
     - crictl.stat.exists
     - container_manager == "containerd"
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors

 - block:
     - name: reset | force remove all cri pods

@@ -206,7 +206,7 @@

 - name: Clear IPVS virtual server table
   command: "ipvsadm -C"
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s_cluster']

@@ -306,7 +306,7 @@
     - /etc/modules-load.d/kube_proxy-ipvs.conf
     - /etc/modules-load.d/kubespray-br_netfilter.conf
     - /usr/libexec/kubernetes
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   tags:
     - files

@@ -333,7 +333,7 @@
     - dns

 - name: reset | include file with reset tasks specific to the network_plugin if exists
-  include_tasks: "{{ (role_path + '/../network_plugin/' + kube_network_plugin + '/tasks/reset.yml') | realpath }}"
+  include_tasks: "{{ (role_path,'../network_plugin',kube_network_plugin,'tasks/reset.yml') | path_join | realpath }}"
   when:
     - kube_network_plugin in ['flannel', 'cilium', 'kube-router', 'calico']
   tags:

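The include_tasks change swaps string concatenation for the path_join filter that arrived with ansible-base 2.10; it joins a sequence of path segments with the correct separator before realpath normalizes the result. A standalone sketch with hypothetical segments:

- name: Build a path from segments
  debug:
    msg: "{{ ('/etc', 'kubernetes', 'manifests', 'example.yml') | path_join }}"
    # prints /etc/kubernetes/manifests/example.yml
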
@@ -29,10 +29,12 @@
   register: patch_kube_proxy_state
   when: current_kube_proxy_state.stdout | trim | lower != "linux"

-- debug: msg={{ patch_kube_proxy_state.stdout_lines }}
+- debug:  # noqa unnamed-task
+    msg: "{{ patch_kube_proxy_state.stdout_lines }}"
   when: patch_kube_proxy_state is not skipped

-- debug: msg={{ patch_kube_proxy_state.stderr_lines }}
+- debug:  # noqa unnamed-task
+    msg: "{{ patch_kube_proxy_state.stderr_lines }}"
   when: patch_kube_proxy_state is not skipped
   tags: init
   when:

@@ -135,6 +135,7 @@
     path: "/tmp/{{ archive_dirname }}"
     dest: "{{ dir|default('.') }}/logs.tar.gz"
     remove: true
+    mode: 0640
   delegate_to: localhost
   connection: local
   become: false

@@ -4,6 +4,7 @@
   file:
     state: directory
     path: "{{ images_dir }}"
+    mode: 0755

 - name: Download images files
   get_url:

@@ -39,6 +40,7 @@
   template:
     src: Dockerfile
     dest: "{{ images_dir }}/Dockerfile"
+    mode: 0644

 - name: Create docker images for each OS  # noqa 301
   command: docker build -t {{ registry }}/vm-{{ item.key }}:{{ item.value.tag }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }}

@@ -22,3 +22,4 @@
   template:
     src: ../templates/inventory-aws.j2  # noqa 404 CI inventory templates are not in role_path
     dest: "{{ inventory_path }}"
+    mode: 0644

@@ -79,7 +79,7 @@
   register: droplets
   with_items: "{{ instance_names }}"

-- debug:
+- debug:  # noqa unnamed-task
     msg: "{{ droplets }}, {{ inventory_path }}"
   when: state == 'present'

@@ -87,4 +87,5 @@
   template:
     src: ../templates/inventory-do.j2  # noqa 404 CI templates are not in role_path
     dest: "{{ inventory_path }}"
+    mode: 0644
   when: state == 'present'

@@ -28,7 +28,7 @@
 {%- endif -%}

 - name: Create gce instances
-  gce:
+  google.cloud.gcp_compute_instance:
     instance_names: "{{ instance_names }}"
     machine_type: "{{ cloud_machine_type }}"
     image: "{{ cloud_image | default(omit) }}"

@@ -53,17 +53,20 @@
   template:
     src: ../templates/inventory-gce.j2
     dest: "{{ inventory_path }}"
+    mode: 0644

 - name: Make group_vars directory
   file:
     path: "{{ inventory_path|dirname }}/group_vars"
     state: directory
+    mode: 0755
   when: mode in ['scale', 'separate-scale', 'ha-scale']

 - name: Template fake hosts group vars  # noqa 404 CI templates are not in role_path
   template:
     src: ../templates/fake_hosts.yml.j2
     dest: "{{ inventory_path|dirname }}/group_vars/fake_hosts.yml"
+    mode: 0644
   when: mode in ['scale', 'separate-scale', 'ha-scale']

 - name: Delete group_vars directory

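ansible-base 2.10 moved most modules out of the core package into collections, and the retired gce module gives way to the google.cloud collection here, with tasks naming modules by their fully qualified collection name (FQCN). Note the collection module's parameter schema differs from the old gce module, so the surrounding arguments generally need reworking as well. A generic FQCN sketch using a builtin module (paths hypothetical):

- name: FQCN form of a builtin module
  ansible.builtin.template:
    src: example.conf.j2
    dest: /etc/example.conf
    mode: 0644
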
@@ -20,7 +20,7 @@
 {%- endif -%}

 - name: stop gce instances
-  gce:
+  google.cloud.gcp_compute_instance:
     instance_names: "{{ instance_names }}"
     image: "{{ cloud_image | default(omit) }}"
     service_account_email: "{{ gce_service_account_email }}"

@@ -34,7 +34,7 @@
   register: gce

 - name: delete gce instances
-  gce:
+  google.cloud.gcp_compute_instance:
     instance_names: "{{ instance_names }}"
     image: "{{ cloud_image | default(omit) }}"
     service_account_email: "{{ gce_service_account_email }}"

@@ -12,11 +12,13 @@
   file:
     path: "/tmp/{{ test_name }}"
     state: directory
+    mode: 0755

 - name: Template vm files for CI job
   template:
     src: "vm.yml.j2"
     dest: "/tmp/{{ test_name }}/instance-{{ vm_id }}.yml"
+    mode: 0644
   loop: "{{ range(1, vm_count|int + 1, 1) | list }}"
   loop_control:
     index_var: vm_id

@@ -47,5 +49,6 @@
   template:
     src: "inventory.j2"
     dest: "{{ inventory_path }}"
+    mode: 0644
   vars:
     vms: "{{ vm_ips }}"

@@ -33,11 +33,13 @@
   template:
     src: gcs_life.json.j2
     dest: "{{ dir }}/gcs_life.json"
+    mode: 0644

 - name: Create a boto config to access GCS
   template:
     src: boto.j2
     dest: "{{ dir }}/.boto"
+    mode: 0640
   no_log: True

 - name: Download gsutil cp installer

@@ -74,5 +76,5 @@
   failed_when: false
   no_log: True

-- debug:
+- debug:  # noqa unnamed-task
     msg: "A public url https://storage.googleapis.com/{{ test_name }}/{{ file_name }}"

@@ -4,7 +4,7 @@ apache-libcloud==2.2.1
 tox==3.11.1
 dopy==0.3.7
 cryptography==2.8
-ansible-lint==4.2.0
+ansible-lint==5.0.11
 openshift==0.8.8
 molecule==3.0.6
 molecule-vagrant==0.3

@@ -1,6 +1,7 @@
 #!/bin/bash
 set -euxo pipefail

+/usr/bin/python -m pip uninstall -y ansible
 /usr/bin/python -m pip install -r tests/requirements.txt
 mkdir -p /.ssh
 mkdir -p cluster-dump

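The inserted uninstall line exists because pip cannot upgrade the monolithic ansible 2.9 package in place to the split ansible 3.x / ansible-base packaging; the old distribution has to be removed before the pinned requirements are installed. The same ordering expressed as Ansible tasks, for illustration:

- name: Remove the old monolithic ansible package first
  pip:
    name: ansible
    state: absent

- name: Install the pinned test requirements
  pip:
    requirements: tests/requirements.txt
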
@@ -9,7 +9,7 @@
   status_code: 200
   register: apiserver_response

-- debug:
+- debug:  # noqa unnamed-task
     msg: "{{ apiserver_response.json }}"

 - name: Check API servers version

@@ -12,7 +12,7 @@
   bin_dir: "/usr/local/bin"
   when: not ansible_os_family in ["Flatcar Container Linux by Kinvolk"]

-- import_role:
+- import_role:  # noqa unnamed-task
     name: cluster-dump

 - name: Check kubectl output

@@ -21,7 +21,7 @@
   register: get_nodes
   no_log: true

-- debug:
+- debug:  # noqa unnamed-task
     msg: "{{ get_nodes.stdout.split('\n') }}"

 - name: Check that all nodes are running and ready

@@ -12,7 +12,7 @@
   bin_dir: "/usr/local/bin"
   when: not ansible_os_family in ["Flatcar Container Linux by Kinvolk"]

-- import_role:
+- import_role:  # noqa unnamed-task
     name: cluster-dump

 - name: Check kubectl output

@@ -21,7 +21,7 @@
   register: get_pods
   no_log: true

-- debug:
+- debug:  # noqa unnamed-task
     msg: "{{ get_pods.stdout.split('\n') }}"

 - name: Check that all pods are running and ready

@@ -44,6 +44,6 @@
   register: get_pods
   no_log: true

-- debug:
+- debug:  # noqa unnamed-task
     msg: "{{ get_pods.stdout.split('\n') }}"
   failed_when: not run_pods_log is success