Resolve ansible-lint name errors (#10253)

* project: fix ansible-lint name
* project: ignore jinja template error in names
* project: capitalize ansible name
* project: update notify after name capitalization

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

Branch: pull/10312/head
Parent: b9e3861385
Commit: 36e5d742dc
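The pattern applied throughout the diff below is the same everywhere: plays gain an explicit name (satisfying the name[play] and name[missing] rules), task and handler names get a leading capital (name[casing]), and notify entries are updated to match the renamed handlers. An illustrative sketch of the before/after shape, not copied from any single file in this change:

  # Before: unnamed play, lowercase task name
  - hosts: localhost
    tasks:
      - name: do something
        command: /bin/true

  # After: named play, capitalized task name
  - name: Describe what this play does
    hosts: localhost
    tasks:
      - name: Do something
        command: /bin/true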
@@ -22,18 +22,7 @@ skip_list:
   # (Disabled in Feb 2023)
   - 'fqcn-builtins'
 
-  # names should start with an uppercase letter
-  # (Disabled in June 2023 after ansible upgrade; FIXME)
-  - 'name[casing]'
-
-  # Everything should be named
-  # (Disabled in June 2023 after ansible upgrade; FIXME)
-  - 'name[play]'
-  - 'name[missing]'
-
-  # templates should only be at the end of 'name'
-  # (Disabled in June 2023 after ansible upgrade; FIXME)
-  - 'name[jinja]'
+  # We use template in names
   - 'name[template]'
 
   # order of keys errors
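Of the name rules, only name[template] stays skipped, because a number of task names interpolate a variable in the middle of the string rather than at the end. One such name, quoted from a hunk further down, shows what the remaining skip still allows:

  - name: "Add my pubkey to {{ distro_user }} user authorized keys"
    ansible.posix.authorized_key:
      user: "{{ distro_user }}"
      state: present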
@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Generate Azure inventory
+  hosts: localhost
   gather_facts: False
   roles:
     - generate-inventory

@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Generate Azure inventory
+  hosts: localhost
   gather_facts: False
   roles:
     - generate-inventory_2

@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Generate Azure templates
+  hosts: localhost
   gather_facts: False
   roles:
     - generate-templates
@@ -1,9 +1,11 @@
 ---
-- hosts: localhost
+- name: Create nodes as docker containers
+  hosts: localhost
   gather_facts: False
   roles:
     - { role: dind-host }
 
-- hosts: containers
+- name: Customize each node containers
+  hosts: containers
   roles:
     - { role: dind-cluster }

@@ -1,9 +1,9 @@
 ---
-- name: set_fact distro_setup
+- name: Set_fact distro_setup
   set_fact:
     distro_setup: "{{ distro_settings[node_distro] }}"
 
-- name: set_fact other distro settings
+- name: Set_fact other distro settings
   set_fact:
     distro_user: "{{ distro_setup['user'] }}"
     distro_ssh_service: "{{ distro_setup['ssh_service'] }}"
@@ -66,7 +66,7 @@
     dest: "/etc/sudoers.d/{{ distro_user }}"
     mode: 0640
 
-- name: Add my pubkey to "{{ distro_user }}" user authorized keys
+- name: "Add my pubkey to {{ distro_user }} user authorized keys"
   ansible.posix.authorized_key:
     user: "{{ distro_user }}"
     state: present

@@ -1,9 +1,9 @@
 ---
-- name: set_fact distro_setup
+- name: Set_fact distro_setup
   set_fact:
     distro_setup: "{{ distro_settings[node_distro] }}"
 
-- name: set_fact other distro settings
+- name: Set_fact other distro settings
   set_fact:
     distro_image: "{{ distro_setup['image'] }}"
     distro_init: "{{ distro_setup['init'] }}"
@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Prepare Hypervisor to later install kubespray VMs
+  hosts: localhost
   gather_facts: False
   become: yes
   vars:

@@ -22,9 +22,9 @@
       - ntp
   when: ansible_os_family == "Debian"
 
-# Create deployment user if required
-- include_tasks: user.yml
+- name: Create deployment user if required
+  include_tasks: user.yml
   when: k8s_deployment_user is defined
 
-# Set proper sysctl values
-- import_tasks: sysctl.yml
+- name: Set proper sysctl values
+  import_tasks: sysctl.yml
@@ -2,7 +2,8 @@
 - name: Check ansible version
   import_playbook: kubernetes_sigs.kubespray.ansible_version
 
-- hosts: localhost
+- name: Install mitogen
+  hosts: localhost
   strategy: linear
   vars:
     mitogen_version: 0.3.2

@@ -19,24 +20,24 @@
         - "{{ playbook_dir }}/plugins/mitogen"
         - "{{ playbook_dir }}/dist"
 
-    - name: download mitogen release
+    - name: Download mitogen release
       get_url:
         url: "{{ mitogen_url }}"
         dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
         validate_certs: true
         mode: 0644
 
-    - name: extract archive
+    - name: Extract archive
       unarchive:
         src: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
         dest: "{{ playbook_dir }}/dist/"
 
-    - name: copy plugin
+    - name: Copy plugin
      ansible.posix.synchronize:
        src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
        dest: "{{ playbook_dir }}/plugins/mitogen"
 
-    - name: add strategy to ansible.cfg
+    - name: Add strategy to ansible.cfg
       community.general.ini_file:
         path: ansible.cfg
         mode: 0644
@@ -1,24 +1,29 @@
 ---
-- hosts: gfs-cluster
+- name: Bootstrap hosts
+  hosts: gfs-cluster
   gather_facts: false
   vars:
     ansible_ssh_pipelining: false
   roles:
     - { role: bootstrap-os, tags: bootstrap-os}
 
-- hosts: all
+- name: Gather facts
+  hosts: all
   gather_facts: true
 
-- hosts: gfs-cluster
+- name: Install glusterfs server
+  hosts: gfs-cluster
   vars:
     ansible_ssh_pipelining: true
   roles:
     - { role: glusterfs/server }
 
-- hosts: k8s_cluster
+- name: Install glusterfs servers
+  hosts: k8s_cluster
   roles:
     - { role: glusterfs/client }
 
-- hosts: kube_control_plane[0]
+- name: Configure Kubernetes to use glusterfs
+  hosts: kube_control_plane[0]
   roles:
     - { role: kubernetes-pv }
@@ -3,10 +3,12 @@
 # hyperkube and needs to be installed as part of the system.
 
 # Setup/install tasks.
-- include_tasks: setup-RedHat.yml
+- name: Setup RedHat distros for glusterfs
+  include_tasks: setup-RedHat.yml
   when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined
 
-- include_tasks: setup-Debian.yml
+- name: Setup Debian distros for glusterfs
+  include_tasks: setup-Debian.yml
   when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined
 
 - name: Ensure Gluster mount directories exist.

@@ -4,13 +4,13 @@
   include_vars: "{{ ansible_os_family }}.yml"
 
 # Install xfs package
-- name: install xfs Debian
+- name: Install xfs Debian
   apt:
     name: xfsprogs
     state: present
   when: ansible_os_family == "Debian"
 
-- name: install xfs RedHat
+- name: Install xfs RedHat
   package:
     name: xfsprogs
     state: present
@@ -23,7 +23,7 @@
     dev: "{{ disk_volume_device_1 }}"
 
 # Mount external volumes
-- name: mounting new xfs filesystem
+- name: Mounting new xfs filesystem
   ansible.posix.mount:
     name: "{{ gluster_volume_node_mount_dir }}"
     src: "{{ disk_volume_device_1 }}"

@@ -31,10 +31,12 @@
     state: mounted
 
 # Setup/install tasks.
-- include_tasks: setup-RedHat.yml
+- name: Setup RedHat distros for glusterfs
+  include_tasks: setup-RedHat.yml
   when: ansible_os_family == 'RedHat'
 
-- include_tasks: setup-Debian.yml
+- name: Setup Debian distros for glusterfs
+  include_tasks: setup-Debian.yml
   when: ansible_os_family == 'Debian'
 
 - name: Ensure GlusterFS is started and enabled at boot.
@@ -1,9 +1,11 @@
 ---
-- hosts: kube_control_plane[0]
+- name: Tear down heketi
+  hosts: kube_control_plane[0]
   roles:
     - { role: tear-down }
 
-- hosts: heketi-node
+- name: Teardown disks in heketi
+  hosts: heketi-node
   become: yes
   roles:
     - { role: tear-down-disks }

@@ -1,9 +1,11 @@
 ---
-- hosts: heketi-node
+- name: Prepare heketi install
+  hosts: heketi-node
   roles:
     - { role: prepare }
 
-- hosts: kube_control_plane[0]
+- name: Provision heketi
+  hosts: kube_control_plane[0]
   tags:
     - "provision"
   roles:
@@ -1,3 +1,3 @@
 ---
-- name: "stop port forwarding"
+- name: "Stop port forwarding"
   command: "killall "
@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Collect container images for offline deployment
+  hosts: localhost
   become: no
 
   roles:

@@ -11,7 +12,8 @@
 
   tasks:
     # Generate files.list and images.list files from templates.
-    - template:
+    - name: Collect container images for offline deployment
+      template:
         src: ./contrib/offline/temp/{{ item }}.list.template
         dest: ./contrib/offline/temp/{{ item }}.list
         mode: 0644
@@ -1,4 +1,5 @@
 ---
-- hosts: all
+- name: Disable firewalld/ufw
+  hosts: all
   roles:
     - { role: prepare }

@@ -1,5 +1,6 @@
 ---
-- block:
+- name: Disable firewalld and ufw
+  block:
     - name: List services
       service_facts:
 
@@ -1,5 +1,6 @@
 ---
-- hosts: kube_node:kube_control_plane
+- name: Remove old cloud provider config
+  hosts: kube_node:kube_control_plane
   tasks:
     - name: Remove old cloud provider config
       file:

@@ -7,7 +8,8 @@
         state: absent
       with_items:
         - /etc/kubernetes/cloud_config
-- hosts: kube_control_plane[0]
+- name: Migrate intree Cinder PV
+  hosts: kube_control_plane[0]
   tasks:
     - name: Include kubespray-default variables
       include_vars: ../roles/kubespray-defaults/defaults/main.yaml
@@ -10,13 +10,15 @@
 ### In most cases, you probably want to use upgrade-cluster.yml playbook and
 ### not this one.
 
-- hosts: localhost
+- name: Setup ssh config to use the bastion
+  hosts: localhost
   gather_facts: False
   roles:
     - { role: kubespray-defaults}
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
 
-- hosts: k8s_cluster:etcd:calico_rr
+- name: Bootstrap hosts OS for Ansible
+  hosts: k8s_cluster:etcd:calico_rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   gather_facts: false
   vars:

@@ -27,7 +29,8 @@
     - { role: kubespray-defaults}
     - { role: bootstrap-os, tags: bootstrap-os}
 
-- hosts: k8s_cluster:etcd:calico_rr
+- name: Preinstall
+  hosts: k8s_cluster:etcd:calico_rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults}
@@ -1,5 +1,6 @@
 ---
-- hosts: all
+- name: Wait for cloud-init to finish
+  hosts: all
   tasks:
     - name: Wait for cloud-init to finish
       command: cloud-init status --wait

@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Check Ansible version
+  hosts: localhost
   gather_facts: false
   become: no
   vars:
@@ -5,7 +5,8 @@
 - name: Ensure compatibility with old groups
   import_playbook: legacy_groups.yml
 
-- hosts: bastion[0]
+- name: Install bastion ssh config
+  hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"
   roles:

@@ -16,7 +17,8 @@
   tags: always
   import_playbook: facts.yml
 
-- hosts: k8s_cluster:etcd
+- name: Prepare for etcd install
+  hosts: k8s_cluster:etcd
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"

@@ -26,7 +28,8 @@
     - { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
     - { role: download, tags: download, when: "not skip_downloads" }
 
-- hosts: etcd:kube_control_plane
+- name: Install etcd
+  hosts: etcd:kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"

@@ -39,7 +42,8 @@
       etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
     when: etcd_deployment_type != "kubeadm"
 
-- hosts: k8s_cluster
+- name: Install etcd certs on nodes if required
+  hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"

@@ -55,7 +59,8 @@
       - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
       - kube_network_plugin != "calico" or calico_datastore == "etcd"
 
-- hosts: k8s_cluster
+- name: Install Kubernetes nodes
+  hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"

@@ -63,7 +68,8 @@
     - { role: kubespray-defaults }
     - { role: kubernetes/node, tags: node }
 
-- hosts: kube_control_plane
+- name: Install the control plane
+  hosts: kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"

@@ -73,7 +79,8 @@
     - { role: kubernetes/client, tags: client }
     - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
 
-- hosts: k8s_cluster
+- name: Invoke kubeadm and install a CNI
+  hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"

@@ -84,7 +91,8 @@
     - { role: network_plugin, tags: network }
     - { role: kubernetes-apps/kubelet-csr-approver, tags: kubelet-csr-approver }
 
-- hosts: calico_rr
+- name: Install Calico Route Reflector
+  hosts: calico_rr
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"

@@ -92,7 +100,8 @@
    - { role: kubespray-defaults }
    - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }
 
-- hosts: kube_control_plane[0]
+- name: Patch Kubernetes for Windows
+  hosts: kube_control_plane[0]
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"

@@ -100,7 +109,8 @@
    - { role: kubespray-defaults }
    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
 
-- hosts: kube_control_plane
+- name: Install Kubernetes apps
+  hosts: kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -1,5 +1,6 @@
 ---
-- hosts: k8s_cluster:etcd:calico_rr
+- name: Bootstrap hosts for Ansible
+  hosts: k8s_cluster:etcd:calico_rr
   strategy: linear
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   gather_facts: false
@@ -6,7 +6,7 @@
   gather_facts: false
   tags: always
   tasks:
-    - name: add nodes to kube_control_plane group
+    - name: Add nodes to kube_control_plane group
       group_by:
         key: 'kube_control_plane'
 

@@ -15,7 +15,7 @@
   gather_facts: false
   tags: always
   tasks:
-    - name: add nodes to kube_node group
+    - name: Add nodes to kube_node group
       group_by:
         key: 'kube_node'
 

@@ -24,7 +24,7 @@
   gather_facts: false
   tags: always
   tasks:
-    - name: add nodes to k8s_cluster group
+    - name: Add nodes to k8s_cluster group
       group_by:
         key: 'k8s_cluster'
 

@@ -33,7 +33,7 @@
   gather_facts: false
   tags: always
   tasks:
-    - name: add nodes to calico_rr group
+    - name: Add nodes to calico_rr group
       group_by:
         key: 'calico_rr'
 

@@ -42,6 +42,6 @@
   gather_facts: false
   tags: always
   tasks:
-    - name: add nodes to no-floating group
+    - name: Add nodes to no-floating group
       group_by:
         key: 'no_floating'
@@ -5,29 +5,34 @@
 - name: Ensure compatibility with old groups
   import_playbook: legacy_groups.yml
 
-- hosts: bastion[0]
+- name: Install bastion ssh config
+  hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
 
-- hosts: etcd[0]
+- name: Recover etcd
+  hosts: etcd[0]
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}
     - role: recover_control_plane/etcd
       when: etcd_deployment_type != "kubeadm"
 
-- hosts: kube_control_plane[0]
+- name: Recover control plane
+  hosts: kube_control_plane[0]
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}
     - { role: recover_control_plane/control-plane }
 
-- import_playbook: cluster.yml
+- name: Apply whole cluster install
+  import_playbook: cluster.yml
 
-- hosts: kube_control_plane
+- name: Perform post recover tasks
+  hosts: kube_control_plane
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}
@@ -5,14 +5,16 @@
 - name: Ensure compatibility with old groups
   import_playbook: legacy_groups.yml
 
-- hosts: bastion[0]
+- name: Install bastion ssh config
+  hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
 
-- hosts: "{{ node | default('etcd:k8s_cluster:calico_rr') }}"
+- name: Confirm node removal
+  hosts: "{{ node | default('etcd:k8s_cluster:calico_rr') }}"
   gather_facts: no
   tasks:
     - name: Confirm Execution

@@ -32,7 +34,8 @@
   import_playbook: facts.yml
   when: reset_nodes | default(True) | bool
 
-- hosts: "{{ node | default('kube_node') }}"
+- name: Reset node
+  hosts: "{{ node | default('kube_node') }}"
   gather_facts: no
   environment: "{{ proxy_disable_env }}"
   roles:

@@ -42,7 +45,8 @@
     - { role: reset, tags: reset, when: reset_nodes | default(True) | bool }
 
 # Currently cannot remove first master or etcd
-- hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
+- name: Post node removal
+  hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
   gather_facts: no
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -5,7 +5,8 @@
 - name: Ensure compatibility with old groups
   import_playbook: legacy_groups.yml
 
-- hosts: bastion[0]
+- name: Install bastion ssh config
+  hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"
   roles:

@@ -15,7 +16,8 @@
 - name: Gather facts
   import_playbook: facts.yml
 
-- hosts: etcd:k8s_cluster:calico_rr
+- name: Reset cluster
+  hosts: etcd:k8s_cluster:calico_rr
   gather_facts: False
   pre_tasks:
     - name: Reset Confirmation
@@ -5,7 +5,8 @@
 - name: Ensure compatibility with old groups
   import_playbook: legacy_groups.yml
 
-- hosts: bastion[0]
+- name: Install bastion ssh config
+  hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"
   roles:

@@ -88,7 +89,7 @@
       environment: "{{ proxy_disable_env }}"
       register: kubeadm_upload_cert
       changed_when: false
-    - name: set fact 'kubeadm_certificate_key' for later use
+    - name: Set fact 'kubeadm_certificate_key' for later use
       set_fact:
         kubeadm_certificate_key: "{{ kubeadm_upload_cert.stdout_lines[-1] | trim }}"
       when: kubeadm_certificate_key is not defined
@@ -5,7 +5,8 @@
 - name: Ensure compatibility with old groups
   import_playbook: legacy_groups.yml
 
-- hosts: bastion[0]
+- name: Install bastion ssh config
+  hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"
   roles:

@@ -46,7 +47,8 @@
     - { role: kubespray-defaults }
     - { role: container-engine, tags: "container-engine", when: deploy_container_engine }
 
-- hosts: etcd:kube_control_plane
+- name: Install etcd
+  hosts: etcd:kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"

@@ -59,7 +61,8 @@
       etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
     when: etcd_deployment_type != "kubeadm"
 
-- hosts: k8s_cluster
+- name: Install etcd certs on nodes if required
+  hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"

@@ -126,7 +129,8 @@
     - { role: kubernetes/node-label, tags: node-label }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
 
-- hosts: kube_control_plane[0]
+- name: Patch Kubernetes for Windows
+  hosts: kube_control_plane[0]
   gather_facts: False
   any_errors_fatal: true
   environment: "{{ proxy_disable_env }}"

@@ -134,7 +138,8 @@
     - { role: kubespray-defaults }
     - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
 
-- hosts: calico_rr
+- name: Install Calico Route Reflector
+  hosts: calico_rr
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"

@@ -142,7 +147,8 @@
     - { role: kubespray-defaults }
     - { role: network_plugin/calico/rr, tags: network }
 
-- hosts: kube_control_plane
+- name: Install Kubernetes apps
+  hosts: kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -1,5 +1,5 @@
 ---
-- name: set bastion host IP and port
+- name: Set bastion host IP and port
   set_fact:
     bastion_ip: "{{ hostvars[groups['bastion'][0]]['ansible_host'] | d(hostvars[groups['bastion'][0]]['ansible_ssh_host']) }}"
     bastion_port: "{{ hostvars[groups['bastion'][0]]['ansible_port'] | d(hostvars[groups['bastion'][0]]['ansible_ssh_port']) | d(22) }}"

@@ -12,7 +12,7 @@
   set_fact:
     real_user: "{{ ansible_user }}"
 
-- name: create ssh bastion conf
+- name: Create ssh bastion conf
   become: false
   delegate_to: localhost
   connection: local
@@ -6,37 +6,46 @@
   # This command should always run, even in check mode
   check_mode: false
 
-- include_tasks: bootstrap-centos.yml
+- name: Bootstrap CentOS
+  include_tasks: bootstrap-centos.yml
   when: '''ID="centos"'' in os_release.stdout_lines or ''ID="ol"'' in os_release.stdout_lines or ''ID="almalinux"'' in os_release.stdout_lines or ''ID="rocky"'' in os_release.stdout_lines or ''ID="kylin"'' in os_release.stdout_lines or ''ID="uos"'' in os_release.stdout_lines or ''ID="openEuler"'' in os_release.stdout_lines'
 
-- include_tasks: bootstrap-amazon.yml
+- name: Bootstrap Amazon
+  include_tasks: bootstrap-amazon.yml
   when: '''ID="amzn"'' in os_release.stdout_lines'
 
-- include_tasks: bootstrap-redhat.yml
+- name: Bootstrap RedHat
+  include_tasks: bootstrap-redhat.yml
   when: '''ID="rhel"'' in os_release.stdout_lines'
 
-- include_tasks: bootstrap-clearlinux.yml
+- name: Bootstrap Clear Linux
+  include_tasks: bootstrap-clearlinux.yml
   when: '''ID=clear-linux-os'' in os_release.stdout_lines'
 
 # Fedora CoreOS
-- include_tasks: bootstrap-fedora-coreos.yml
+- name: Bootstrap Fedora CoreOS
+  include_tasks: bootstrap-fedora-coreos.yml
   when:
     - '''ID=fedora'' in os_release.stdout_lines'
     - '''VARIANT_ID=coreos'' in os_release.stdout_lines'
 
-- include_tasks: bootstrap-flatcar.yml
+- name: Bootstrap Flatcar
+  include_tasks: bootstrap-flatcar.yml
   when: '''ID=flatcar'' in os_release.stdout_lines'
 
-- include_tasks: bootstrap-debian.yml
+- name: Bootstrap Debian
+  include_tasks: bootstrap-debian.yml
   when: '''ID=debian'' in os_release.stdout_lines or ''ID=ubuntu'' in os_release.stdout_lines'
 
 # Fedora "classic"
-- include_tasks: bootstrap-fedora.yml
+- name: Boostrap Fedora
+  include_tasks: bootstrap-fedora.yml
   when:
     - '''ID=fedora'' in os_release.stdout_lines'
     - '''VARIANT_ID=coreos'' not in os_release.stdout_lines'
 
-- include_tasks: bootstrap-opensuse.yml
+- name: Bootstrap OpenSUSE
+  include_tasks: bootstrap-opensuse.yml
   when: '''ID="opensuse-leap"'' in os_release.stdout_lines or ''ID="opensuse-tumbleweed"'' in os_release.stdout_lines'
 
 - name: Create remote_tmp for it is used by another module
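The bootstrap hunk above shows the fix for bare includes flagged by name[missing]: each include_tasks simply gains a task name. The same before/after shape, using the Debian branch from the hunk above as the example:

  # Before
  - include_tasks: bootstrap-debian.yml
    when: '''ID=debian'' in os_release.stdout_lines or ''ID=ubuntu'' in os_release.stdout_lines'

  # After
  - name: Bootstrap Debian
    include_tasks: bootstrap-debian.yml
    when: '''ID=debian'' in os_release.stdout_lines or ''ID=ubuntu'' in os_release.stdout_lines'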
@@ -1,5 +1,5 @@
 ---
-- name: containerd-common | check if fedora coreos
+- name: Containerd-common | check if fedora coreos
   stat:
     path: /run/ostree-booted
     get_attributes: no

@@ -7,11 +7,11 @@
     get_mime: no
   register: ostree
 
-- name: containerd-common | set is_ostree
+- name: Containerd-common | set is_ostree
   set_fact:
     is_ostree: "{{ ostree.stat.exists }}"
 
-- name: containerd-common | gather os specific variables
+- name: Containerd-common | gather os specific variables
   include_vars: "{{ item }}"
   with_first_found:
     - files:
@@ -1,5 +1,5 @@
 ---
-- name: restart containerd
+- name: Restart containerd
   command: /bin/true
   notify:
     - Containerd | restart containerd
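Handlers are referenced by name from notify, so capitalizing a handler such as restart containerd only works if every notify entry is updated in the same commit, which is what the "update notify after name capitalization" change does. A sketch of the resulting pairing, assembled from the containerd hunks nearby:

  # Handler
  - name: Restart containerd
    command: /bin/true
    notify:
      - Containerd | restart containerd

  # Task that triggers it
  - name: Containerd | Copy containerd config file
    template:
      src: config.toml.j2
      dest: "{{ containerd_cfg_dir }}/config.toml"
      owner: "root"
      mode: 0640
    notify: Restart containerd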
@@ -12,7 +12,8 @@
     - role: adduser
       user: "{{ addusers.kube }}"
   tasks:
-    - include_tasks: "../../../../download/tasks/download_file.yml"
+    - name: Download CNI
+      include_tasks: "../../../../download/tasks/download_file.yml"
       vars:
         download: "{{ download_defaults | combine(downloads.cni) }}"
 
@@ -5,33 +5,33 @@
   when:
     - not (allow_unsupported_distribution_setup | default(false)) and (ansible_distribution not in containerd_supported_distributions)
 
-- name: containerd | Remove any package manager controlled containerd package
+- name: Containerd | Remove any package manager controlled containerd package
   package:
     name: "{{ containerd_package }}"
     state: absent
   when:
     - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
 
-- name: containerd | Remove containerd repository
+- name: Containerd | Remove containerd repository
   file:
     path: "{{ yum_repo_dir }}/containerd.repo"
     state: absent
   when:
     - ansible_os_family in ['RedHat']
 
-- name: containerd | Remove containerd repository
+- name: Containerd | Remove containerd repository
   apt_repository:
     repo: "{{ item }}"
     state: absent
   with_items: "{{ containerd_repo_info.repos }}"
   when: ansible_pkg_mgr == 'apt'
 
-- name: containerd | Download containerd
+- name: Containerd | Download containerd
   include_tasks: "../../../download/tasks/download_file.yml"
   vars:
     download: "{{ download_defaults | combine(downloads.containerd) }}"
 
-- name: containerd | Unpack containerd archive
+- name: Containerd | Unpack containerd archive
   unarchive:
     src: "{{ downloads.containerd.dest }}"
     dest: "{{ containerd_bin_dir }}"

@@ -39,9 +39,9 @@
     remote_src: yes
     extra_opts:
       - --strip-components=1
-  notify: restart containerd
+  notify: Restart containerd
 
-- name: containerd | Remove orphaned binary
+- name: Containerd | Remove orphaned binary
   file:
     path: "/usr/bin/{{ item }}"
     state: absent

@@ -56,14 +56,14 @@
     - containerd-shim-runc-v2
     - ctr
 
-- name: containerd | Generate systemd service for containerd
+- name: Containerd | Generate systemd service for containerd
   template:
     src: containerd.service.j2
     dest: /etc/systemd/system/containerd.service
     mode: 0644
-  notify: restart containerd
+  notify: Restart containerd
 
-- name: containerd | Ensure containerd directories exist
+- name: Containerd | Ensure containerd directories exist
   file:
     dest: "{{ item }}"
     state: directory

@@ -76,50 +76,51 @@
     - "{{ containerd_storage_dir }}"
     - "{{ containerd_state_dir }}"
 
-- name: containerd | Write containerd proxy drop-in
+- name: Containerd | Write containerd proxy drop-in
   template:
     src: http-proxy.conf.j2
     dest: "{{ containerd_systemd_dir }}/http-proxy.conf"
     mode: 0644
-  notify: restart containerd
+  notify: Restart containerd
   when: http_proxy is defined or https_proxy is defined
 
-- name: containerd | Generate default base_runtime_spec
+- name: Containerd | Generate default base_runtime_spec
   register: ctr_oci_spec
   command: "{{ containerd_bin_dir }}/ctr oci spec"
   check_mode: false
   changed_when: false
 
-- name: containerd | Store generated default base_runtime_spec
+- name: Containerd | Store generated default base_runtime_spec
   set_fact:
     containerd_default_base_runtime_spec: "{{ ctr_oci_spec.stdout | from_json }}"
 
-- name: containerd | Write base_runtime_specs
+- name: Containerd | Write base_runtime_specs
   copy:
     content: "{{ item.value }}"
     dest: "{{ containerd_cfg_dir }}/{{ item.key }}"
     owner: "root"
     mode: 0644
   with_dict: "{{ containerd_base_runtime_specs | default({}) }}"
-  notify: restart containerd
+  notify: Restart containerd
 
-- name: containerd | Copy containerd config file
+- name: Containerd | Copy containerd config file
   template:
     src: config.toml.j2
     dest: "{{ containerd_cfg_dir }}/config.toml"
     owner: "root"
     mode: 0640
-  notify: restart containerd
+  notify: Restart containerd
 
-- block:
-    - name: containerd | Create registry directories
+- name: Containerd | Configure containerd registries
+  block:
+    - name: Containerd | Create registry directories
       file:
         path: "{{ containerd_cfg_dir }}/certs.d/{{ item.key }}"
         state: directory
         mode: 0755
         recurse: true
       with_dict: "{{ containerd_insecure_registries }}"
-    - name: containerd | Write hosts.toml file
+    - name: Containerd | Write hosts.toml file
       blockinfile:
         path: "{{ containerd_cfg_dir }}/certs.d/{{ item.key }}/hosts.toml"
         mode: 0640
@@ -134,10 +135,10 @@
 
 # you can sometimes end up in a state where everything is installed
 # but containerd was not started / enabled
-- name: containerd | Flush handlers
+- name: Containerd | Flush handlers
   meta: flush_handlers
 
-- name: containerd | Ensure containerd is started and enabled
+- name: Containerd | Ensure containerd is started and enabled
   systemd:
     name: containerd
     daemon_reload: yes
|
@ -1,5 +1,5 @@
|
||||||
---
|
---
|
||||||
- name: containerd | Remove containerd repository for RedHat os family
|
- name: Containerd | Remove containerd repository for RedHat os family
|
||||||
file:
|
file:
|
||||||
path: "{{ yum_repo_dir }}/containerd.repo"
|
path: "{{ yum_repo_dir }}/containerd.repo"
|
||||||
state: absent
|
state: absent
|
||||||
|
@ -8,7 +8,7 @@
|
||||||
tags:
|
tags:
|
||||||
- reset_containerd
|
- reset_containerd
|
||||||
|
|
||||||
- name: containerd | Remove containerd repository for Debian os family
|
- name: Containerd | Remove containerd repository for Debian os family
|
||||||
apt_repository:
|
apt_repository:
|
||||||
repo: "{{ item }}"
|
repo: "{{ item }}"
|
||||||
state: absent
|
state: absent
|
||||||
|
@ -17,7 +17,7 @@
|
||||||
tags:
|
tags:
|
||||||
- reset_containerd
|
- reset_containerd
|
||||||
|
|
||||||
- name: containerd | Stop containerd service
|
- name: Containerd | Stop containerd service
|
||||||
service:
|
service:
|
||||||
name: containerd
|
name: containerd
|
||||||
daemon_reload: true
|
daemon_reload: true
|
||||||
|
@ -26,7 +26,7 @@
|
||||||
tags:
|
tags:
|
||||||
- reset_containerd
|
- reset_containerd
|
||||||
|
|
||||||
- name: containerd | Remove configuration files
|
- name: Containerd | Remove configuration files
|
||||||
file:
|
file:
|
||||||
path: "{{ item }}"
|
path: "{{ item }}"
|
||||||
state: absent
|
state: absent
|
||||||
|
|
|
@ -1,35 +1,35 @@
|
||||||
---
|
---
|
||||||
- name: restart and enable cri-dockerd
|
- name: Restart and enable cri-dockerd
|
||||||
command: /bin/true
|
command: /bin/true
|
||||||
notify:
|
notify:
|
||||||
- cri-dockerd | reload systemd
|
- Cri-dockerd | reload systemd
|
||||||
- cri-dockerd | restart docker.service
|
- Cri-dockerd | restart docker.service
|
||||||
- cri-dockerd | reload cri-dockerd.socket
|
- Cri-dockerd | reload cri-dockerd.socket
|
||||||
- cri-dockerd | reload cri-dockerd.service
|
- Cri-dockerd | reload cri-dockerd.service
|
||||||
- cri-dockerd | enable cri-dockerd service
|
- Cri-dockerd | enable cri-dockerd service
|
||||||
|
|
||||||
- name: cri-dockerd | reload systemd
|
- name: Cri-dockerd | reload systemd
|
||||||
systemd:
|
systemd:
|
||||||
name: cri-dockerd
|
name: cri-dockerd
|
||||||
daemon_reload: true
|
daemon_reload: true
|
||||||
masked: no
|
masked: no
|
||||||
|
|
||||||
- name: cri-dockerd | restart docker.service
|
- name: Cri-dockerd | restart docker.service
|
||||||
service:
|
service:
|
||||||
name: docker.service
|
name: docker.service
|
||||||
state: restarted
|
state: restarted
|
||||||
|
|
||||||
- name: cri-dockerd | reload cri-dockerd.socket
|
- name: Cri-dockerd | reload cri-dockerd.socket
|
||||||
service:
|
service:
|
||||||
name: cri-dockerd.socket
|
name: cri-dockerd.socket
|
||||||
state: restarted
|
state: restarted
|
||||||
|
|
||||||
- name: cri-dockerd | reload cri-dockerd.service
|
- name: Cri-dockerd | reload cri-dockerd.service
|
||||||
service:
|
service:
|
||||||
name: cri-dockerd.service
|
name: cri-dockerd.service
|
||||||
state: restarted
|
state: restarted
|
||||||
|
|
||||||
- name: cri-dockerd | enable cri-dockerd service
|
- name: Cri-dockerd | enable cri-dockerd service
|
||||||
service:
|
service:
|
||||||
name: cri-dockerd.service
|
name: cri-dockerd.service
|
||||||
enabled: yes
|
enabled: yes
|
||||||
|
|
|
@ -8,7 +8,8 @@
|
||||||
- role: adduser
|
- role: adduser
|
||||||
user: "{{ addusers.kube }}"
|
user: "{{ addusers.kube }}"
|
||||||
tasks:
|
tasks:
|
||||||
- include_tasks: "../../../../download/tasks/download_file.yml"
|
- name: Download CNI
|
||||||
|
include_tasks: "../../../../download/tasks/download_file.yml"
|
||||||
vars:
|
vars:
|
||||||
download: "{{ download_defaults | combine(downloads.cni) }}"
|
download: "{{ download_defaults | combine(downloads.cni) }}"
|
||||||
|
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
---
|
---
|
||||||
- name: runc | Download cri-dockerd binary
|
- name: Runc | Download cri-dockerd binary
|
||||||
include_tasks: "../../../download/tasks/download_file.yml"
|
include_tasks: "../../../download/tasks/download_file.yml"
|
||||||
vars:
|
vars:
|
||||||
download: "{{ download_defaults | combine(downloads.cri_dockerd) }}"
|
download: "{{ download_defaults | combine(downloads.cri_dockerd) }}"
|
||||||
|
@ -11,7 +11,7 @@
|
||||||
mode: 0755
|
mode: 0755
|
||||||
remote_src: true
|
remote_src: true
|
||||||
notify:
|
notify:
|
||||||
- restart and enable cri-dockerd
|
- Restart and enable cri-dockerd
|
||||||
|
|
||||||
- name: Generate cri-dockerd systemd unit files
|
- name: Generate cri-dockerd systemd unit files
|
||||||
template:
|
template:
|
||||||
|
@ -22,7 +22,7 @@
|
||||||
- cri-dockerd.service
|
- cri-dockerd.service
|
||||||
- cri-dockerd.socket
|
- cri-dockerd.socket
|
||||||
notify:
|
notify:
|
||||||
- restart and enable cri-dockerd
|
- Restart and enable cri-dockerd
|
||||||
|
|
||||||
- name: Flush handlers
|
- name: Flush handlers
|
||||||
meta: flush_handlers
|
meta: flush_handlers
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
---
|
---
|
||||||
- name: restart crio
|
- name: Restart crio
|
||||||
command: /bin/true
|
command: /bin/true
|
||||||
notify:
|
notify:
|
||||||
- CRI-O | reload systemd
|
- CRI-O | reload systemd
|
||||||
|
|
|
@ -12,7 +12,8 @@
|
||||||
- role: adduser
|
- role: adduser
|
||||||
user: "{{ addusers.kube }}"
|
user: "{{ addusers.kube }}"
|
||||||
tasks:
|
tasks:
|
||||||
- include_tasks: "../../../../download/tasks/download_file.yml"
|
- name: Download CNI
|
||||||
|
include_tasks: "../../../../download/tasks/download_file.yml"
|
||||||
vars:
|
vars:
|
||||||
download: "{{ download_defaults | combine(downloads.cni) }}"
|
download: "{{ download_defaults | combine(downloads.cni) }}"
|
||||||
|
|
||||||
|
|
|
@ -109,7 +109,7 @@
|
||||||
- 1.23
|
- 1.23
|
||||||
- 1.24
|
- 1.24
|
||||||
|
|
||||||
- name: cri-o | remove installed packages
|
- name: Cri-o | remove installed packages
|
||||||
package:
|
package:
|
||||||
name: "{{ item }}"
|
name: "{{ item }}"
|
||||||
state: absent
|
state: absent
|
||||||
|
|
|
@@ -1,5 +1,5 @@
 ---
-- name: cri-o | check if fedora coreos
+- name: Cri-o | check if fedora coreos
 stat:
 path: /run/ostree-booted
 get_attributes: no
@@ -7,48 +7,48 @@
 get_mime: no
 register: ostree

-- name: cri-o | set is_ostree
+- name: Cri-o | set is_ostree
 set_fact:
 is_ostree: "{{ ostree.stat.exists }}"

-- name: cri-o | get ostree version
+- name: Cri-o | get ostree version
 shell: "set -o pipefail && rpm-ostree --version | awk -F\\' '/Version/{print $2}'"
 args:
 executable: /bin/bash
 register: ostree_version
 when: is_ostree

-- name: cri-o | Download cri-o
+- name: Cri-o | Download cri-o
 include_tasks: "../../../download/tasks/download_file.yml"
 vars:
 download: "{{ download_defaults | combine(downloads.crio) }}"

-- name: cri-o | special handling for amazon linux
+- name: Cri-o | special handling for amazon linux
 import_tasks: "setup-amazon.yaml"
 when: ansible_distribution in ["Amazon"]

-- name: cri-o | clean up reglacy repos
+- name: Cri-o | clean up reglacy repos
 import_tasks: "cleanup.yaml"

-- name: cri-o | build a list of crio runtimes with Katacontainers runtimes
+- name: Cri-o | build a list of crio runtimes with Katacontainers runtimes
 set_fact:
 crio_runtimes: "{{ crio_runtimes + kata_runtimes }}"
 when:
 - kata_containers_enabled

-- name: cri-o | build a list of crio runtimes with crun runtime
+- name: Cri-o | build a list of crio runtimes with crun runtime
 set_fact:
 crio_runtimes: "{{ crio_runtimes + [crun_runtime] }}"
 when:
 - crun_enabled

-- name: cri-o | build a list of crio runtimes with youki runtime
+- name: Cri-o | build a list of crio runtimes with youki runtime
 set_fact:
 crio_runtimes: "{{ crio_runtimes + [youki_runtime] }}"
 when:
 - youki_enabled

-- name: cri-o | make sure needed folders exist in the system
+- name: Cri-o | make sure needed folders exist in the system
 with_items:
 - /etc/crio
 - /etc/containers
@@ -58,21 +58,21 @@
 state: directory
 mode: 0755

-- name: cri-o | install cri-o config
+- name: Cri-o | install cri-o config
 template:
 src: crio.conf.j2
 dest: /etc/crio/crio.conf
 mode: 0644
 register: config_install

-- name: cri-o | install config.json
+- name: Cri-o | install config.json
 template:
 src: config.json.j2
 dest: /etc/crio/config.json
 mode: 0644
 register: reg_auth_install

-- name: cri-o | copy binaries
+- name: Cri-o | copy binaries
 copy:
 src: "{{ local_release_dir }}/cri-o/bin/{{ item }}"
 dest: "{{ bin_dir }}/{{ item }}"
@@ -80,48 +80,48 @@
 remote_src: true
 with_items:
 - "{{ crio_bin_files }}"
-notify: restart crio
+notify: Restart crio

-- name: cri-o | copy service file
+- name: Cri-o | copy service file
 copy:
 src: "{{ local_release_dir }}/cri-o/contrib/crio.service"
 dest: /etc/systemd/system/crio.service
 mode: 0755
 remote_src: true
-notify: restart crio
+notify: Restart crio

-- name: cri-o | update the bin dir for crio.service file
+- name: Cri-o | update the bin dir for crio.service file
 replace:
 dest: /etc/systemd/system/crio.service
 regexp: "/usr/local/bin/crio"
 replace: "{{ bin_dir }}/crio"
-notify: restart crio
+notify: Restart crio

-- name: cri-o | copy default policy
+- name: Cri-o | copy default policy
 copy:
 src: "{{ local_release_dir }}/cri-o/contrib/policy.json"
 dest: /etc/containers/policy.json
 mode: 0755
 remote_src: true
-notify: restart crio
+notify: Restart crio

-- name: cri-o | copy mounts.conf
+- name: Cri-o | copy mounts.conf
 copy:
 src: mounts.conf
 dest: /etc/containers/mounts.conf
 mode: 0644
 when:
 - ansible_os_family == 'RedHat'
-notify: restart crio
+notify: Restart crio

-- name: cri-o | create directory for oci hooks
+- name: Cri-o | create directory for oci hooks
 file:
 path: /etc/containers/oci/hooks.d
 state: directory
 owner: root
 mode: 0755

-- name: cri-o | set overlay driver
+- name: Cri-o | set overlay driver
 community.general.ini_file:
 dest: /etc/containers/storage.conf
 section: storage
@@ -135,7 +135,7 @@
 value: '"/var/lib/containers/storage"'

 # metacopy=on is available since 4.19 and was backported to RHEL 4.18 kernel
-- name: cri-o | set metacopy mount options correctly
+- name: Cri-o | set metacopy mount options correctly
 community.general.ini_file:
 dest: /etc/containers/storage.conf
 section: storage.options.overlay
@@ -143,37 +143,37 @@
 value: '{{ ''"nodev"'' if ansible_kernel is version_compare(("4.18" if ansible_os_family == "RedHat" else "4.19"), "<") else ''"nodev,metacopy=on"'' }}'
 mode: 0644

-- name: cri-o | create directory registries configs
+- name: Cri-o | create directory registries configs
 file:
 path: /etc/containers/registries.conf.d
 state: directory
 owner: root
 mode: 0755

-- name: cri-o | write registries configs
+- name: Cri-o | write registries configs
 template:
 src: registry.conf.j2
 dest: "/etc/containers/registries.conf.d/10-{{ item.prefix | default(item.location) | regex_replace(':', '_') }}.conf"
 mode: 0644
 loop: "{{ crio_registries }}"
-notify: restart crio
+notify: Restart crio

-- name: cri-o | configure unqualified registry settings
+- name: Cri-o | configure unqualified registry settings
 template:
 src: unqualified.conf.j2
 dest: "/etc/containers/registries.conf.d/01-unqualified.conf"
 mode: 0644
-notify: restart crio
+notify: Restart crio

-- name: cri-o | write cri-o proxy drop-in
+- name: Cri-o | write cri-o proxy drop-in
 template:
 src: http-proxy.conf.j2
 dest: /etc/systemd/system/crio.service.d/http-proxy.conf
 mode: 0644
-notify: restart crio
+notify: Restart crio
 when: http_proxy is defined or https_proxy is defined

-- name: cri-o | configure the uid/gid space for user namespaces
+- name: Cri-o | configure the uid/gid space for user namespaces
 lineinfile:
 path: '{{ item.path }}'
 line: '{{ item.entry }}'
@@ -187,7 +187,7 @@
 loop_control:
 label: '{{ item.path }}'

-- name: cri-o | ensure crio service is started and enabled
+- name: Cri-o | ensure crio service is started and enabled
 service:
 name: crio
 daemon_reload: true
@@ -195,7 +195,7 @@
 state: started
 register: service_start

-- name: cri-o | trigger service restart only when needed
+- name: Cri-o | trigger service restart only when needed
 service:
 name: crio
 state: restarted
@@ -203,7 +203,7 @@
 - config_install.changed or reg_auth_install.changed
 - not service_start.changed

-- name: cri-o | verify that crio is running
+- name: Cri-o | verify that crio is running
 command: "{{ bin_dir }}/crio-status info"
 register: get_crio_info
 until: get_crio_info is succeeded

@@ -1,5 +1,5 @@
 ---
-- name: crictl | Download crictl
+- name: Crictl | Download crictl
 include_tasks: "../../../download/tasks/download_file.yml"
 vars:
 download: "{{ download_defaults | combine(downloads.crictl) }}"

@@ -1,3 +1,3 @@
 ---
-- name: install crictl
+- name: Install crictl
 include_tasks: crictl.yml

@@ -1,5 +1,5 @@
 ---
-- name: crun | Download crun binary
+- name: Crun | Download crun binary
 include_tasks: "../../../download/tasks/download_file.yml"
 vars:
 download: "{{ download_defaults | combine(downloads.crun) }}"

@@ -1,18 +1,18 @@
 ---

-- name: docker-storage-setup | install git and make
+- name: Docker-storage-setup | install git and make
 with_items: [git, make]
 package:
 pkg: "{{ item }}"
 state: present

-- name: docker-storage-setup | docker-storage-setup sysconfig template
+- name: Docker-storage-setup | docker-storage-setup sysconfig template
 template:
 src: docker-storage-setup.j2
 dest: /etc/sysconfig/docker-storage-setup
 mode: 0644

-- name: docker-storage-override-directory | docker service storage-setup override dir
+- name: Docker-storage-override-directory | docker service storage-setup override dir
 file:
 dest: /etc/systemd/system/docker.service.d
 mode: 0755
@@ -20,7 +20,7 @@
 group: root
 state: directory

-- name: docker-storage-override | docker service storage-setup override file
+- name: Docker-storage-override | docker service storage-setup override file
 copy:
 dest: /etc/systemd/system/docker.service.d/override.conf
 content: |-
@@ -33,12 +33,12 @@
 mode: 0644

 # https://docs.docker.com/engine/installation/linux/docker-ce/centos/#install-using-the-repository
-- name: docker-storage-setup | install lvm2
+- name: Docker-storage-setup | install lvm2
 package:
 name: lvm2
 state: present

-- name: docker-storage-setup | install and run container-storage-setup
+- name: Docker-storage-setup | install and run container-storage-setup
 become: yes
 script: |
 install_container_storage_setup.sh \

@@ -1,5 +1,5 @@
 ---
-- name: restart docker
+- name: Restart docker
 command: /bin/true
 notify:
 - Docker | reload systemd

@@ -1,5 +1,5 @@
 ---
-- name: check if fedora coreos
+- name: Check if fedora coreos
 stat:
 path: /run/ostree-booted
 get_attributes: no
@@ -7,18 +7,18 @@
 get_mime: no
 register: ostree

-- name: set is_ostree
+- name: Set is_ostree
 set_fact:
 is_ostree: "{{ ostree.stat.exists }}"

-- name: set docker_version for openEuler
+- name: Set docker_version for openEuler
 set_fact:
 docker_version: '19.03'
 when: ansible_distribution == "openEuler"
 tags:
 - facts

-- name: gather os specific variables
+- name: Gather os specific variables
 include_vars: "{{ item }}"
 with_first_found:
 - files:
@@ -44,14 +44,16 @@
 msg: "SUSE distributions always install Docker from the distro repos"
 when: ansible_pkg_mgr == 'zypper'

-- include_tasks: set_facts_dns.yml
+- name: Gather DNS facts
+  include_tasks: set_facts_dns.yml
 when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'
 tags:
 - facts

-- import_tasks: pre-upgrade.yml
+- name: Pre-upgrade docker
+  import_tasks: pre-upgrade.yml

-- name: ensure docker-ce repository public key is installed
+- name: Ensure docker-ce repository public key is installed
 apt_key:
 id: "{{ item }}"
 url: "{{ docker_repo_key_info.url }}"
@@ -64,7 +66,7 @@
 environment: "{{ proxy_env }}"
 when: ansible_pkg_mgr == 'apt'

-- name: ensure docker-ce repository is enabled
+- name: Ensure docker-ce repository is enabled
 apt_repository:
 repo: "{{ item }}"
 state: present
@@ -99,7 +101,7 @@
 - docker-ce
 - docker-ce-cli

-- name: ensure docker packages are installed
+- name: Ensure docker packages are installed
 package:
 name: "{{ docker_package_info.pkgs }}"
 state: "{{ docker_package_info.state | default('present') }}"
@@ -117,7 +119,7 @@
 until: docker_task_result is succeeded
 retries: 4
 delay: "{{ retry_stagger | d(3) }}"
-notify: restart docker
+notify: Restart docker
 when:
 - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
 - not is_ostree
@@ -135,9 +137,9 @@
 - docker-ce
 - docker-ce-cli

-- name: ensure docker started, remove our config if docker start failed and try again
+- name: Ensure docker started, remove our config if docker start failed and try again
 block:
-- name: ensure service is started if docker packages are already present
+- name: Ensure service is started if docker packages are already present
 service:
 name: docker
 state: started
@@ -145,7 +147,7 @@
 rescue:
 - debug: # noqa name[missing]
 msg: "Docker start failed. Try to remove our config"
-- name: remove kubespray generated config
+- name: Remove kubespray generated config
 file:
 path: "{{ item }}"
 state: absent
@@ -154,13 +156,14 @@
 - /etc/systemd/system/docker.service.d/docker-options.conf
 - /etc/systemd/system/docker.service.d/docker-dns.conf
 - /etc/systemd/system/docker.service.d/docker-orphan-cleanup.conf
-notify: restart docker
+notify: Restart docker

-- name: flush handlers so we can wait for docker to come up
+- name: Flush handlers so we can wait for docker to come up
 meta: flush_handlers

 # Install each plugin using a looped include to make error handling in the included task simpler.
-- include_tasks: docker_plugin.yml
+- name: Install docker plugin
+  include_tasks: docker_plugin.yml
 loop: "{{ docker_plugins }}"
 loop_control:
 loop_var: docker_plugin
@@ -168,7 +171,7 @@
 - name: Set docker systemd config
 import_tasks: systemd.yml

-- name: ensure docker service is started and enabled
+- name: Ensure docker service is started and enabled
 service:
 name: "{{ item }}"
 enabled: yes

@@ -21,7 +21,7 @@
 ignore_errors: true # noqa ignore-errors
 when: docker_packages_list | length>0

-- name: reset | remove all containers
+- name: Reset | remove all containers
 shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv"
 args:
 executable: /bin/bash

@@ -1,23 +1,23 @@
 ---

-- name: set dns server for docker
+- name: Set dns server for docker
 set_fact:
 docker_dns_servers: "{{ dns_servers }}"

-- name: show docker_dns_servers
+- name: Show docker_dns_servers
 debug:
 msg: "{{ docker_dns_servers }}"

-- name: add upstream dns servers
+- name: Add upstream dns servers
 set_fact:
 docker_dns_servers: "{{ docker_dns_servers + upstream_dns_servers | default([]) }}"
 when: dns_mode in ['coredns', 'coredns_dual']

-- name: add global searchdomains
+- name: Add global searchdomains
 set_fact:
 docker_dns_search_domains: "{{ docker_dns_search_domains + searchdomains | default([]) }}"

-- name: check system nameservers
+- name: Check system nameservers
 shell: set -o pipefail && grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/'
 args:
 executable: /bin/bash
@@ -25,7 +25,7 @@
 register: system_nameservers
 check_mode: no

-- name: check system search domains
+- name: Check system search domains
 # noqa risky-shell-pipe - if resolf.conf has no search domain, grep will exit 1 which would force us to add failed_when: false
 # Therefore -o pipefail is not applicable in this specific instance
 shell: grep "^search" /etc/resolv.conf | sed -r 's/^search\s*([^#]+)\s*(#.*)?/\1/'
@@ -35,32 +35,32 @@
 register: system_search_domains
 check_mode: no

-- name: add system nameservers to docker options
+- name: Add system nameservers to docker options
 set_fact:
 docker_dns_servers: "{{ docker_dns_servers | union(system_nameservers.stdout_lines) | unique }}"
 when: system_nameservers.stdout

-- name: add system search domains to docker options
+- name: Add system search domains to docker options
 set_fact:
 docker_dns_search_domains: "{{ docker_dns_search_domains | union(system_search_domains.stdout.split() | default([])) | unique }}"
 when: system_search_domains.stdout

-- name: check number of nameservers
+- name: Check number of nameservers
 fail:
 msg: "Too many nameservers. You can relax this check by set docker_dns_servers_strict=false in docker.yml and we will only use the first 3."
 when: docker_dns_servers | length > 3 and docker_dns_servers_strict | bool

-- name: rtrim number of nameservers to 3
+- name: Rtrim number of nameservers to 3
 set_fact:
 docker_dns_servers: "{{ docker_dns_servers[0:3] }}"
 when: docker_dns_servers | length > 3 and not docker_dns_servers_strict | bool

-- name: check number of search domains
+- name: Check number of search domains
 fail:
 msg: "Too many search domains"
 when: docker_dns_search_domains | length > 6

-- name: check length of search domains
+- name: Check length of search domains
 fail:
 msg: "Search domains exceeded limit of 256 characters"
 when: docker_dns_search_domains | join(' ') | length > 256

@@ -10,10 +10,10 @@
 src: http-proxy.conf.j2
 dest: /etc/systemd/system/docker.service.d/http-proxy.conf
 mode: 0644
-notify: restart docker
+notify: Restart docker
 when: http_proxy is defined or https_proxy is defined

-- name: get systemd version
+- name: Get systemd version
 # noqa command-instead-of-module - systemctl is called intentionally here
 shell: set -o pipefail && systemctl --version | head -n 1 | cut -d " " -f 2
 args:
@@ -29,7 +29,7 @@
 dest: /etc/systemd/system/docker.service
 mode: 0644
 register: docker_service_file
-notify: restart docker
+notify: Restart docker
 when:
 - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
 - not is_fedora_coreos
@@ -39,14 +39,14 @@
 src: docker-options.conf.j2
 dest: "/etc/systemd/system/docker.service.d/docker-options.conf"
 mode: 0644
-notify: restart docker
+notify: Restart docker

 - name: Write docker dns systemd drop-in
 template:
 src: docker-dns.conf.j2
 dest: "/etc/systemd/system/docker.service.d/docker-dns.conf"
 mode: 0644
-notify: restart docker
+notify: Restart docker
 when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'

 - name: Copy docker orphan clean up script to the node
@@ -61,7 +61,7 @@
 src: docker-orphan-cleanup.conf.j2
 dest: "/etc/systemd/system/docker.service.d/docker-orphan-cleanup.conf"
 mode: 0644
-notify: restart docker
+notify: Restart docker
 when: docker_orphan_clean_up | bool

 - name: Flush handlers

@@ -8,7 +8,8 @@
 - role: adduser
 user: "{{ addusers.kube }}"
 tasks:
-- include_tasks: "../../../../download/tasks/download_file.yml"
+- name: Download CNI
+  include_tasks: "../../../../download/tasks/download_file.yml"
 vars:
 download: "{{ download_defaults | combine(downloads.cni) }}"

@@ -1,15 +1,15 @@
 ---
-- name: gVisor | Download runsc binary
+- name: GVisor | Download runsc binary
 include_tasks: "../../../download/tasks/download_file.yml"
 vars:
 download: "{{ download_defaults | combine(downloads.gvisor_runsc) }}"

-- name: gVisor | Download containerd-shim-runsc-v1 binary
+- name: GVisor | Download containerd-shim-runsc-v1 binary
 include_tasks: "../../../download/tasks/download_file.yml"
 vars:
 download: "{{ download_defaults | combine(downloads.gvisor_containerd_shim) }}"

-- name: gVisor | Copy binaries
+- name: GVisor | Copy binaries
 copy:
 src: "{{ item.src }}"
 dest: "{{ bin_dir }}/{{ item.dest }}"

@@ -8,7 +8,8 @@
 - role: adduser
 user: "{{ addusers.kube }}"
 tasks:
-- include_tasks: "../../../../download/tasks/download_file.yml"
+- name: Download CNI
+  include_tasks: "../../../../download/tasks/download_file.yml"
 vars:
 download: "{{ download_defaults | combine(downloads.cni) }}"

@@ -1,23 +1,23 @@
 ---
-- name: kata-containers | Download kata binary
+- name: Kata-containers | Download kata binary
 include_tasks: "../../../download/tasks/download_file.yml"
 vars:
 download: "{{ download_defaults | combine(downloads.kata_containers) }}"

-- name: kata-containers | Copy kata-containers binary
+- name: Kata-containers | Copy kata-containers binary
 unarchive:
 src: "{{ downloads.kata_containers.dest }}"
 dest: "/"
 mode: 0755
 remote_src: yes

-- name: kata-containers | Create config directory
+- name: Kata-containers | Create config directory
 file:
 path: "{{ kata_containers_config_dir }}"
 state: directory
 mode: 0755

-- name: kata-containers | Set configuration
+- name: Kata-containers | Set configuration
 template:
 src: "{{ item }}.j2"
 dest: "{{ kata_containers_config_dir }}/{{ item }}"
@@ -25,7 +25,7 @@
 with_items:
 - configuration-qemu.toml

-- name: kata-containers | Set containerd bin
+- name: Kata-containers | Set containerd bin
 vars:
 shim: "{{ item }}"
 template:
@@ -35,7 +35,7 @@
 with_items:
 - qemu

-- name: kata-containers | Load vhost kernel modules
+- name: Kata-containers | Load vhost kernel modules
 community.general.modprobe:
 state: present
 name: "{{ item }}"
@@ -43,7 +43,7 @@
 - vhost_vsock
 - vhost_net

-- name: kata-containers | Persist vhost kernel modules
+- name: Kata-containers | Persist vhost kernel modules
 copy:
 dest: /etc/modules-load.d/kubespray-kata-containers.conf
 mode: 0644

@@ -1,10 +1,10 @@
 ---
-- name: nerdctl | Download nerdctl
+- name: Nerdctl | Download nerdctl
 include_tasks: "../../../download/tasks/download_file.yml"
 vars:
 download: "{{ download_defaults | combine(downloads.nerdctl) }}"

-- name: nerdctl | Copy nerdctl binary from download dir
+- name: Nerdctl | Copy nerdctl binary from download dir
 copy:
 src: "{{ local_release_dir }}/nerdctl"
 dest: "{{ bin_dir }}/nerdctl"
@@ -17,7 +17,7 @@
 - Get nerdctl completion
 - Install nerdctl completion

-- name: nerdctl | Create configuration dir
+- name: Nerdctl | Create configuration dir
 file:
 path: /etc/nerdctl
 state: directory
@@ -26,7 +26,7 @@
 group: root
 become: true

-- name: nerdctl | Install nerdctl configuration
+- name: Nerdctl | Install nerdctl configuration
 template:
 src: nerdctl.toml.j2
 dest: /etc/nerdctl/nerdctl.toml

@@ -1,5 +1,5 @@
 ---
-- name: runc | check if fedora coreos
+- name: Runc | check if fedora coreos
 stat:
 path: /run/ostree-booted
 get_attributes: no
@@ -7,18 +7,18 @@
 get_mime: no
 register: ostree

-- name: runc | set is_ostree
+- name: Runc | set is_ostree
 set_fact:
 is_ostree: "{{ ostree.stat.exists }}"

-- name: runc | Uninstall runc package managed by package manager
+- name: Runc | Uninstall runc package managed by package manager
 package:
 name: "{{ runc_package_name }}"
 state: absent
 when:
 - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))

-- name: runc | Download runc binary
+- name: Runc | Download runc binary
 include_tasks: "../../../download/tasks/download_file.yml"
 vars:
 download: "{{ download_defaults | combine(downloads.runc) }}"
@@ -30,7 +30,7 @@
 mode: 0755
 remote_src: true

-- name: runc | Remove orphaned binary
+- name: Runc | Remove orphaned binary
 file:
 path: /usr/bin/runc
 state: absent

@@ -1,5 +1,5 @@
 ---
-- name: skopeo | check if fedora coreos
+- name: Skopeo | check if fedora coreos
 stat:
 path: /run/ostree-booted
 get_attributes: no
@@ -7,11 +7,11 @@
 get_mime: no
 register: ostree

-- name: skopeo | set is_ostree
+- name: Skopeo | set is_ostree
 set_fact:
 is_ostree: "{{ ostree.stat.exists }}"

-- name: skopeo | Uninstall skopeo package managed by package manager
+- name: Skopeo | Uninstall skopeo package managed by package manager
 package:
 name: skopeo
 state: absent
@@ -19,7 +19,7 @@
 - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
 ignore_errors: true # noqa ignore-errors

-- name: skopeo | Download skopeo binary
+- name: Skopeo | Download skopeo binary
 include_tasks: "../../../download/tasks/download_file.yml"
 vars:
 download: "{{ download_defaults | combine(downloads.skopeo) }}"

@@ -1,5 +1,5 @@
 ---
-- name: validate-container-engine | check if fedora coreos
+- name: Validate-container-engine | check if fedora coreos
 stat:
 path: /run/ostree-booted
 get_attributes: no
@@ -9,7 +9,7 @@
 tags:
 - facts

-- name: validate-container-engine | set is_ostree
+- name: Validate-container-engine | set is_ostree
 set_fact:
 is_ostree: "{{ ostree.stat.exists }}"
 tags:

@@ -8,7 +8,8 @@
 - role: adduser
 user: "{{ addusers.kube }}"
 tasks:
-- include_tasks: "../../../../download/tasks/download_file.yml"
+- name: Download CNI
+  include_tasks: "../../../../download/tasks/download_file.yml"
 vars:
 download: "{{ download_defaults | combine(downloads.cni) }}"

@@ -1,10 +1,10 @@
 ---
-- name: youki | Download youki
+- name: Youki | Download youki
 include_tasks: "../../../download/tasks/download_file.yml"
 vars:
 download: "{{ download_defaults | combine(downloads.youki) }}"

-- name: youki | Copy youki binary from download dir
+- name: Youki | Copy youki binary from download dir
 copy:
 src: "{{ local_release_dir }}/youki_v{{ youki_version | regex_replace('\\.', '_') }}_linux/youki-v{{ youki_version }}/youki"
 dest: "{{ youki_bin_dir }}/youki"

@@ -1,20 +1,20 @@
 ---
 # The image_info_command depends on the Container Runtime and will output something like the following:
 # nginx:1.15,gcr.io/google-containers/kube-proxy:v1.14.1,gcr.io/google-containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9,gcr.io/google-containers/kube-apiserver:v1.14.1,etc...
-- name: check_pull_required | Generate a list of information about the images on a node # noqa command-instead-of-shell - image_info_command contains a pipe, therefore requiring shell
+- name: Check_pull_required | Generate a list of information about the images on a node # noqa command-instead-of-shell - image_info_command contains a pipe, therefore requiring shell
 shell: "{{ image_info_command }}"
 register: docker_images
 changed_when: false
 check_mode: no
 when: not download_always_pull

-- name: check_pull_required | Set pull_required if the desired image is not yet loaded
+- name: Check_pull_required | Set pull_required if the desired image is not yet loaded
 set_fact:
 pull_required: >-
 {%- if image_reponame | regex_replace('^docker\.io/(library/)?', '') in docker_images.stdout.split(',') %}false{%- else -%}true{%- endif -%}
 when: not download_always_pull

-- name: check_pull_required | Check that the local digest sha256 corresponds to the given image tag
+- name: Check_pull_required | Check that the local digest sha256 corresponds to the given image tag
 assert:
 that: "{{ download.repo }}:{{ download.tag }} in docker_images.stdout.split(',')"
 when:

@@ -1,6 +1,6 @@
 ---
 - block:
-- name: set default values for flag variables
+- name: Set default values for flag variables
 set_fact:
 image_is_cached: false
 image_changed: false
@@ -8,12 +8,12 @@
 tags:
 - facts

-- name: download_container | Set a few facts
+- name: Download_container | Set a few facts
 import_tasks: set_container_facts.yml
 tags:
 - facts

-- name: download_container | Prepare container download
+- name: Download_container | Prepare container download
 include_tasks: check_pull_required.yml
 when:
 - not download_always_pull
@@ -21,7 +21,7 @@
 - debug: # noqa name[missing]
 msg: "Pull {{ image_reponame }} required is: {{ pull_required }}"

-- name: download_container | Determine if image is in cache
+- name: Download_container | Determine if image is in cache
 stat:
 path: "{{ image_path_cached }}"
 get_attributes: no
@@ -36,7 +36,7 @@
 when:
 - download_force_cache

-- name: download_container | Set fact indicating if image is in cache
+- name: Download_container | Set fact indicating if image is in cache
 set_fact:
 image_is_cached: "{{ cache_image.stat.exists }}"
 tags:
@@ -52,7 +52,7 @@
 - download_force_cache
 - not download_run_once

-- name: download_container | Download image if required
+- name: Download_container | Download image if required
 command: "{{ image_pull_command_on_localhost if download_localhost else image_pull_command }} {{ image_reponame }}"
 delegate_to: "{{ download_delegate if download_run_once else inventory_hostname }}"
 delegate_facts: yes
@@ -67,7 +67,7 @@
 - pull_required or download_run_once
 - not image_is_cached

-- name: download_container | Save and compress image
+- name: Download_container | Save and compress image
 shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}" # noqa command-instead-of-shell - image_save_command_on_localhost contains a pipe, therefore requires shell
 delegate_to: "{{ download_delegate }}"
 delegate_facts: no
@@ -79,7 +79,7 @@
 - not image_is_cached
 - download_run_once

-- name: download_container | Copy image to ansible host cache
+- name: Download_container | Copy image to ansible host cache
 ansible.posix.synchronize:
 src: "{{ image_path_final }}"
 dest: "{{ image_path_cached }}"
@@ -91,7 +91,7 @@
 - not download_localhost
 - download_delegate == inventory_hostname

-- name: download_container | Upload image to node if it is cached
+- name: Download_container | Upload image to node if it is cached
 ansible.posix.synchronize:
 src: "{{ image_path_cached }}"
 dest: "{{ image_path_final }}"
@@ -107,7 +107,7 @@
 - pull_required
 - download_force_cache

-- name: download_container | Load image into the local container registry
+- name: Download_container | Load image into the local container registry
 shell: "{{ image_load_command }}" # noqa command-instead-of-shell - image_load_command uses pipes, therefore requires shell
 register: container_load_status
 failed_when: container_load_status is failed
@@ -115,7 +115,7 @@
 - pull_required
 - download_force_cache

-- name: download_container | Remove container image from cache
+- name: Download_container | Remove container image from cache
 file:
 state: absent
 path: "{{ image_path_final }}"

@@ -1,21 +1,22 @@
 ---
-- block:
-- name: prep_download | Set a few facts
+- name: "Download_file | download {{ download.dest }}"
+  block:
+  - name: Prep_download | Set a few facts
 set_fact:
 download_force_cache: "{{ true if download_run_once else download_force_cache }}"

-- name: download_file | Starting download of file
+- name: Download_file | Starting download of file
 debug:
 msg: "{{ download.url }}"
 run_once: "{{ download_run_once }}"

-- name: download_file | Set pathname of cached file
+- name: Download_file | Set pathname of cached file
 set_fact:
 file_path_cached: "{{ download_cache_dir }}/{{ download.dest | basename }}"
 tags:
 - facts

-- name: download_file | Create dest directory on node
+- name: Download_file | Create dest directory on node
 file:
 path: "{{ download.dest | dirname }}"
 owner: "{{ download.owner | default(omit) }}"
@@ -23,7 +24,7 @@
 state: directory
 recurse: yes

-- name: download_file | Create local cache directory
+- name: Download_file | Create local cache directory
 file:
 path: "{{ file_path_cached | dirname }}"
 state: directory
@@ -38,7 +39,7 @@
 tags:
 - localhost

-- name: download_file | Create cache directory on download_delegate host
+- name: Download_file | Create cache directory on download_delegate host
 file:
 path: "{{ file_path_cached | dirname }}"
 state: directory
@@ -52,7 +53,7 @@

 # We check a number of mirrors that may hold the file and pick a working one at random
 # This task will avoid logging it's parameters to not leak environment passwords in the log
-- name: download_file | Validate mirrors
+- name: Download_file | Validate mirrors
 uri:
 url: "{{ mirror }}"
 method: HEAD
@@ -75,14 +76,14 @@
 ignore_errors: true

 # Ansible 2.9 requires we convert a generator to a list
-- name: download_file | Get the list of working mirrors
+- name: Download_file | Get the list of working mirrors
 set_fact:
 valid_mirror_urls: "{{ uri_result.results | selectattr('failed', 'eq', False) | map(attribute='mirror') | list }}"
 delegate_to: "{{ download_delegate if download_force_cache else inventory_hostname }}"

 # This must always be called, to check if the checksum matches. On no-match the file is re-downloaded.
 # This task will avoid logging it's parameters to not leak environment passwords in the log
-- name: download_file | Download item
+- name: Download_file | Download item
 get_url:
 url: "{{ valid_mirror_urls | random }}"
 dest: "{{ file_path_cached if download_force_cache else download.dest }}"
@@ -104,7 +105,7 @@
 environment: "{{ proxy_env }}"
 no_log: "{{ not (unsafe_show_logs | bool) }}"

-- name: download_file | Copy file back to ansible host file cache
+- name: Download_file | Copy file back to ansible host file cache
 ansible.posix.synchronize:
 src: "{{ file_path_cached }}"
 dest: "{{ file_path_cached }}"
@@ -115,7 +116,7 @@
 - not download_localhost
 - download_delegate == inventory_hostname

-- name: download_file | Copy file from cache to nodes, if it is available
+- name: Download_file | Copy file from cache to nodes, if it is available
 ansible.posix.synchronize:
 src: "{{ file_path_cached }}"
 dest: "{{ download.dest }}"
@@ -128,7 +129,7 @@
 when:
 - download_force_cache

-- name: download_file | Set mode and owner
+- name: Download_file | Set mode and owner
 file:
 path: "{{ download.dest }}"
 mode: "{{ download.mode | default(omit) }}"
@@ -136,7 +137,7 @@
 when:
 - download_force_cache

-- name: "download_file | Extract file archives"
+- name: "Download_file | Extract file archives"
 include_tasks: "extract_file.yml"

 tags:

@@ -1,5 +1,5 @@
 ---
-- name: extract_file | Unpacking archive
+- name: Extract_file | Unpacking archive
 unarchive:
 src: "{{ download.dest }}"
 dest: "{{ download.dest | dirname }}"

@@ -1,5 +1,5 @@
 ---
-- name: download | Prepare working directories and variables
+- name: Download | Prepare working directories and variables
 import_tasks: prep_download.yml
 when:
 - not skip_downloads | default(false)
@@ -7,7 +7,7 @@
 - download
 - upload

-- name: download | Get kubeadm binary and list of required images
+- name: Download | Get kubeadm binary and list of required images
 include_tasks: prep_kubeadm_images.yml
 when:
 - not skip_downloads | default(false)
@@ -16,7 +16,7 @@
 - download
 - upload

-- name: download | Download files / images
+- name: Download | Download files / images
 include_tasks: "{{ include_file }}"
 loop: "{{ downloads | combine(kubeadm_images) | dict2items }}"
 vars:

@@ -1,11 +1,11 @@
 ---
-- name: prep_download | Set a few facts
+- name: Prep_download | Set a few facts
 set_fact:
 download_force_cache: "{{ true if download_run_once else download_force_cache }}"
 tags:
 - facts

-- name: prep_download | On localhost, check if passwordless root is possible
+- name: Prep_download | On localhost, check if passwordless root is possible
 command: "true"
 delegate_to: localhost
 connection: local
@@ -20,7 +20,7 @@
 - localhost
 - asserts

-- name: prep_download | On localhost, check if user has access to the container runtime without using sudo
+- name: Prep_download | On localhost, check if user has access to the container runtime without using sudo
 shell: "{{ image_info_command_on_localhost }}" # noqa command-instead-of-shell - image_info_command_on_localhost contains pipe, therefore requires shell
 delegate_to: localhost
 connection: local
@@ -35,7 +35,7 @@
 - localhost
 - asserts

-- name: prep_download | Parse the outputs of the previous commands
+- name: Prep_download | Parse the outputs of the previous commands
 set_fact:
 user_in_docker_group: "{{ not test_docker.failed }}"
 user_can_become_root: "{{ not test_become.failed }}"
@@ -45,7 +45,7 @@
 - localhost
 - asserts

-- name: prep_download | Check that local user is in group or can become root
+- name: Prep_download | Check that local user is in group or can become root
 assert:
 that: "user_in_docker_group or user_can_become_root"
 msg: >-
@@ -56,7 +56,7 @@
 - localhost
 - asserts

-- name: prep_download | Register docker images info
+- name: Prep_download | Register docker images info
 shell: "{{ image_info_command }}" # noqa command-instead-of-shell - image_info_command contains pipe therefore requires shell
 no_log: "{{ not (unsafe_show_logs | bool) }}"
 register: docker_images
@@ -65,7 +65,7 @@
 check_mode: no
 when: download_container

-- name: prep_download | Create staging directory on remote node
+- name: Prep_download | Create staging directory on remote node
 file:
 path: "{{ local_release_dir }}/images"
 state: directory
@@ -75,7 +75,7 @@
 when:
 - ansible_os_family not in ["Flatcar", "Flatcar Container Linux by Kinvolk"]

-- name: prep_download | Create local cache for files and images on control node
+- name: Prep_download | Create local cache for files and images on control node
 file:
 path: "{{ download_cache_dir }}/images"
 state: directory

@@ -1,12 +1,12 @@
 ---
-- name: prep_kubeadm_images | Check kubeadm version matches kubernetes version
+- name: Prep_kubeadm_images | Check kubeadm version matches kubernetes version
   fail:
     msg: "Kubeadm version {{ kubeadm_version }} do not matches kubernetes {{ kube_version }}"
   when:
     - not skip_downloads | default(false)
     - not kubeadm_version == downloads.kubeadm.version
 
-- name: prep_kubeadm_images | Download kubeadm binary
+- name: Prep_kubeadm_images | Download kubeadm binary
   include_tasks: "download_file.yml"
   vars:
     download: "{{ download_defaults | combine(downloads.kubeadm) }}"
@@ -14,7 +14,7 @@
     - not skip_downloads | default(false)
     - downloads.kubeadm.enabled
 
-- name: prep_kubeadm_images | Create kubeadm config
+- name: Prep_kubeadm_images | Create kubeadm config
   template:
     src: "kubeadm-images.yaml.j2"
     dest: "{{ kube_config_dir }}/kubeadm-images.yaml"
@@ -22,20 +22,20 @@
   when:
     - not skip_kubeadm_images | default(false)
 
-- name: prep_kubeadm_images | Copy kubeadm binary from download dir to system path
+- name: Prep_kubeadm_images | Copy kubeadm binary from download dir to system path
   copy:
     src: "{{ downloads.kubeadm.dest }}"
     dest: "{{ bin_dir }}/kubeadm"
     mode: 0755
     remote_src: true
 
-- name: prep_kubeadm_images | Set kubeadm binary permissions
+- name: Prep_kubeadm_images | Set kubeadm binary permissions
   file:
     path: "{{ bin_dir }}/kubeadm"
     mode: "0755"
     state: file
 
-- name: prep_kubeadm_images | Generate list of required images
+- name: Prep_kubeadm_images | Generate list of required images
   shell: "set -o pipefail && {{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -Ev 'coredns|pause'"
   args:
     executable: /bin/bash
@@ -45,7 +45,7 @@
   when:
     - not skip_kubeadm_images | default(false)
 
-- name: prep_kubeadm_images | Parse list of images
+- name: Prep_kubeadm_images | Parse list of images
   vars:
     kubeadm_images_list: "{{ kubeadm_images_raw.stdout_lines }}"
   set_fact:
@@ -63,7 +63,7 @@
   when:
     - not skip_kubeadm_images | default(false)
 
-- name: prep_kubeadm_images | Convert list of images to dict for later use
+- name: Prep_kubeadm_images | Convert list of images to dict for later use
   set_fact:
     kubeadm_images: "{{ kubeadm_images_cooked.results | map(attribute='ansible_facts.kubeadm_image') | list | items2dict }}"
   run_once: true
@@ -1,22 +1,22 @@
 ---
-- name: set_container_facts | Display the name of the image being processed
+- name: Set_container_facts | Display the name of the image being processed
   debug:
     msg: "{{ download.repo }}"
 
-- name: set_container_facts | Set if containers should be pulled by digest
+- name: Set_container_facts | Set if containers should be pulled by digest
   set_fact:
     pull_by_digest: "{{ download.sha256 is defined and download.sha256 }}"
 
-- name: set_container_facts | Define by what name to pull the image
+- name: Set_container_facts | Define by what name to pull the image
   set_fact:
     image_reponame: >-
       {%- if pull_by_digest %}{{ download.repo }}@sha256:{{ download.sha256 }}{%- else -%}{{ download.repo }}:{{ download.tag }}{%- endif -%}
 
-- name: set_container_facts | Define file name of image
+- name: Set_container_facts | Define file name of image
   set_fact:
     image_filename: "{{ image_reponame | regex_replace('/|\0|:', '_') }}.tar"
 
-- name: set_container_facts | Define path of image
+- name: Set_container_facts | Define path of image
   set_fact:
     image_path_cached: "{{ download_cache_dir }}/images/{{ image_filename }}"
     image_path_final: "{{ local_release_dir }}/images/{{ image_filename }}"
@@ -1,39 +1,40 @@
 ---
-- name: restart etcd
+- name: Restart etcd
   command: /bin/true
   notify:
     - Backup etcd data
-    - etcd | reload systemd
-    - reload etcd
-    - wait for etcd up
+    - Etcd | reload systemd
+    - Reload etcd
+    - Wait for etcd up
     - Cleanup etcd backups
 
-- name: restart etcd-events
+- name: Restart etcd-events
   command: /bin/true
   notify:
-    - etcd | reload systemd
-    - reload etcd-events
-    - wait for etcd-events up
+    - Etcd | reload systemd
+    - Reload etcd-events
+    - Wait for etcd-events up
 
-- import_tasks: backup.yml
+- name: Backup etcd
+  import_tasks: backup.yml
 
-- name: etcd | reload systemd
+- name: Etcd | reload systemd
   systemd:
     daemon_reload: true
 
-- name: reload etcd
+- name: Reload etcd
   service:
     name: etcd
     state: restarted
   when: is_etcd_master
 
-- name: reload etcd-events
+- name: Reload etcd-events
   service:
     name: etcd-events
     state: restarted
   when: is_etcd_master
 
-- name: wait for etcd up
+- name: Wait for etcd up
   uri:
     url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
     validate_certs: no
@@ -44,9 +45,10 @@
   retries: 60
   delay: 1
 
-- import_tasks: backup_cleanup.yml
+- name: Cleanup etcd backups
+  import_tasks: backup_cleanup.yml
 
-- name: wait for etcd-events up
+- name: Wait for etcd-events up
   uri:
     url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2383/health"
     validate_certs: no
@@ -57,6 +59,6 @@
   retries: 60
   delay: 1
 
-- name: set etcd_secret_changed
+- name: Set etcd_secret_changed
   set_fact:
     etcd_secret_changed: true
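Ansible resolves notify entries against handler names with an exact string match, so capitalizing a handler also means updating every notify that points at it in the same change; otherwise the notification would reference a handler name that no longer exists. A minimal sketch of the pairing, using the handler and the refresh-config task that appear in the hunks around here (surrounding play omitted):

    handlers:
      - name: Restart etcd
        command: /bin/true

    tasks:
      - name: Refresh config | Create etcd config file
        template:
          src: etcd.env.j2
          dest: /etc/etcd.env
          mode: 0640
        notify: Restart etcd  # must match the handler name exactly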
@@ -41,7 +41,8 @@
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
     ETCDCTL_ENDPOINTS: "{{ etcd_events_access_addresses }}"
 
-- include_tasks: refresh_config.yml
+- name: Configure | Refresh etcd config
+  include_tasks: refresh_config.yml
   when: is_etcd_master
 
 - name: Configure | Copy etcd.service systemd file
@@ -56,7 +56,7 @@
   run_once: yes
   delegate_to: "{{ groups['etcd'][0] }}"
   when: gen_certs | default(false)
-  notify: set etcd_secret_changed
+  notify: Set etcd_secret_changed
 
 - name: Gen_certs | run cert generation script for all clients
   command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
@@ -73,7 +73,7 @@
     - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
     - gen_certs | default(false)
-  notify: set etcd_secret_changed
+  notify: Set etcd_secret_changed
 
 - name: Gen_certs | Gather etcd member/admin and kube_control_plane client certs from first etcd node
   slurp:
@@ -97,7 +97,7 @@
     - inventory_hostname in groups['etcd']
     - sync_certs | default(false)
     - inventory_hostname != groups['etcd'][0]
-  notify: set etcd_secret_changed
+  notify: Set etcd_secret_changed
 
 - name: Gen_certs | Write etcd member/admin and kube_control_plane client certs to other etcd nodes
   copy:
@@ -129,7 +129,7 @@
     - inventory_hostname != groups['etcd'][0]
     - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
-  notify: set etcd_secret_changed
+  notify: Set etcd_secret_changed
 
 - name: Gen_certs | Write node certs to other etcd nodes
   copy:
@@ -147,12 +147,14 @@
   loop_control:
     label: "{{ item.item }}"
 
-- include_tasks: gen_nodes_certs_script.yml
+- name: Gen_certs | Generate etcd certs
+  include_tasks: gen_nodes_certs_script.yml
   when:
     - inventory_hostname in groups['kube_control_plane'] and
         sync_certs | default(false) and inventory_hostname not in groups['etcd']
 
-- include_tasks: gen_nodes_certs_script.yml
+- name: Gen_certs | Generate etcd certs on nodes if needed
+  include_tasks: gen_nodes_certs_script.yml
   when:
     - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
@@ -1,5 +1,7 @@
 ---
-- import_tasks: install_etcdctl_docker.yml
+
+- name: Install etcdctl from docker
+  import_tasks: install_etcdctl_docker.yml
   when: etcd_cluster_setup
 
 - name: Get currently-deployed etcd version
@@ -14,14 +16,14 @@
 
 - name: Restart etcd if necessary
   command: /bin/true
-  notify: restart etcd
+  notify: Restart etcd
   when:
     - etcd_cluster_setup
     - etcd_image_tag not in etcd_current_docker_image.stdout | default('')
 
 - name: Restart etcd-events if necessary
   command: /bin/true
-  notify: restart etcd-events
+  notify: Restart etcd-events
   when:
     - etcd_events_cluster_setup
     - etcd_image_tag not in etcd_events_current_docker_image.stdout | default('')
@@ -8,19 +8,19 @@
 
 - name: Restart etcd if necessary
   command: /bin/true
-  notify: restart etcd
+  notify: Restart etcd
   when:
     - etcd_cluster_setup
     - etcd_version.lstrip('v') not in etcd_current_host_version.stdout | default('')
 
 - name: Restart etcd-events if necessary
   command: /bin/true
-  notify: restart etcd-events
+  notify: Restart etcd-events
   when:
     - etcd_events_cluster_setup
     - etcd_version.lstrip('v') not in etcd_current_host_version.stdout | default('')
 
-- name: install | Download etcd and etcdctl
+- name: Install | Download etcd and etcdctl
   include_tasks: "../../download/tasks/download_file.yml"
   vars:
     download: "{{ download_defaults | combine(downloads.etcd) }}"
@@ -29,7 +29,7 @@
     - never
     - etcd
 
-- name: install | Copy etcd and etcdctl binary from download dir
+- name: Install | Copy etcd and etcdctl binary from download dir
   copy:
     src: "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-{{ host_architecture }}/{{ item }}"
     dest: "{{ bin_dir }}/{{ item }}"
@@ -12,7 +12,8 @@
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
     ETCDCTL_ENDPOINTS: "{{ etcd_events_access_addresses }}"
 
-- include_tasks: refresh_config.yml
+- name: Join Member | Refresh etcd config
+  include_tasks: refresh_config.yml
   vars:
     # noqa: jinja[spacing]
     etcd_events_peer_addresses: >-
@@ -13,7 +13,8 @@
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
 
-- include_tasks: refresh_config.yml
+- name: Join Member | Refresh etcd config
+  include_tasks: refresh_config.yml
   vars:
     # noqa: jinja[spacing]
     etcd_peer_addresses: >-
@@ -1,23 +1,27 @@
 ---
-- include_tasks: check_certs.yml
+- name: Check etcd certs
+  include_tasks: check_certs.yml
   when: cert_management == "script"
   tags:
     - etcd-secrets
     - facts
 
-- include_tasks: "gen_certs_script.yml"
+- name: Generate etcd certs
+  include_tasks: "gen_certs_script.yml"
   when:
     - cert_management | d('script') == "script"
   tags:
     - etcd-secrets
 
-- include_tasks: upd_ca_trust.yml
+- name: Trust etcd CA
+  include_tasks: upd_ca_trust.yml
   when:
     - inventory_hostname in groups['etcd'] | union(groups['kube_control_plane']) | unique | sort
   tags:
     - etcd-secrets
 
-- include_tasks: upd_ca_trust.yml
+- name: Trust etcd CA on nodes if needed
+  include_tasks: upd_ca_trust.yml
   when:
     - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
@@ -49,29 +53,33 @@
     - master
     - network
 
-- include_tasks: "install_{{ etcd_deployment_type }}.yml"
+- name: Install etcd
+  include_tasks: "install_{{ etcd_deployment_type }}.yml"
   when: is_etcd_master
   tags:
     - upgrade
 
-- include_tasks: configure.yml
+- name: Configure etcd
+  include_tasks: configure.yml
   when: is_etcd_master
 
-- include_tasks: refresh_config.yml
+- name: Refresh etcd config
+  include_tasks: refresh_config.yml
   when: is_etcd_master
 
 - name: Restart etcd if certs changed
   command: /bin/true
-  notify: restart etcd
+  notify: Restart etcd
   when: is_etcd_master and etcd_cluster_setup and etcd_secret_changed | default(false)
 
 - name: Restart etcd-events if certs changed
   command: /bin/true
-  notify: restart etcd
+  notify: Restart etcd
   when: is_etcd_master and etcd_events_cluster_setup and etcd_secret_changed | default(false)
 
 # After etcd cluster is assembled, make sure that
 # initial state of the cluster is in `existing`
 # state instead of `new`.
-- include_tasks: refresh_config.yml
+- name: Refresh etcd config again for idempotency
+  include_tasks: refresh_config.yml
   when: is_etcd_master
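The other recurring fix is for the name[missing] rule: bare include_tasks / import_tasks entries get an explicit name, as the hunk above does throughout the etcd role. A minimal before/after sketch taken from that hunk:

    # flagged by name[missing]
    - include_tasks: configure.yml
      when: is_etcd_master

    # accepted
    - name: Configure etcd
      include_tasks: configure.yml
      when: is_etcd_master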
@@ -4,7 +4,7 @@
     src: etcd.env.j2
     dest: /etc/etcd.env
     mode: 0640
-  notify: restart etcd
+  notify: Restart etcd
   when: is_etcd_master and etcd_cluster_setup
 
 - name: Refresh config | Create etcd-events config file
@@ -12,5 +12,5 @@
     src: etcd-events.env.j2
     dest: /etc/etcd-events.env
     mode: 0640
-  notify: restart etcd-events
+  notify: Restart etcd-events
   when: is_etcd_master and etcd_events_cluster_setup
@@ -14,7 +14,8 @@
     get_mime: no
   register: stat_etcdctl
 
-- block:
+- name: Remove old etcd binary
+  block:
     - name: Check version
       command: "{{ bin_dir }}/etcdctl version"
       register: etcdctl_version
@@ -36,7 +37,8 @@
     get_mime: no
   register: stat_etcdctl
 
-- block:
+- name: Copy etcdctl script to host
+  block:
     - name: Copy etcdctl script to host
       shell: "{{ docker_bin_dir }}/docker cp \"$({{ docker_bin_dir }}/docker ps -qf ancestor={{ etcd_image_repo }}:{{ etcd_image_tag }})\":/usr/local/bin/etcdctl {{ etcd_data_dir }}/etcdctl"
       when: container_manager == "docker"
@@ -1,6 +1,7 @@
 ---
 
-- import_tasks: credentials-check.yml
+- name: OCI Cloud Controller | Check Oracle Cloud credentials
+  import_tasks: credentials-check.yml
 
 - name: "OCI Cloud Controller | Generate Cloud Provider Configuration"
   template:
@@ -59,7 +59,8 @@
     - inventory_hostname == groups['kube_control_plane'][0]
   tags: node-webhook
 
-- include_tasks: oci.yml
+- name: Configure Oracle Cloud provider
+  include_tasks: oci.yml
   tags: oci
   when:
     - cloud_provider is defined
@@ -1,6 +1,6 @@
 ---
 
-- name: crun | Copy runtime class manifest
+- name: Crun | Copy runtime class manifest
   template:
     src: runtimeclass-crun.yml
     dest: "{{ kube_config_dir }}/runtimeclass-crun.yml"
@@ -8,7 +8,7 @@
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
 
-- name: crun | Apply manifests
+- name: Crun | Apply manifests
   kube:
     name: "runtimeclass-crun"
     kubectl: "{{ bin_dir }}/kubectl"
@@ -1,5 +1,5 @@
 ---
-- name: gVisor | Create addon dir
+- name: GVisor | Create addon dir
   file:
     path: "{{ kube_config_dir }}/addons/gvisor"
     owner: root
@@ -7,12 +7,12 @@
     mode: 0755
     recurse: true
 
-- name: gVisor | Templates List
+- name: GVisor | Templates List
   set_fact:
     gvisor_templates:
       - { name: runtimeclass-gvisor, file: runtimeclass-gvisor.yml, type: runtimeclass }
 
-- name: gVisort | Create manifests
+- name: GVisort | Create manifests
   template:
     src: "{{ item.file }}.j2"
     dest: "{{ kube_config_dir }}/addons/gvisor/{{ item.file }}"
@@ -22,7 +22,7 @@
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
 
-- name: gVisor | Apply manifests
+- name: GVisor | Apply manifests
   kube:
     name: "{{ item.item.name }}"
     kubectl: "{{ bin_dir }}/kubectl"
@@ -1,6 +1,6 @@
 ---
 
-- name: youki | Copy runtime class manifest
+- name: Youki | Copy runtime class manifest
   template:
     src: runtimeclass-youki.yml
     dest: "{{ kube_config_dir }}/runtimeclass-youki.yml"
@@ -8,7 +8,7 @@
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
 
-- name: youki | Apply manifests
+- name: Youki | Apply manifests
   kube:
     name: "runtimeclass-youki"
     kubectl: "{{ bin_dir }}/kubectl"
@@ -1,5 +1,6 @@
 ---
-- include_tasks: azure-credential-check.yml
+- name: Azure CSI Driver | Check Azure credentials
+  include_tasks: azure-credential-check.yml
 
 - name: Azure CSI Driver | Write Azure CSI cloud-config
   template:
@@ -1,5 +1,6 @@
 ---
-- include_tasks: cinder-credential-check.yml
+- name: Cinder CSI Driver | Check Cinder credentials
+  include_tasks: cinder-credential-check.yml
 
 - name: Cinder CSI Driver | Write cacert file
   include_tasks: cinder-write-cacert.yml
@@ -1,7 +1,8 @@
 ---
-- include_tasks: vsphere-credentials-check.yml
+- name: VSphere CSI Driver | Check vsphare credentials
+  include_tasks: vsphere-credentials-check.yml
 
-- name: vSphere CSI Driver | Generate CSI cloud-config
+- name: VSphere CSI Driver | Generate CSI cloud-config
   template:
     src: "{{ item }}.j2"
     dest: "{{ kube_config_dir }}/{{ item }}"
@@ -10,7 +11,7 @@
     - vsphere-csi-cloud-config
   when: inventory_hostname == groups['kube_control_plane'][0]
 
-- name: vSphere CSI Driver | Generate Manifests
+- name: VSphere CSI Driver | Generate Manifests
   template:
     src: "{{ item }}.j2"
     dest: "{{ kube_config_dir }}/{{ item }}"
@@ -27,7 +28,7 @@
   register: vsphere_csi_manifests
   when: inventory_hostname == groups['kube_control_plane'][0]
 
-- name: vSphere CSI Driver | Apply Manifests
+- name: VSphere CSI Driver | Apply Manifests
   kube:
     kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/{{ item.item }}"
@@ -40,13 +41,13 @@
   loop_control:
     label: "{{ item.item }}"
 
-- name: vSphere CSI Driver | Generate a CSI secret manifest
+- name: VSphere CSI Driver | Generate a CSI secret manifest
   command: "{{ kubectl }} create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n {{ vsphere_csi_namespace }} --dry-run --save-config -o yaml"
   register: vsphere_csi_secret_manifest
   when: inventory_hostname == groups['kube_control_plane'][0]
   no_log: "{{ not (unsafe_show_logs | bool) }}"
 
-- name: vSphere CSI Driver | Apply a CSI secret manifest
+- name: VSphere CSI Driver | Apply a CSI secret manifest
   command:
     cmd: "{{ kubectl }} apply -f -"
     stdin: "{{ vsphere_csi_secret_manifest.stdout }}"
@@ -1,5 +1,6 @@
 ---
-- include_tasks: openstack-credential-check.yml
+- name: External OpenStack Cloud Controller | Check OpenStack credentials
+  include_tasks: openstack-credential-check.yml
   tags: external-openstack
 
 - name: External OpenStack Cloud Controller | Get base64 cacert
@@ -1,5 +1,6 @@
 ---
-- include_tasks: vsphere-credentials-check.yml
+- name: External vSphere Cloud Controller | Check vsphere credentials
+  include_tasks: vsphere-credentials-check.yml
 
 - name: External vSphere Cloud Controller | Generate CPI cloud-config
   template:
@@ -1,6 +1,6 @@
 ---
 
-- name: kube-router | Start Resources
+- name: Kube-router | Start Resources
   kube:
     name: "kube-router"
     kubectl: "{{ bin_dir }}/kubectl"
@@ -11,7 +11,7 @@
   delegate_to: "{{ groups['kube_control_plane'] | first }}"
   run_once: true
 
-- name: kube-router | Wait for kube-router pods to be ready
+- name: Kube-router | Wait for kube-router pods to be ready
   command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa ignore-errors
   register: pods_not_ready
   until: pods_not_ready.stdout.find("kube-router")==-1
@@ -1,5 +1,5 @@
 ---
-- name: check if snapshot namespace exists
+- name: Check if snapshot namespace exists
   register: snapshot_namespace_exists
   kube:
     kubectl: "{{ bin_dir }}/kubectl"
@@ -100,7 +100,7 @@
   run_once: yes
   when: kubectl_localhost
 
-- name: create helper script kubectl.sh on ansible host
+- name: Create helper script kubectl.sh on ansible host
   copy:
     content: |
       #!/bin/bash
@@ -47,7 +47,7 @@
     timeout: 180
 
 
-- name: check already run
+- name: Check already run
   debug:
     msg: "{{ kubeadm_already_run.stat.exists }}"
 
@@ -10,7 +10,7 @@
     - kube_oidc_auth
     - kube_oidc_ca_cert is defined
 
-- name: kubeadm | Check if kubeadm has already run
+- name: Kubeadm | Check if kubeadm has already run
   stat:
     path: "/var/lib/kubelet/config.yaml"
     get_attributes: no
@@ -18,12 +18,12 @@
     get_mime: no
   register: kubeadm_already_run
 
-- name: kubeadm | Backup kubeadm certs / kubeconfig
+- name: Kubeadm | Backup kubeadm certs / kubeconfig
   import_tasks: kubeadm-backup.yml
   when:
     - kubeadm_already_run.stat.exists
 
-- name: kubeadm | aggregate all SANs
+- name: Kubeadm | aggregate all SANs
   set_fact:
     apiserver_sans: "{{ (sans_base + groups['kube_control_plane'] + sans_lb + sans_lb_ip + sans_supp + sans_access_ip + sans_ip + sans_address + sans_override + sans_hostname + sans_fqdn + sans_kube_vip_address) | unique }}"
   vars:
@@ -69,7 +69,7 @@
   when: kubernetes_audit_webhook | default(false)
 
 # Nginx LB(default), If kubeadm_config_api_fqdn is defined, use other LB by kubeadm controlPlaneEndpoint.
-- name: set kubeadm_config_api_fqdn define
+- name: Set kubeadm_config_api_fqdn define
   set_fact:
     kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name | default('lb-apiserver.kubernetes.local') }}"
   when: loadbalancer_apiserver is defined
@@ -78,27 +78,27 @@
   set_fact:
     kubeadmConfig_api_version: v1beta3
 
-- name: kubeadm | Create kubeadm config
+- name: Kubeadm | Create kubeadm config
   template:
     src: "kubeadm-config.{{ kubeadmConfig_api_version }}.yaml.j2"
     dest: "{{ kube_config_dir }}/kubeadm-config.yaml"
     mode: 0640
 
-- name: kubeadm | Create directory to store admission control configurations
+- name: Kubeadm | Create directory to store admission control configurations
   file:
     path: "{{ kube_config_dir }}/admission-controls"
     state: directory
     mode: 0640
   when: kube_apiserver_admission_control_config_file
 
-- name: kubeadm | Push admission control config file
+- name: Kubeadm | Push admission control config file
   template:
     src: "admission-controls.yaml.j2"
     dest: "{{ kube_config_dir }}/admission-controls/admission-controls.yaml"
     mode: 0640
   when: kube_apiserver_admission_control_config_file
 
-- name: kubeadm | Push admission control config files
+- name: Kubeadm | Push admission control config files
   template:
     src: "{{ item | lower }}.yaml.j2"
     dest: "{{ kube_config_dir }}/admission-controls/{{ item | lower }}.yaml"
@@ -108,15 +108,15 @@
     - item in kube_apiserver_admission_plugins_needs_configuration
   loop: "{{ kube_apiserver_enable_admission_plugins }}"
 
-- name: kubeadm | Check apiserver.crt SANs
+- name: Kubeadm | Check apiserver.crt SANs
   block:
-    - name: kubeadm | Check apiserver.crt SAN IPs
+    - name: Kubeadm | Check apiserver.crt SAN IPs
       command:
         cmd: "openssl x509 -noout -in {{ kube_cert_dir }}/apiserver.crt -checkip {{ item }}"
       loop: "{{ apiserver_ips }}"
       register: apiserver_sans_ip_check
       changed_when: apiserver_sans_ip_check.stdout is not search('does match certificate')
-    - name: kubeadm | Check apiserver.crt SAN hosts
+    - name: Kubeadm | Check apiserver.crt SAN hosts
       command:
         cmd: "openssl x509 -noout -in {{ kube_cert_dir }}/apiserver.crt -checkhost {{ item }}"
       loop: "{{ apiserver_hosts }}"
@@ -129,7 +129,7 @@
     - kubeadm_already_run.stat.exists
     - not kube_external_ca_mode
 
-- name: kubeadm | regenerate apiserver cert 1/2
+- name: Kubeadm | regenerate apiserver cert 1/2
   file:
     state: absent
     path: "{{ kube_cert_dir }}/{{ item }}"
@@ -141,7 +141,7 @@
     - apiserver_sans_ip_check.changed or apiserver_sans_host_check.changed
     - not kube_external_ca_mode
 
-- name: kubeadm | regenerate apiserver cert 2/2
+- name: Kubeadm | regenerate apiserver cert 2/2
   command: >-
     {{ bin_dir }}/kubeadm
     init phase certs apiserver
@@ -151,14 +151,14 @@
     - apiserver_sans_ip_check.changed or apiserver_sans_host_check.changed
     - not kube_external_ca_mode
 
-- name: kubeadm | Create directory to store kubeadm patches
+- name: Kubeadm | Create directory to store kubeadm patches
   file:
     path: "{{ kubeadm_patches.dest_dir }}"
     state: directory
     mode: 0640
   when: kubeadm_patches is defined and kubeadm_patches.enabled
 
-- name: kubeadm | Copy kubeadm patches from inventory files
+- name: Kubeadm | Copy kubeadm patches from inventory files
   copy:
     src: "{{ kubeadm_patches.source_dir }}/"
     dest: "{{ kubeadm_patches.dest_dir }}"
@@ -166,7 +166,7 @@
     mode: 0644
   when: kubeadm_patches is defined and kubeadm_patches.enabled
 
-- name: kubeadm | Initialize first master
+- name: Kubeadm | Initialize first master
   command: >-
     timeout -k {{ kubeadm_init_timeout }} {{ kubeadm_init_timeout }}
     {{ bin_dir }}/kubeadm init
@@ -184,7 +184,7 @@
     PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
   notify: Master | restart kubelet
 
-- name: set kubeadm certificate key
+- name: Set kubeadm certificate key
   set_fact:
     kubeadm_certificate_key: "{{ item | regex_search('--certificate-key ([^ ]+)', '\\1') | first }}"
   with_items: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_init'].stdout_lines | default([]) }}"
@@ -229,17 +229,17 @@
     - podsecuritypolicy_enabled
     - inventory_hostname == first_kube_control_plane
 
-- name: kubeadm | Join other masters
+- name: Kubeadm | Join other masters
   include_tasks: kubeadm-secondary.yml
 
-- name: kubeadm | upgrade kubernetes cluster
+- name: Kubeadm | upgrade kubernetes cluster
   include_tasks: kubeadm-upgrade.yml
   when:
     - upgrade_cluster_setup
     - kubeadm_already_run.stat.exists
 
 # FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
-- name: kubeadm | Remove taint for master with node role
+- name: Kubeadm | Remove taint for master with node role
   command: "{{ kubectl }} taint node {{ inventory_hostname }} {{ item }}"
   delegate_to: "{{ first_kube_control_plane }}"
   with_items:
@@ -1,5 +1,5 @@
 ---
-- name: kubeadm | Check api is up
+- name: Kubeadm | Check api is up
   uri:
     url: "https://{{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }}/healthz"
     validate_certs: false
@@ -9,7 +9,7 @@
   delay: 5
   until: _result.status == 200
 
-- name: kubeadm | Upgrade first master
+- name: Kubeadm | Upgrade first master
   command: >-
     timeout -k 600s 600s
     {{ bin_dir }}/kubeadm
@@ -31,7 +31,7 @@
     PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
   notify: Master | restart kubelet
 
-- name: kubeadm | Upgrade other masters
+- name: Kubeadm | Upgrade other masters
   command: >-
     timeout -k 600s 600s
     {{ bin_dir }}/kubeadm
@@ -53,7 +53,7 @@
     PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
   notify: Master | restart kubelet
 
-- name: kubeadm | clean kubectl cache to refresh api types
+- name: Kubeadm | clean kubectl cache to refresh api types
   file:
     path: "{{ item }}"
     state: absent
@@ -62,7 +62,7 @@
     - /root/.kube/http-cache
 
 # FIXME: https://github.com/kubernetes/kubeadm/issues/1318
-- name: kubeadm | scale down coredns replicas to 0 if not using coredns dns_mode
+- name: Kubeadm | scale down coredns replicas to 0 if not using coredns dns_mode
   command: >-
     {{ kubectl }}
     -n kube-system
@@ -1,5 +1,6 @@
 ---
-- import_tasks: pre-upgrade.yml
+- name: Pre-upgrade control plane
+  import_tasks: pre-upgrade.yml
   tags:
     - k8s-pre-upgrade
 
@@ -23,7 +24,8 @@
     dest: "{{ kube_config_dir }}/kubescheduler-config.yaml"
     mode: 0644
 
-- import_tasks: encrypt-at-rest.yml
+- name: Apply Kubernetes encrypt at rest config
+  import_tasks: encrypt-at-rest.yml
   when:
     - kube_encrypt_secret_data
 
@@ -65,14 +65,14 @@
     mode: 0640
   when: not is_kube_master
 
-- name: kubeadm | Create directory to store kubeadm patches
+- name: Kubeadm | Create directory to store kubeadm patches
   file:
     path: "{{ kubeadm_patches.dest_dir }}"
     state: directory
     mode: 0640
   when: kubeadm_patches is defined and kubeadm_patches.enabled
 
-- name: kubeadm | Copy kubeadm patches from inventory files
+- name: Kubeadm | Copy kubeadm patches from inventory files
   copy:
     src: "{{ kubeadm_patches.source_dir }}/"
     dest: "{{ kubeadm_patches.dest_dir }}"
Some files were not shown because too many files have changed in this diff.