Fix outdated tag and experimental ansible-lint rules (#10254)
* project: fix outdated tag and experimental

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: remove no longer useful noqa 301

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: replace unnamed-task by name[missing]

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: fix daemon-reload -> daemon_reload

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

---------

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

pull/10269/head
parent 4f85b75087
commit f8f197e26b
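The tree-wide edits below follow a few mechanical patterns: numeric ansible-lint codes in noqa comments become named rules (503 -> no-handler, 305 -> command-instead-of-shell, unnamed-task -> name[missing], and the no-longer-needed 301 comments are simply dropped), and the unsupported daemon-reload option spelling becomes daemon_reload. A minimal illustrative sketch, assuming a made-up task (the unit name and message here are placeholders, not taken from this diff):

# Before: numeric lint codes and a hyphenated option name
- name: Example | reload systemd and restart a unit  # noqa 503
  systemd:
    name: example.service      # illustrative unit, not from this repo
    state: restarted
    daemon-reload: true        # not a valid ansible.builtin.systemd option
- debug:  # noqa unnamed-task
    msg: "hello"

# After: named rules and the supported option name
- name: Example | reload systemd and restart a unit  # noqa no-handler
  systemd:
    name: example.service
    state: restarted
    daemon_reload: true
- debug:  # noqa name[missing]
    msg: "hello"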
@@ -7,24 +7,11 @@ skip_list:

   # These rules are intentionally skipped:
   #
-  # [E204]: "Lines should be no longer than 160 chars"
-  # This could be re-enabled with a major rewrite in the future.
-  # For now, there's not enough value gain from strictly limiting line length.
-  # (Disabled in May 2019)
-  - '204'
-
-  # [E701]: "meta/main.yml should contain relevant info"
-  # Roles in Kubespray are not intended to be used/imported by Ansible Galaxy.
-  # While it can be useful to have these metadata available, they are also available in the existing documentation.
-  # (Disabled in May 2019)
-  - '701'
-
   # [role-name] "meta/main.yml" Role name role-name does not match ``^+$`` pattern
   # Meta roles in Kubespray don't need proper names
   # (Disabled in June 2021)
   - 'role-name'

-  - 'experimental'
   # [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
   # In Kubespray we use variables that use camelCase to match their k8s counterparts
   # (Disabled in June 2021)

@@ -65,10 +52,6 @@ skip_list:
   # Disable run-once check with free strategy
   # (Disabled in June 2023 after ansible upgrade; FIXME)
   - 'run-once[task]'
-
-  # Disable outdated-tag check
-  # (Disabled in June 2023 after ansible upgrade; FIXME)
-  - 'warning[outdated-tag]'
 exclude_paths:
   # Generated files
   - tests/files/custom_cni/cilium.yaml
@@ -1,6 +1,6 @@
 ---

-- name: Query Azure VMs # noqa 301
+- name: Query Azure VMs
   command: azure vm list-ip-address --json {{ azure_resource_group }}
   register: vm_list_cmd

@@ -1,14 +1,14 @@
 ---

-- name: Query Azure VMs IPs # noqa 301
+- name: Query Azure VMs IPs
   command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
   register: vm_ip_list_cmd

-- name: Query Azure VMs Roles # noqa 301
+- name: Query Azure VMs Roles
   command: az vm list -o json --resource-group {{ azure_resource_group }}
   register: vm_list_cmd

-- name: Query Azure Load Balancer Public IP # noqa 301
+- name: Query Azure Load Balancer Public IP
   command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
   register: lb_pubip_cmd
@@ -69,7 +69,7 @@

 # Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
 # handle manually
-- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301
+- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
   raw: |
     echo {{ item | hash('sha1') }} > /etc/machine-id.new
     mv -b /etc/machine-id.new /etc/machine-id

@@ -79,7 +79,6 @@
   with_items: "{{ containers.results }}"

 - name: Early hack image install to adapt for DIND
-  # noqa 302 - this task uses the raw module intentionally
   raw: |
     rm -fv /usr/bin/udevadm /usr/sbin/udevadm
   delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
@@ -7,7 +7,7 @@
   register: glusterfs_ppa_added
   when: glusterfs_ppa_use

-- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa 503
+- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa no-handler
   apt:
     name: "{{ item }}"
     state: absent

@@ -28,7 +28,7 @@
     name: "{{ gluster_volume_node_mount_dir }}"
     src: "{{ disk_volume_device_1 }}"
     fstype: xfs
-    state: mounted"
+    state: mounted

 # Setup/install tasks.
 - include_tasks: setup-RedHat.yml

@@ -7,7 +7,7 @@
   register: glusterfs_ppa_added
   when: glusterfs_ppa_use

-- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa 503
+- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa no-handler
   apt:
     name: "{{ item }}"
     state: absent
@@ -6,7 +6,7 @@
 - name: "Delete bootstrap Heketi."
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
   when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
-- name: "Ensure there is nothing left over." # noqa 301
+- name: "Ensure there is nothing left over."
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"

@@ -14,7 +14,7 @@
 - name: "Copy topology configuration into container."
   changed_when: false
   command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
-- name: "Load heketi topology." # noqa 503
+- name: "Load heketi topology." # noqa no-handler
   when: "render.changed"
   command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
   register: "load_heketi"

@@ -18,7 +18,7 @@
 - name: "Provision database volume."
   command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
   when: "heketi_database_volume_exists is undefined"
-- name: "Copy configuration from pod." # noqa 301
+- name: "Copy configuration from pod."
   become: true
   command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
 - name: "Get heketi volume ids."

@@ -11,10 +11,10 @@
     src: "topology.json.j2"
     dest: "{{ kube_config_dir }}/topology.json"
     mode: 0644
-- name: "Copy topology configuration into container." # noqa 503
+- name: "Copy topology configuration into container." # noqa no-handler
   when: "rendering.changed"
   command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
-- name: "Load heketi topology." # noqa 503
+- name: "Load heketi topology." # noqa no-handler
   when: "rendering.changed"
   command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
 - name: "Get heketi topology."
@@ -22,7 +22,7 @@
   ignore_errors: true # noqa ignore-errors
   changed_when: false

-- name: "Remove volume groups." # noqa 301
+- name: "Remove volume groups."
   environment:
     PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
   become: true

@@ -30,7 +30,7 @@
   with_items: "{{ volume_groups.stdout_lines }}"
   loop_control: { loop_var: "volume_group" }

-- name: "Remove physical volume from cluster disks." # noqa 301
+- name: "Remove physical volume from cluster disks."
   environment:
     PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
   become: true
@@ -1,43 +1,43 @@
 ---
-- name: Remove storage class. # noqa 301
+- name: Remove storage class.
   command: "{{ bin_dir }}/kubectl delete storageclass gluster"
   ignore_errors: true # noqa ignore-errors
-- name: Tear down heketi. # noqa 301
+- name: Tear down heketi.
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
   ignore_errors: true # noqa ignore-errors
-- name: Tear down heketi. # noqa 301
+- name: Tear down heketi.
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
   ignore_errors: true # noqa ignore-errors
 - name: Tear down bootstrap.
   include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
-- name: Ensure there is nothing left over. # noqa 301
+- name: Ensure there is nothing left over.
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
   retries: 60
   delay: 5
-- name: Ensure there is nothing left over. # noqa 301
+- name: Ensure there is nothing left over.
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
   retries: 60
   delay: 5
-- name: Tear down glusterfs. # noqa 301
+- name: Tear down glusterfs.
   command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
   ignore_errors: true # noqa ignore-errors
-- name: Remove heketi storage service. # noqa 301
+- name: Remove heketi storage service.
   command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
   ignore_errors: true # noqa ignore-errors
-- name: Remove heketi gluster role binding # noqa 301
+- name: Remove heketi gluster role binding
   command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
   ignore_errors: true # noqa ignore-errors
-- name: Remove heketi config secret # noqa 301
+- name: Remove heketi config secret
   command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
   ignore_errors: true # noqa ignore-errors
-- name: Remove heketi db backup # noqa 301
+- name: Remove heketi db backup
   command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
   ignore_errors: true # noqa ignore-errors
-- name: Remove heketi service account # noqa 301
+- name: Remove heketi service account
   command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
   ignore_errors: true # noqa ignore-errors
 - name: Get secrets
@@ -16,13 +16,13 @@
     src: get_cinder_pvs.sh
     dest: /tmp
     mode: u+rwx
-- name: Get PVs provisioned by in-tree cloud provider # noqa 301
+- name: Get PVs provisioned by in-tree cloud provider
   command: /tmp/get_cinder_pvs.sh
   register: pvs
 - name: Remove get_cinder_pvs.sh
   file:
     path: /tmp/get_cinder_pvs.sh
     state: absent
-- name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation # noqa 301
+- name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation
   command: "{{ bin_dir }}/kubectl annotate --overwrite pv {{ item }} pv.kubernetes.io/provisioned-by=cinder.csi.openstack.org"
   loop: "{{ pvs.stdout_lines | list }}"
@@ -22,7 +22,6 @@
     name: containerd
     daemon_reload: true
     enabled: false
-    masked: true
     state: stopped
   tags:
     - reset_containerd

@@ -196,7 +196,7 @@
   register: service_start

 - name: cri-o | trigger service restart only when needed
-  service: # noqa 503
+  service:
     name: crio
     state: restarted
   when:

@@ -63,7 +63,6 @@
     name: crio
     daemon_reload: true
     enabled: false
-    masked: true
     state: stopped
   tags:
     - reset_crio
@@ -143,7 +143,7 @@
         state: started
       when: docker_task_result is not changed
   rescue:
-    - debug: # noqa unnamed-task
+    - debug: # noqa name[missing]
         msg: "Docker start failed. Try to remove our config"
     - name: remove kubespray generated config
       file:

@@ -101,6 +101,6 @@
     - /etc/docker
   ignore_errors: true # noqa ignore-errors

-- name: Docker | systemctl daemon-reload # noqa 503
+- name: Docker | systemctl daemon-reload # noqa no-handler
   systemd:
     daemon_reload: true
@@ -26,7 +26,7 @@
   check_mode: no

 - name: check system search domains
-  # noqa 306 - if resolf.conf has no search domain, grep will exit 1 which would force us to add failed_when: false
+  # noqa risky-shell-pipe - if resolf.conf has no search domain, grep will exit 1 which would force us to add failed_when: false
   # Therefore -o pipefail is not applicable in this specific instance
   shell: grep "^search" /etc/resolv.conf | sed -r 's/^search\s*([^#]+)\s*(#.*)?/\1/'
   args:

@@ -14,7 +14,7 @@
   when: http_proxy is defined or https_proxy is defined

 - name: get systemd version
-  # noqa 303 - systemctl is called intentionally here
+  # noqa command-instead-of-module - systemctl is called intentionally here
   shell: set -o pipefail && systemctl --version | head -n 1 | cut -d " " -f 2
   args:
     executable: /bin/bash
@@ -1,7 +1,7 @@
 ---
 # The image_info_command depends on the Container Runtime and will output something like the following:
 # nginx:1.15,gcr.io/google-containers/kube-proxy:v1.14.1,gcr.io/google-containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9,gcr.io/google-containers/kube-apiserver:v1.14.1,etc...
-- name: check_pull_required | Generate a list of information about the images on a node # noqa 305 image_info_command contains a pipe, therefore requiring shell
+- name: check_pull_required | Generate a list of information about the images on a node # noqa command-instead-of-shell - image_info_command contains a pipe, therefore requiring shell
   shell: "{{ image_info_command }}"
   register: docker_images
   changed_when: false

@@ -18,7 +18,7 @@
   when:
     - not download_always_pull

-- debug: # noqa unnamed-task
+- debug: # noqa name[missing]
     msg: "Pull {{ image_reponame }} required is: {{ pull_required }}"

 - name: download_container | Determine if image is in cache

@@ -68,7 +68,7 @@
     - not image_is_cached

 - name: download_container | Save and compress image
-  shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}" # noqa 305 image_save_command_on_localhost contains a pipe, therefore requires shell
+  shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}" # noqa command-instead-of-shell - image_save_command_on_localhost contains a pipe, therefore requires shell
   delegate_to: "{{ download_delegate }}"
   delegate_facts: no
   register: container_save_status

@@ -108,7 +108,7 @@
     - download_force_cache

 - name: download_container | Load image into the local container registry
-  shell: "{{ image_load_command }}" # noqa 305 image_load_command uses pipes, therefore requires shell
+  shell: "{{ image_load_command }}" # noqa command-instead-of-shell - image_load_command uses pipes, therefore requires shell
   register: container_load_status
   failed_when: container_load_status is failed
   when:
@@ -21,7 +21,7 @@
     - asserts

 - name: prep_download | On localhost, check if user has access to the container runtime without using sudo
-  shell: "{{ image_info_command_on_localhost }}" # noqa 305 image_info_command_on_localhost contains pipe, therefore requires shell
+  shell: "{{ image_info_command_on_localhost }}" # noqa command-instead-of-shell - image_info_command_on_localhost contains pipe, therefore requires shell
   delegate_to: localhost
   connection: local
   run_once: true

@@ -57,7 +57,7 @@
     - asserts

 - name: prep_download | Register docker images info
-  shell: "{{ image_info_command }}" # noqa 305 image_info_command contains pipe therefore requires shell
+  shell: "{{ image_info_command }}" # noqa command-instead-of-shell - image_info_command contains pipe therefore requires shell
   no_log: "{{ not (unsafe_show_logs|bool) }}"
   register: docker_images
   failed_when: false
@@ -1,5 +1,5 @@
 ---
-- name: Join Member | Add member to etcd-events cluster # noqa 301 305
+- name: Join Member | Add member to etcd-events cluster
   command: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_events_peer_url }}"
   register: member_add_result
   until: member_add_result.rc == 0

@@ -1,5 +1,5 @@
 ---
-- name: Join Member | Add member to etcd cluster # noqa 301 305
+- name: Join Member | Add member to etcd cluster
   command: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_peer_url }}"
   register: member_add_result
   until: member_add_result.rc == 0 or 'Peer URLs already exists' in member_add_result.stderr
@@ -24,14 +24,14 @@
     mode: 0640
   register: etcd_ca_cert

-- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Flatcar) # noqa 503
+- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Flatcar) # noqa no-handler
   command: update-ca-certificates
   when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "Flatcar", "Flatcar Container Linux by Kinvolk", "Suse"]

-- name: Gen_certs | update ca-certificates (RedHat) # noqa 503
+- name: Gen_certs | update ca-certificates (RedHat) # noqa no-handler
   command: update-ca-trust extract
   when: etcd_ca_cert.changed and ansible_os_family == "RedHat"

-- name: Gen_certs | update ca-certificates (ClearLinux) # noqa 503
+- name: Gen_certs | update ca-certificates (ClearLinux) # noqa no-handler
   command: clrtrust add "{{ ca_cert_path }}"
   when: etcd_ca_cert.changed and ansible_os_family == "ClearLinux"
@@ -1,6 +1,6 @@
 ---
 - name: Add Helm repositories
-  kubernetes.core.helm_repository: "{{ helm_repository_defaults | combine(item) }}"
+  kubernetes.core.helm_repository: "{{ helm_repository_defaults | combine(item) }}" # noqa args[module]
   loop: "{{ repositories }}"

 - name: Update Helm repositories

@@ -15,5 +15,5 @@
     - helm_update

 - name: Install Helm Applications
-  kubernetes.core.helm: "{{ helm_defaults | combine(release_common_opts, item) }}"
+  kubernetes.core.helm: "{{ helm_defaults | combine(release_common_opts, item) }}" # noqa args[module]
   loop: "{{ releases }}"
@@ -16,7 +16,7 @@
     dest: "{{ local_release_dir }}/krew.yml"
     mode: 0644

-- name: Krew | Install krew # noqa 301 305
+- name: Krew | Install krew # noqa command-instead-of-shell
   shell: "{{ local_release_dir }}/krew-{{ host_os }}_{{ image_arch }} install --archive={{ local_release_dir }}/krew-{{ host_os }}_{{ image_arch }}.tar.gz --manifest={{ local_release_dir }}/krew.yml"
   environment:
     KREW_ROOT: "{{ krew_root_dir }}"
@@ -12,7 +12,7 @@
   run_once: true

 - name: kube-router | Wait for kube-router pods to be ready
-  command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601 ignore-errors
+  command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa ignore-errors
   register: pods_not_ready
   until: pods_not_ready.stdout.find("kube-router")==-1
   retries: 30
@@ -100,5 +100,5 @@
     name: k8s-certs-renew.timer
     enabled: yes
     state: started
-    daemon-reload: "{{ k8s_certs_units is changed }}"
+    daemon_reload: "{{ k8s_certs_units is changed }}"
   when: auto_renew_certificates
@@ -8,7 +8,7 @@
   register: kube_apiserver_manifest_replaced
   when: etcd_secret_changed|default(false)

-- name: "Pre-upgrade | Delete master containers forcefully" # noqa 503
+- name: "Pre-upgrade | Delete master containers forcefully" # noqa no-handler
   shell: "set -o pipefail && docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f"
   args:
     executable: /bin/bash
@@ -35,9 +35,9 @@
     - node_labels is defined
     - node_labels is mapping

-- debug: # noqa unnamed-task
+- debug: # noqa name[missing]
     var: role_node_labels
-- debug: # noqa unnamed-task
+- debug: # noqa name[missing]
     var: inventory_node_labels

 - name: Set label to node
@@ -93,7 +93,7 @@
     - not (disable_host_nameservers | default(false))

 - name: NetworkManager | Check if host has NetworkManager
-  # noqa 303 Should we use service_facts for this?
+  # noqa command-instead-of-module - Should we use service_facts for this?
   command: systemctl is-active --quiet NetworkManager.service
   register: networkmanager_enabled
   failed_when: false

@@ -101,7 +101,7 @@
   check_mode: false

 - name: check systemd-resolved
-  # noqa 303 Should we use service_facts for this?
+  # noqa command-instead-of-module - Should we use service_facts for this?
   command: systemctl is-active systemd-resolved
   register: systemd_resolved_enabled
   failed_when: false
@@ -33,12 +33,12 @@
   changed_when: False
   register: fs_type

-- name: run growpart # noqa 503
+- name: run growpart # noqa no-handler
   command: growpart {{ device }} {{ partition }}
   when: growpart_needed.changed
   environment:
     LC_ALL: C

-- name: run xfs_growfs # noqa 503
+- name: run xfs_growfs # noqa no-handler
   command: xfs_growfs {{ root_device }}
   when: growpart_needed.changed and 'XFS' in fs_type.stdout
@@ -5,7 +5,7 @@
 - name: Calico-rr | Configuring node tasks
   include_tasks: update-node.yml

-- name: Calico-rr | Set label for route reflector # noqa 301
+- name: Calico-rr | Set label for route reflector
   command: >-
     {{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }}
     'i-am-a-route-reflector=true' --overwrite

@@ -6,7 +6,7 @@
   set_fact:
     retry_count: "{{ 0 if retry_count is undefined else retry_count|int + 1 }}"

-- name: Calico | Set label for route reflector # noqa 301 305
+- name: Calico | Set label for route reflector # noqa command-instead-of-shell
   shell: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-rr-id={{ calico_rr_id }} --overwrite"
   changed_when: false
   register: calico_rr_id_label

@@ -29,7 +29,7 @@
       {{ calico_rr_node.stdout | from_json | combine({ 'spec': { 'bgp':
       { 'routeReflectorClusterID': cluster_id }}}, recursive=True) }}

-- name: Calico-rr | Configure route reflector # noqa 301 305
+- name: Calico-rr | Configure route reflector # noqa command-instead-of-shell
   shell: "{{ bin_dir }}/calicoctl.sh replace -f-"
   args:
     stdin: "{{ calico_rr_node_patched | to_json }}"
@@ -72,7 +72,7 @@
   when: calico_datastore == "etcd"

 - name: Calico | Check if calico network pool has already been configured
-  # noqa 306 - grep will exit 1 if no match found
+  # noqa risky-shell-pipe - grep will exit 1 if no match found
   shell: >
     {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr | default(kube_pods_subnet) }}" | wc -l
   args:

@@ -95,7 +95,7 @@
     - calico_pool_cidr is defined

 - name: Calico | Check if calico IPv6 network pool has already been configured
-  # noqa 306 - grep will exit 1 if no match found
+  # noqa risky-shell-pipe - grep will exit 1 if no match found
   shell: >
     {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}" | wc -l
   args:
@@ -1,6 +1,6 @@
 ---
-- name: Calico | Set label for groups nodes # noqa 301 305
-  shell: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-group-id={{ calico_group_id }} --overwrite"
+- name: Calico | Set label for groups nodes
+  command: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-group-id={{ calico_group_id }} --overwrite"
   changed_when: false
   register: calico_group_id_label
   until: calico_group_id_label is succeeded
@@ -11,7 +11,7 @@
   when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped

 - name: Cilium | Wait for pods to run
-  command: "{{ kubectl }} -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601
+  command: "{{ kubectl }} -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa literal-compare
   register: pods_not_ready
   until: pods_not_ready.stdout.find("cilium")==-1
   retries: "{{ cilium_rolling_restart_wait_retries_count | int }}"
@@ -43,7 +43,6 @@
     - has_quorum

 - name: Delete old certificates
-  # noqa 302 ignore-error - rm is ok here for now
   shell: "rm {{ etcd_cert_dir }}/*{{ item }}*"
   with_items: "{{ groups['broken_etcd'] }}"
   register: delete_old_cerificates

@@ -26,7 +26,7 @@
     path: "{{ etcd_data_dir }}"
     state: absent

-- name: Restore etcd snapshot # noqa 301 305
+- name: Restore etcd snapshot # noqa command-instead-of-shell
   shell: "{{ bin_dir }}/etcdctl snapshot restore /tmp/snapshot.db --name {{ etcd_member_name }} --initial-cluster {{ etcd_member_name }}={{ etcd_peer_url }} --initial-cluster-token k8s_etcd --initial-advertise-peer-urls {{ etcd_peer_url }} --data-dir {{ etcd_data_dir }}"
   environment:
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
@@ -9,7 +9,7 @@
   changed_when: false
   run_once: true

-- name: remove-node | Drain node except daemonsets resource # noqa 301
+- name: remove-node | Drain node except daemonsets resource
   command: >-
     {{ kubectl }} drain
     --force
@@ -38,7 +38,7 @@
   tags:
     - docker

-- name: reset | systemctl daemon-reload # noqa 503
+- name: reset | systemctl daemon-reload # noqa no-handler
   systemd:
     daemon_reload: true
   when: services_removed.changed

@@ -174,7 +174,7 @@
   tags:
     - services

-- name: reset | gather mounted kubelet dirs # noqa 301
+- name: reset | gather mounted kubelet dirs
   shell: set -o pipefail && mount | grep /var/lib/kubelet/ | awk '{print $3}' | tac
   args:
     executable: /bin/bash

@@ -185,7 +185,7 @@
   tags:
     - mounts

-- name: reset | unmount kubelet dirs # noqa 301
+- name: reset | unmount kubelet dirs
   command: umount -f {{ item }}
   with_items: "{{ mounted_dirs.stdout_lines }}"
   register: umount_dir
@@ -29,11 +29,11 @@
   register: patch_kube_proxy_state
   when: current_kube_proxy_state.stdout | trim | lower != "linux"

-- debug: # noqa unnamed-task
+- debug: # noqa name[missing]
     msg: "{{ patch_kube_proxy_state.stdout_lines }}"
   when: patch_kube_proxy_state is not skipped

-- debug: # noqa unnamed-task
+- debug: # noqa name[missing]
     msg: "{{ patch_kube_proxy_state.stderr_lines }}"
   when: patch_kube_proxy_state is not skipped
   tags: init
@@ -32,7 +32,7 @@
   when:
     - item.value.converted|bool

-- name: Resize images # noqa 301
+- name: Resize images
   command: qemu-img resize {{ images_dir }}/{{ item.key }}.qcow2 +8G
   loop: "{{ images|dict2items }}"

@@ -43,16 +43,16 @@
     dest: "{{ images_dir }}/Dockerfile"
     mode: 0644

-- name: Create docker images for each OS # noqa 301
+- name: Create docker images for each OS
   command: docker build -t {{ registry }}/vm-{{ item.key }}:{{ item.value.tag }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }}
   loop: "{{ images|dict2items }}"

-- name: docker login # noqa 301
+- name: docker login
   command: docker login -u="{{ docker_user }}" -p="{{ docker_password }}" "{{ docker_host }}"

-- name: docker push image # noqa 301
+- name: docker push image
   command: docker push {{ registry }}/vm-{{ item.key }}:{{ item.value.tag }}
   loop: "{{ images|dict2items }}"

-- name: docker logout # noqa 301
+- name: docker logout
   command: docker logout -u="{{ docker_user }}" "{{ docker_host }}"
@@ -20,6 +20,6 @@

 - name: Template the inventory
   template:
-    src: ../templates/inventory-aws.j2 # noqa 404 CI inventory templates are not in role_path
+    src: ../templates/inventory-aws.j2 # noqa no-relative-paths - CI inventory templates are not in role_path
     dest: "{{ inventory_path }}"
     mode: 0644

@@ -86,7 +86,7 @@

 - name: Template the inventory
   template:
-    src: ../templates/inventory-do.j2 # noqa 404 CI templates are not in role_path
+    src: ../templates/inventory-do.j2 # noqa no-relative-paths - CI templates are not in role_path
     dest: "{{ inventory_path }}"
     mode: 0644
   when: state == 'present'
@@ -28,7 +28,7 @@
     {%- endif -%}

 - name: Create gce instances
-  google.cloud.gcp_compute_instance:
+  google.cloud.gcp_compute_instance: # noqa args[module] - Probably doesn't work
     instance_names: "{{ instance_names }}"
     machine_type: "{{ cloud_machine_type }}"
     image: "{{ cloud_image | default(omit) }}"

@@ -51,7 +51,7 @@
     groupname: "waitfor_hosts"
   with_items: '{{ gce.instance_data }}'

-- name: Template the inventory # noqa 404 CI inventory templates are not in role_path
+- name: Template the inventory # noqa no-relative-paths - CI inventory templates are not in role_path
   template:
     src: ../templates/inventory-gce.j2
     dest: "{{ inventory_path }}"

@@ -64,7 +64,7 @@
     mode: 0755
   when: mode in ['scale', 'separate-scale', 'ha-scale']

-- name: Template fake hosts group vars # noqa 404 CI templates are not in role_path
+- name: Template fake hosts group vars # noqa no-relative-paths - CI templates are not in role_path
   template:
     src: ../templates/fake_hosts.yml.j2
     dest: "{{ inventory_path|dirname }}/group_vars/fake_hosts.yml"
@@ -19,7 +19,7 @@
     k8s-{{ test_name }}-1,k8s-{{ test_name }}-2
     {%- endif -%}

-- name: stop gce instances
+- name: stop gce instances # noqa args[module] - Probably doesn't work
   google.cloud.gcp_compute_instance:
     instance_names: "{{ instance_names }}"
     image: "{{ cloud_image | default(omit) }}"

@@ -33,7 +33,7 @@
   poll: 3
   register: gce

-- name: delete gce instances
+- name: delete gce instances # noqa args[module] - Probably doesn't work
   google.cloud.gcp_compute_instance:
     instance_names: "{{ instance_names }}"
     image: "{{ cloud_image | default(omit) }}"
@@ -56,7 +56,7 @@
   no_log: True
   failed_when: false

-- name: Apply the lifecycle rules # noqa 301
+- name: Apply the lifecycle rules
   command: "{{ dir }}/google-cloud-sdk/bin/gsutil lifecycle set {{ dir }}/gcs_life.json gs://{{ test_name }}"
   changed_when: false
   environment:

@@ -77,5 +77,5 @@
   failed_when: false
   no_log: True

-- debug: # noqa unnamed-task
+- debug: # noqa name[missing]
     msg: "A public url https://storage.googleapis.com/{{ test_name }}/{{ file_name }}"
@@ -12,7 +12,7 @@
   delay: 5
   until: apiserver_response is success

-- debug: # noqa unnamed-task
+- debug: # noqa name[missing]
     msg: "{{ apiserver_response.json }}"

 - name: Check API servers version

@@ -12,7 +12,7 @@
     bin_dir: "/usr/local/bin"
   when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]

-- import_role: # noqa unnamed-task
+- import_role: # noqa name[missing]
     name: cluster-dump

 - name: Check kubectl output
@@ -21,7 +21,7 @@
   register: get_nodes
   no_log: true

-- debug: # noqa unnamed-task
+- debug: # noqa name[missing]
     msg: "{{ get_nodes.stdout.split('\n') }}"

 - name: Check that all nodes are running and ready

@@ -12,7 +12,7 @@
     bin_dir: "/usr/local/bin"
   when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]

-- import_role: # noqa unnamed-task
+- import_role: # noqa name[missing]
     name: cluster-dump

 - name: Check kubectl output
@@ -21,7 +21,7 @@
   register: get_pods
   no_log: true

-- debug: # noqa unnamed-task
+- debug: # noqa name[missing]
     msg: "{{ get_pods.stdout.split('\n') }}"

 - name: Check that all pods are running and ready

@@ -44,6 +44,6 @@
   register: get_pods
   no_log: true

-- debug: # noqa unnamed-task
+- debug: # noqa name[missing]
     msg: "{{ get_pods.stdout.split('\n') }}"
   failed_when: not run_pods_log is success
@@ -23,7 +23,7 @@
   register: get_csr
   changed_when: false

-- debug: # noqa unnamed-task
+- debug: # noqa name[missing]
     msg: "{{ get_csr.stdout.split('\n') }}"

 - name: Check there are csrs

@@ -63,7 +63,7 @@
       when: get_csr.stdout_lines | length > 0
       changed_when: certificate_approve.stdout

-    - debug: # noqa unnamed-task
+    - debug: # noqa name[missing]
         msg: "{{ certificate_approve.stdout.split('\n') }}"

   when:
@@ -114,7 +114,7 @@
     - agnhost1
     - agnhost2

-- import_role: # noqa unnamed-task
+- import_role: # noqa name[missing]
     name: cluster-dump

 - name: Check that all pods are running and ready

@@ -137,7 +137,7 @@
   register: pods
   no_log: true

-- debug: # noqa unnamed-task
+- debug: # noqa name[missing]
     msg: "{{ pods.stdout.split('\n') }}"
   failed_when: not run_pods_log is success
@@ -162,7 +162,7 @@
   register: get_pods
   no_log: true

-- debug: # noqa unnamed-task
+- debug: # noqa name[missing]
     msg: "{{ get_pods.stdout.split('\n') }}"

 - name: Set networking facts

@@ -26,7 +26,7 @@
     bin_dir: "/usr/local/bin"
   when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]

-- import_role: # noqa unnamed-task
+- import_role: # noqa name[missing]
     name: cluster-dump

 - name: Wait for netchecker server
@@ -60,7 +60,7 @@
     - netchecker-agent-hostnet
   when: not nca_pod is success

-- debug: # noqa unnamed-task
+- debug: # noqa name[missing]
     var: nca_pod.stdout_lines
   when: inventory_hostname == groups['kube_control_plane'][0]

@@ -96,7 +96,7 @@
   when:
     - agents.content != '{}'

-- debug: # noqa unnamed-task
+- debug: # noqa name[missing]
     var: ncs_pod
   run_once: true
@@ -130,7 +130,7 @@
     - agents.content is defined
     - agents.content[0] == '{'

-- debug: # noqa unnamed-task
+- debug: # noqa name[missing]
     var: agents_check_result
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   run_once: true
@@ -147,7 +147,7 @@
     - connectivity_check.content is defined
     - connectivity_check.content[0] == '{'

-- debug: # noqa unnamed-task
+- debug: # noqa name[missing]
     var: connectivity_check_result
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   run_once: true