Fix outdated tag and experimental ansible-lint rules (#10254)

* project: fix outdated-tag and experimental ansible-lint rules

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: remove no longer useful noqa 301

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: replace unnamed-task by name[missing]

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: fix daemon-reload -> daemon_reload

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

---------

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
Arthur Outhenin-Chalandre 2023-06-30 11:51:57 +02:00 committed by GitHub
parent 4f85b75087
commit f8f197e26b
56 changed files with 101 additions and 122 deletions
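
Taken together, the changes below migrate Kubespray's remaining legacy numeric `# noqa` IDs to the named rules that current ansible-lint releases emit, drop suppressions that are no longer useful, and fix one genuine bug in a `systemd` task. A hedged summary of the renames applied in this diff (rule names as used by recent ansible-lint; the exact minimum version is not stated in the commit):

```yaml
# Legacy ID             Replacement in this commit
# noqa 301           -> removed outright (no longer useful)
# noqa 303           -> # noqa command-instead-of-module
# noqa 305           -> # noqa command-instead-of-shell
# noqa 306           -> # noqa risky-shell-pipe
# noqa 404           -> # noqa no-relative-paths
# noqa 503           -> # noqa no-handler
# noqa 601           -> # noqa literal-compare
# noqa unnamed-task  -> # noqa name[missing]
# daemon-reload:     -> daemon_reload:  (ansible.builtin.systemd parameter fix)
```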

@@ -7,24 +7,11 @@ skip_list:
# These rules are intentionally skipped:
#
# [E204]: "Lines should be no longer than 160 chars"
# This could be re-enabled with a major rewrite in the future.
# For now, there's not enough value gain from strictly limiting line length.
# (Disabled in May 2019)
- '204'
# [E701]: "meta/main.yml should contain relevant info"
# Roles in Kubespray are not intended to be used/imported by Ansible Galaxy.
# While it can be useful to have this metadata available, it is also available in the existing documentation.
# (Disabled in May 2019)
- '701'
# [role-name] "meta/main.yml" Role name role-name does not match ``^[a-z][a-z0-9_]+$`` pattern
# Meta roles in Kubespray don't need proper names
# (Disabled in June 2021)
- 'role-name'
- 'experimental'
# [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
# In Kubespray we use variables that use camelCase to match their k8s counterparts
# (Disabled in June 2021)
@@ -65,10 +65,6 @@ skip_list:
# Disable run-once check with free strategy
# (Disabled in June 2023 after ansible upgrade; FIXME)
- 'run-once[task]'
# Disable outdated-tag check
# (Disabled in June 2023 after ansible upgrade; FIXME)
- 'warning[outdated-tag]'
exclude_paths:
# Generated files
- tests/files/custom_cni/cilium.yaml
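
For readers less familiar with ansible-lint: `skip_list` in `.ansible-lint` disables a rule repo-wide, while an inline `# noqa <rule>` comment silences a single occurrence, which is why the rest of this commit touches task files rather than this config. A minimal sketch of the two levels (file names and rule choices are illustrative, not from Kubespray):

```yaml
# .ansible-lint — the rule never fires anywhere in the repo
skip_list:
  - 'var-naming'

# tasks/main.yml — one known-good spot is exempted; the rule still applies elsewhere
- name: Query nodes
  command: kubectl get nodes  # noqa no-changed-when - read-only query
  register: nodes
```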

@@ -1,6 +1,6 @@
---
- name: Query Azure VMs # noqa 301
- name: Query Azure VMs
command: azure vm list-ip-address --json {{ azure_resource_group }}
register: vm_list_cmd

@@ -1,14 +1,14 @@
---
- name: Query Azure VMs IPs # noqa 301
- name: Query Azure VMs IPs
command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
register: vm_ip_list_cmd
- name: Query Azure VMs Roles # noqa 301
- name: Query Azure VMs Roles
command: az vm list -o json --resource-group {{ azure_resource_group }}
register: vm_list_cmd
- name: Query Azure Load Balancer Public IP # noqa 301
- name: Query Azure Load Balancer Public IP
command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
register: lb_pubip_cmd

@@ -69,7 +69,7 @@
# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
# handle manually
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
raw: |
echo {{ item | hash('sha1') }} > /etc/machine-id.new
mv -b /etc/machine-id.new /etc/machine-id
@@ -79,7 +79,6 @@
with_items: "{{ containers.results }}"
- name: Early hack image install to adapt for DIND
# noqa 302 - this task uses the raw module intentionally
raw: |
rm -fv /usr/bin/udevadm /usr/sbin/udevadm
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"

@@ -7,7 +7,7 @@
register: glusterfs_ppa_added
when: glusterfs_ppa_use
- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa 503
- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa no-handler
apt:
name: "{{ item }}"
state: absent
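
`no-handler` (numeric 503) fires when a task is gated on another task's `changed` result, because the idiomatic Ansible construct for "run only when something changed" is a handler. The PPA tasks above deliberately keep the `when: ....changed` form, so the suppression is renamed rather than removed. A hedged sketch of both patterns (package and repo names are illustrative):

```yaml
# Form used above — triggers no-handler, hence the noqa:
- name: Add GlusterFS PPA
  apt_repository:
    repo: "ppa:gluster/glusterfs-7"
  register: glusterfs_ppa_added

- name: Reinstall client if the PPA was just added  # noqa no-handler
  apt:
    name: glusterfs-client
    state: absent
  when: glusterfs_ppa_added.changed

# Form the rule prefers — the handler runs only on change, no register needed:
- name: Add GlusterFS PPA
  apt_repository:
    repo: "ppa:gluster/glusterfs-7"
  notify: Reinstall glusterfs client
```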

@@ -28,7 +28,7 @@
name: "{{ gluster_volume_node_mount_dir }}"
src: "{{ disk_volume_device_1 }}"
fstype: xfs
state: mounted"
state: mounted
# Setup/install tasks.
- include_tasks: setup-RedHat.yml

@@ -7,7 +7,7 @@
register: glusterfs_ppa_added
when: glusterfs_ppa_use
- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa 503
- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa no-handler
apt:
name: "{{ item }}"
state: absent

@@ -6,7 +6,7 @@
- name: "Delete bootstrap Heketi."
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
- name: "Ensure there is nothing left over." # noqa 301
- name: "Ensure there is nothing left over."
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
register: "heketi_result"
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"

@@ -14,7 +14,7 @@
- name: "Copy topology configuration into container."
changed_when: false
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
- name: "Load heketi topology." # noqa 503
- name: "Load heketi topology." # noqa no-handler
when: "render.changed"
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
register: "load_heketi"

@@ -18,7 +18,7 @@
- name: "Provision database volume."
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
when: "heketi_database_volume_exists is undefined"
- name: "Copy configuration from pod." # noqa 301
- name: "Copy configuration from pod."
become: true
command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
- name: "Get heketi volume ids."

@@ -11,10 +11,10 @@
src: "topology.json.j2"
dest: "{{ kube_config_dir }}/topology.json"
mode: 0644
- name: "Copy topology configuration into container." # noqa 503
- name: "Copy topology configuration into container." # noqa no-handler
when: "rendering.changed"
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
- name: "Load heketi topology." # noqa 503
- name: "Load heketi topology." # noqa no-handler
when: "rendering.changed"
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
- name: "Get heketi topology."

@@ -22,7 +22,7 @@
ignore_errors: true # noqa ignore-errors
changed_when: false
- name: "Remove volume groups." # noqa 301
- name: "Remove volume groups."
environment:
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
become: true
@@ -30,7 +30,7 @@
with_items: "{{ volume_groups.stdout_lines }}"
loop_control: { loop_var: "volume_group" }
- name: "Remove physical volume from cluster disks." # noqa 301
- name: "Remove physical volume from cluster disks."
environment:
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
become: true

@@ -1,43 +1,43 @@
---
- name: Remove storage class. # noqa 301
- name: Remove storage class.
command: "{{ bin_dir }}/kubectl delete storageclass gluster"
ignore_errors: true # noqa ignore-errors
- name: Tear down heketi. # noqa 301
- name: Tear down heketi.
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
ignore_errors: true # noqa ignore-errors
- name: Tear down heketi. # noqa 301
- name: Tear down heketi.
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
ignore_errors: true # noqa ignore-errors
- name: Tear down bootstrap.
include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
- name: Ensure there is nothing left over. # noqa 301
- name: Ensure there is nothing left over.
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
register: "heketi_result"
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
retries: 60
delay: 5
- name: Ensure there is nothing left over. # noqa 301
- name: Ensure there is nothing left over.
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
register: "heketi_result"
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
retries: 60
delay: 5
- name: Tear down glusterfs. # noqa 301
- name: Tear down glusterfs.
command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
ignore_errors: true # noqa ignore-errors
- name: Remove heketi storage service. # noqa 301
- name: Remove heketi storage service.
command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
ignore_errors: true # noqa ignore-errors
- name: Remove heketi gluster role binding # noqa 301
- name: Remove heketi gluster role binding
command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
ignore_errors: true # noqa ignore-errors
- name: Remove heketi config secret # noqa 301
- name: Remove heketi config secret
command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
ignore_errors: true # noqa ignore-errors
- name: Remove heketi db backup # noqa 301
- name: Remove heketi db backup
command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
ignore_errors: true # noqa ignore-errors
- name: Remove heketi service account # noqa 301
- name: Remove heketi service account
command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
ignore_errors: true # noqa ignore-errors
- name: Get secrets

@@ -16,13 +16,13 @@
src: get_cinder_pvs.sh
dest: /tmp
mode: u+rwx
- name: Get PVs provisioned by in-tree cloud provider # noqa 301
- name: Get PVs provisioned by in-tree cloud provider
command: /tmp/get_cinder_pvs.sh
register: pvs
- name: Remove get_cinder_pvs.sh
file:
path: /tmp/get_cinder_pvs.sh
state: absent
- name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation # noqa 301
- name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation
command: "{{ bin_dir }}/kubectl annotate --overwrite pv {{ item }} pv.kubernetes.io/provisioned-by=cinder.csi.openstack.org"
loop: "{{ pvs.stdout_lines | list }}"

@@ -22,7 +22,6 @@
name: containerd
daemon_reload: true
enabled: false
masked: true
state: stopped
tags:
- reset_containerd

@@ -196,7 +196,7 @@
register: service_start
- name: cri-o | trigger service restart only when needed
service: # noqa 503
service:
name: crio
state: restarted
when:

@@ -63,7 +63,6 @@
name: crio
daemon_reload: true
enabled: false
masked: true
state: stopped
tags:
- reset_crio

@@ -143,7 +143,7 @@
state: started
when: docker_task_result is not changed
rescue:
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "Docker start failed. Try to remove our config"
- name: remove kubespray generated config
file:
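
`name[missing]` is the renamed successor of `unnamed-task`; the suppression keeps these one-off `debug` tasks unnamed. The lint-clean alternative is simply to give the task a name, e.g. (wording hypothetical):

```yaml
- name: Explain Docker start failure before removing generated config
  debug:
    msg: "Docker start failed. Try to remove our config"
```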

@@ -101,6 +101,6 @@
- /etc/docker
ignore_errors: true # noqa ignore-errors
- name: Docker | systemctl daemon-reload # noqa 503
- name: Docker | systemctl daemon-reload # noqa no-handler
systemd:
daemon_reload: true

@@ -26,7 +26,7 @@
check_mode: no
- name: check system search domains
# noqa 306 - if resolv.conf has no search domain, grep will exit 1 which would force us to add failed_when: false
# noqa risky-shell-pipe - if resolv.conf has no search domain, grep will exit 1 which would force us to add failed_when: false
# Therefore -o pipefail is not applicable in this specific instance
shell: grep "^search" /etc/resolv.conf | sed -r 's/^search\s*([^#]+)\s*(#.*)?/\1/'
args:

@@ -14,7 +14,7 @@
when: http_proxy is defined or https_proxy is defined
- name: get systemd version
# noqa 303 - systemctl is called intentionally here
# noqa command-instead-of-module - systemctl is called intentionally here
shell: set -o pipefail && systemctl --version | head -n 1 | cut -d " " -f 2
args:
executable: /bin/bash
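
`command-instead-of-module` (numeric 303) flags shelling out to CLIs that have dedicated modules. Modules exist for managing services, but not for querying systemd's own version, so the call above stays and only the noqa label changes. A sketch of the contrast (assuming a host where `service_facts` is available):

```yaml
# Preferred where a module exists:
- name: Gather service state the module way
  service_facts:

# No module covers this query, so the CLI call is suppressed explicitly:
- name: get systemd version  # noqa command-instead-of-module
  shell: set -o pipefail && systemctl --version | head -n 1 | cut -d " " -f 2
```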

@@ -1,7 +1,7 @@
---
# The image_info_command depends on the Container Runtime and will output something like the following:
# nginx:1.15,gcr.io/google-containers/kube-proxy:v1.14.1,gcr.io/google-containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9,gcr.io/google-containers/kube-apiserver:v1.14.1,etc...
- name: check_pull_required | Generate a list of information about the images on a node # noqa 305 image_info_command contains a pipe, therefore requiring shell
- name: check_pull_required | Generate a list of information about the images on a node # noqa command-instead-of-shell - image_info_command contains a pipe, therefore requiring shell
shell: "{{ image_info_command }}"
register: docker_images
changed_when: false
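
`command-instead-of-shell` (numeric 305) asks for `command:` whenever no shell features are used. `image_info_command` expands to a pipeline at runtime, so `shell:` is genuinely required here, and the renamed noqa now says so inline. The distinction in miniature (commands are illustrative):

```yaml
# command is enough: no pipes, redirection, or globbing
- name: List image IDs
  command: docker images -q

# shell is required: the pipe must be interpreted by a shell
- name: Count images  # noqa command-instead-of-shell - pipeline needs a shell
  shell: docker images -q | wc -l
```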

@@ -18,7 +18,7 @@
when:
- not download_always_pull
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "Pull {{ image_reponame }} required is: {{ pull_required }}"
- name: download_container | Determine if image is in cache
@@ -68,7 +68,7 @@
- not image_is_cached
- name: download_container | Save and compress image
shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}" # noqa 305 image_save_command_on_localhost contains a pipe, therefore requires shell
shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}" # noqa command-instead-of-shell - image_save_command_on_localhost contains a pipe, therefore requires shell
delegate_to: "{{ download_delegate }}"
delegate_facts: no
register: container_save_status
@@ -108,7 +108,7 @@
- download_force_cache
- name: download_container | Load image into the local container registry
shell: "{{ image_load_command }}" # noqa 305 image_load_command uses pipes, therefore requires shell
shell: "{{ image_load_command }}" # noqa command-instead-of-shell - image_load_command uses pipes, therefore requires shell
register: container_load_status
failed_when: container_load_status is failed
when:

@@ -21,7 +21,7 @@
- asserts
- name: prep_download | On localhost, check if user has access to the container runtime without using sudo
shell: "{{ image_info_command_on_localhost }}" # noqa 305 image_info_command_on_localhost contains pipe, therefore requires shell
shell: "{{ image_info_command_on_localhost }}" # noqa command-instead-of-shell - image_info_command_on_localhost contains pipe, therefore requires shell
delegate_to: localhost
connection: local
run_once: true
@@ -57,7 +57,7 @@
- asserts
- name: prep_download | Register docker images info
shell: "{{ image_info_command }}" # noqa 305 image_info_command contains pipe therefore requires shell
shell: "{{ image_info_command }}" # noqa command-instead-of-shell - image_info_command contains pipe therefore requires shell
no_log: "{{ not (unsafe_show_logs|bool) }}"
register: docker_images
failed_when: false

@@ -1,5 +1,5 @@
---
- name: Join Member | Add member to etcd-events cluster # noqa 301 305
- name: Join Member | Add member to etcd-events cluster
command: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_events_peer_url }}"
register: member_add_result
until: member_add_result.rc == 0

@@ -1,5 +1,5 @@
---
- name: Join Member | Add member to etcd cluster # noqa 301 305
- name: Join Member | Add member to etcd cluster
command: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_peer_url }}"
register: member_add_result
until: member_add_result.rc == 0 or 'Peer URLs already exists' in member_add_result.stderr

@@ -24,14 +24,14 @@
mode: 0640
register: etcd_ca_cert
- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Flatcar) # noqa 503
- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Flatcar) # noqa no-handler
command: update-ca-certificates
when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "Flatcar", "Flatcar Container Linux by Kinvolk", "Suse"]
- name: Gen_certs | update ca-certificates (RedHat) # noqa 503
- name: Gen_certs | update ca-certificates (RedHat) # noqa no-handler
command: update-ca-trust extract
when: etcd_ca_cert.changed and ansible_os_family == "RedHat"
- name: Gen_certs | update ca-certificates (ClearLinux) # noqa 503
- name: Gen_certs | update ca-certificates (ClearLinux) # noqa no-handler
command: clrtrust add "{{ ca_cert_path }}"
when: etcd_ca_cert.changed and ansible_os_family == "ClearLinux"

@@ -1,6 +1,6 @@
---
- name: Add Helm repositories
kubernetes.core.helm_repository: "{{ helm_repository_defaults | combine(item) }}"
kubernetes.core.helm_repository: "{{ helm_repository_defaults | combine(item) }}" # noqa args[module]
loop: "{{ repositories }}"
- name: Update Helm repositories
@@ -15,5 +15,5 @@
- helm_update
- name: Install Helm Applications
kubernetes.core.helm: "{{ helm_defaults | combine(release_common_opts, item) }}"
kubernetes.core.helm: "{{ helm_defaults | combine(release_common_opts, item) }}" # noqa args[module]
loop: "{{ releases }}"
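
The new `args[module]` suppressions address a different problem: ansible-lint validates module arguments against the collection's argument spec, but when the whole argument set is a single Jinja expression the final keys only exist at runtime, so validation cannot succeed. A hedged sketch of the opaque form used above versus the statically checkable form (repository values are illustrative):

```yaml
# Opaque to the linter — keys resolved at runtime, hence noqa args[module]:
- name: Add Helm repositories
  kubernetes.core.helm_repository: "{{ helm_repository_defaults | combine(item) }}"  # noqa args[module]
  loop: "{{ repositories }}"

# Checkable by the linter — argument keys are visible in the source:
- name: Add a Helm repository
  kubernetes.core.helm_repository:
    name: bitnami
    repo_url: https://charts.bitnami.com/bitnami
```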

@@ -16,7 +16,7 @@
dest: "{{ local_release_dir }}/krew.yml"
mode: 0644
- name: Krew | Install krew # noqa 301 305
- name: Krew | Install krew # noqa command-instead-of-shell
shell: "{{ local_release_dir }}/krew-{{ host_os }}_{{ image_arch }} install --archive={{ local_release_dir }}/krew-{{ host_os }}_{{ image_arch }}.tar.gz --manifest={{ local_release_dir }}/krew.yml"
environment:
KREW_ROOT: "{{ krew_root_dir }}"

@@ -12,7 +12,7 @@
run_once: true
- name: kube-router | Wait for kube-router pods to be ready
command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601 ignore-errors
command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa ignore-errors
register: pods_not_ready
until: pods_not_ready.stdout.find("kube-router")==-1
retries: 30

@@ -100,5 +100,5 @@
name: k8s-certs-renew.timer
enabled: yes
state: started
daemon-reload: "{{ k8s_certs_units is changed }}"
daemon_reload: "{{ k8s_certs_units is changed }}"
when: auto_renew_certificates
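
Unlike the noqa renames, this hunk fixes a real bug: the `ansible.builtin.systemd` module's parameter is `daemon_reload` (underscore); `daemon-reload` is the `systemctl` subcommand spelling, not a valid module argument, and Ansible would reject the task as having an unsupported parameter. The corrected task in full (task name assumed; the hunk omits it):

```yaml
- name: Enable certificate renewal timer
  systemd:
    name: k8s-certs-renew.timer
    enabled: yes
    state: started
    daemon_reload: "{{ k8s_certs_units is changed }}"
  when: auto_renew_certificates
```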

@@ -8,7 +8,7 @@
register: kube_apiserver_manifest_replaced
when: etcd_secret_changed|default(false)
- name: "Pre-upgrade | Delete master containers forcefully" # noqa 503
- name: "Pre-upgrade | Delete master containers forcefully" # noqa no-handler
shell: "set -o pipefail && docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f"
args:
executable: /bin/bash

@@ -35,9 +35,9 @@
- node_labels is defined
- node_labels is mapping
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
var: role_node_labels
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
var: inventory_node_labels
- name: Set label to node

@@ -93,7 +93,7 @@
- not (disable_host_nameservers | default(false))
- name: NetworkManager | Check if host has NetworkManager
# noqa 303 Should we use service_facts for this?
# noqa command-instead-of-module - Should we use service_facts for this?
command: systemctl is-active --quiet NetworkManager.service
register: networkmanager_enabled
failed_when: false
@@ -101,7 +101,7 @@
check_mode: false
- name: check systemd-resolved
# noqa 303 Should we use service_facts for this?
# noqa command-instead-of-module - Should we use service_facts for this?
command: systemctl is-active systemd-resolved
register: systemd_resolved_enabled
failed_when: false

@@ -33,12 +33,12 @@
changed_when: False
register: fs_type
- name: run growpart # noqa 503
- name: run growpart # noqa no-handler
command: growpart {{ device }} {{ partition }}
when: growpart_needed.changed
environment:
LC_ALL: C
- name: run xfs_growfs # noqa 503
- name: run xfs_growfs # noqa no-handler
command: xfs_growfs {{ root_device }}
when: growpart_needed.changed and 'XFS' in fs_type.stdout

@@ -5,7 +5,7 @@
- name: Calico-rr | Configuring node tasks
include_tasks: update-node.yml
- name: Calico-rr | Set label for route reflector # noqa 301
- name: Calico-rr | Set label for route reflector
command: >-
{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }}
'i-am-a-route-reflector=true' --overwrite

@@ -6,7 +6,7 @@
set_fact:
retry_count: "{{ 0 if retry_count is undefined else retry_count|int + 1 }}"
- name: Calico | Set label for route reflector # noqa 301 305
- name: Calico | Set label for route reflector # noqa command-instead-of-shell
shell: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-rr-id={{ calico_rr_id }} --overwrite"
changed_when: false
register: calico_rr_id_label
@@ -29,7 +29,7 @@
{{ calico_rr_node.stdout | from_json | combine({ 'spec': { 'bgp':
{ 'routeReflectorClusterID': cluster_id }}}, recursive=True) }}
- name: Calico-rr | Configure route reflector # noqa 301 305
- name: Calico-rr | Configure route reflector # noqa command-instead-of-shell
shell: "{{ bin_dir }}/calicoctl.sh replace -f-"
args:
stdin: "{{ calico_rr_node_patched | to_json }}"

@@ -72,7 +72,7 @@
when: calico_datastore == "etcd"
- name: Calico | Check if calico network pool has already been configured
# noqa 306 - grep will exit 1 if no match found
# noqa risky-shell-pipe - grep will exit 1 if no match found
shell: >
{{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr | default(kube_pods_subnet) }}" | wc -l
args:
@@ -95,7 +95,7 @@
- calico_pool_cidr is defined
- name: Calico | Check if calico IPv6 network pool has already been configured
# noqa 306 - grep will exit 1 if no match found
# noqa risky-shell-pipe - grep will exit 1 if no match found
shell: >
{{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}" | wc -l
args:
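
`risky-shell-pipe` (numeric 306) wants pipelines prefixed with `set -o pipefail` so a failing left-hand command cannot be masked by a succeeding right-hand one. As the comment notes, `grep` exits 1 when the pool is simply absent, so pipefail here would turn a legitimate "not configured yet" answer into a task failure; the renamed noqa records that trade-off. For contrast, the shape the rule normally asks for:

```yaml
- name: Count configured IP pools
  shell: set -o pipefail && {{ bin_dir }}/calicoctl.sh get ippool | wc -l
  args:
    executable: /bin/bash
```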

@@ -1,6 +1,6 @@
---
- name: Calico | Set label for groups nodes # noqa 301 305
shell: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-group-id={{ calico_group_id }} --overwrite"
- name: Calico | Set label for groups nodes
command: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-group-id={{ calico_group_id }} --overwrite"
changed_when: false
register: calico_group_id_label
until: calico_group_id_label is succeeded

@@ -11,7 +11,7 @@
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
- name: Cilium | Wait for pods to run
command: "{{ kubectl }} -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601
command: "{{ kubectl }} -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa literal-compare
register: pods_not_ready
until: pods_not_ready.stdout.find("cilium")==-1
retries: "{{ cilium_rolling_restart_wait_retries_count | int }}"
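
`literal-compare` (numeric 601) targets Jinja comparisons against literal booleans. The trigger here is a false positive: the `==false` lives inside the kubectl JSONPath expression, which the Kubernetes API server evaluates, not Jinja. The construct the rule actually wants rewritten looks like this (variable name illustrative):

```yaml
# Flagged by literal-compare:
when: pull_required == true
# Preferred:
when: pull_required
```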

@@ -43,7 +43,6 @@
- has_quorum
- name: Delete old certificates
# noqa 302 ignore-error - rm is ok here for now
shell: "rm {{ etcd_cert_dir }}/*{{ item }}*"
with_items: "{{ groups['broken_etcd'] }}"
register: delete_old_cerificates

@@ -26,7 +26,7 @@
path: "{{ etcd_data_dir }}"
state: absent
- name: Restore etcd snapshot # noqa 301 305
- name: Restore etcd snapshot # noqa command-instead-of-shell
shell: "{{ bin_dir }}/etcdctl snapshot restore /tmp/snapshot.db --name {{ etcd_member_name }} --initial-cluster {{ etcd_member_name }}={{ etcd_peer_url }} --initial-cluster-token k8s_etcd --initial-advertise-peer-urls {{ etcd_peer_url }} --data-dir {{ etcd_data_dir }}"
environment:
ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"

@@ -9,7 +9,7 @@
changed_when: false
run_once: true
- name: remove-node | Drain node except daemonsets resource # noqa 301
- name: remove-node | Drain node except daemonsets resource
command: >-
{{ kubectl }} drain
--force

@@ -38,7 +38,7 @@
tags:
- docker
- name: reset | systemctl daemon-reload # noqa 503
- name: reset | systemctl daemon-reload # noqa no-handler
systemd:
daemon_reload: true
when: services_removed.changed
@@ -174,7 +174,7 @@
tags:
- services
- name: reset | gather mounted kubelet dirs # noqa 301
- name: reset | gather mounted kubelet dirs
shell: set -o pipefail && mount | grep /var/lib/kubelet/ | awk '{print $3}' | tac
args:
executable: /bin/bash
@@ -185,7 +185,7 @@
tags:
- mounts
- name: reset | unmount kubelet dirs # noqa 301
- name: reset | unmount kubelet dirs
command: umount -f {{ item }}
with_items: "{{ mounted_dirs.stdout_lines }}"
register: umount_dir

@@ -29,11 +29,11 @@
register: patch_kube_proxy_state
when: current_kube_proxy_state.stdout | trim | lower != "linux"
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ patch_kube_proxy_state.stdout_lines }}"
when: patch_kube_proxy_state is not skipped
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ patch_kube_proxy_state.stderr_lines }}"
when: patch_kube_proxy_state is not skipped
tags: init

@@ -32,7 +32,7 @@
when:
- item.value.converted|bool
- name: Resize images # noqa 301
- name: Resize images
command: qemu-img resize {{ images_dir }}/{{ item.key }}.qcow2 +8G
loop: "{{ images|dict2items }}"
@@ -43,16 +43,16 @@
dest: "{{ images_dir }}/Dockerfile"
mode: 0644
- name: Create docker images for each OS # noqa 301
- name: Create docker images for each OS
command: docker build -t {{ registry }}/vm-{{ item.key }}:{{ item.value.tag }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }}
loop: "{{ images|dict2items }}"
- name: docker login # noqa 301
- name: docker login
command: docker login -u="{{ docker_user }}" -p="{{ docker_password }}" "{{ docker_host }}"
- name: docker push image # noqa 301
- name: docker push image
command: docker push {{ registry }}/vm-{{ item.key }}:{{ item.value.tag }}
loop: "{{ images|dict2items }}"
- name: docker logout # noqa 301
- name: docker logout
command: docker logout -u="{{ docker_user }}" "{{ docker_host }}"

@@ -20,6 +20,6 @@
- name: Template the inventory
template:
src: ../templates/inventory-aws.j2 # noqa 404 CI inventory templates are not in role_path
src: ../templates/inventory-aws.j2 # noqa no-relative-paths - CI inventory templates are not in role_path
dest: "{{ inventory_path }}"
mode: 0644
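
`no-relative-paths` (numeric 404) expects `template:`/`copy:` sources to be bare names resolved from the role's own `templates/` or `files/` directory; `../templates/...` escapes the role, which is intentional for these shared CI templates, and the comment now spells that out. The layout the rule prefers, for contrast:

```yaml
# roles/<role>/templates/inventory-aws.j2 is found automatically:
- name: Template the inventory
  template:
    src: inventory-aws.j2
    dest: "{{ inventory_path }}"
    mode: 0644
```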

@@ -86,7 +86,7 @@
- name: Template the inventory
template:
src: ../templates/inventory-do.j2 # noqa 404 CI templates are not in role_path
src: ../templates/inventory-do.j2 # noqa no-relative-paths - CI templates are not in role_path
dest: "{{ inventory_path }}"
mode: 0644
when: state == 'present'

@@ -28,7 +28,7 @@
{%- endif -%}
- name: Create gce instances
google.cloud.gcp_compute_instance:
google.cloud.gcp_compute_instance: # noqa args[module] - Probably doesn't work
instance_names: "{{ instance_names }}"
machine_type: "{{ cloud_machine_type }}"
image: "{{ cloud_image | default(omit) }}"
@@ -51,7 +51,7 @@
groupname: "waitfor_hosts"
with_items: '{{ gce.instance_data }}'
- name: Template the inventory # noqa 404 CI inventory templates are not in role_path
- name: Template the inventory # noqa no-relative-paths - CI inventory templates are not in role_path
template:
src: ../templates/inventory-gce.j2
dest: "{{ inventory_path }}"
@@ -64,7 +64,7 @@
mode: 0755
when: mode in ['scale', 'separate-scale', 'ha-scale']
- name: Template fake hosts group vars # noqa 404 CI templates are not in role_path
- name: Template fake hosts group vars # noqa no-relative-paths - CI templates are not in role_path
template:
src: ../templates/fake_hosts.yml.j2
dest: "{{ inventory_path|dirname }}/group_vars/fake_hosts.yml"

@@ -19,7 +19,7 @@
k8s-{{ test_name }}-1,k8s-{{ test_name }}-2
{%- endif -%}
- name: stop gce instances
- name: stop gce instances # noqa args[module] - Probably doesn't work
google.cloud.gcp_compute_instance:
instance_names: "{{ instance_names }}"
image: "{{ cloud_image | default(omit) }}"
@@ -33,7 +33,7 @@
poll: 3
register: gce
- name: delete gce instances
- name: delete gce instances # noqa args[module] - Probably doesn't work
google.cloud.gcp_compute_instance:
instance_names: "{{ instance_names }}"
image: "{{ cloud_image | default(omit) }}"

@@ -56,7 +56,7 @@
no_log: True
failed_when: false
- name: Apply the lifecycle rules # noqa 301
- name: Apply the lifecycle rules
command: "{{ dir }}/google-cloud-sdk/bin/gsutil lifecycle set {{ dir }}/gcs_life.json gs://{{ test_name }}"
changed_when: false
environment:
@@ -77,5 +77,5 @@
failed_when: false
no_log: True
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "A public url https://storage.googleapis.com/{{ test_name }}/{{ file_name }}"

@@ -12,7 +12,7 @@
delay: 5
until: apiserver_response is success
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ apiserver_response.json }}"
- name: Check API servers version

@@ -12,7 +12,7 @@
bin_dir: "/usr/local/bin"
when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
- import_role: # noqa unnamed-task
- import_role: # noqa name[missing]
name: cluster-dump
- name: Check kubectl output
@@ -21,7 +21,7 @@
register: get_nodes
no_log: true
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ get_nodes.stdout.split('\n') }}"
- name: Check that all nodes are running and ready

@@ -12,7 +12,7 @@
bin_dir: "/usr/local/bin"
when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
- import_role: # noqa unnamed-task
- import_role: # noqa name[missing]
name: cluster-dump
- name: Check kubectl output
@@ -21,7 +21,7 @@
register: get_pods
no_log: true
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ get_pods.stdout.split('\n') }}"
- name: Check that all pods are running and ready
@@ -44,6 +44,6 @@
register: get_pods
no_log: true
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ get_pods.stdout.split('\n') }}"
failed_when: not run_pods_log is success

@@ -23,7 +23,7 @@
register: get_csr
changed_when: false
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ get_csr.stdout.split('\n') }}"
- name: Check there are csrs
@@ -63,7 +63,7 @@
when: get_csr.stdout_lines | length > 0
changed_when: certificate_approve.stdout
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ certificate_approve.stdout.split('\n') }}"
when:
@@ -114,7 +114,7 @@
- agnhost1
- agnhost2
- import_role: # noqa unnamed-task
- import_role: # noqa name[missing]
name: cluster-dump
- name: Check that all pods are running and ready
@@ -137,7 +137,7 @@
register: pods
no_log: true
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ pods.stdout.split('\n') }}"
failed_when: not run_pods_log is success
@@ -162,7 +162,7 @@
register: get_pods
no_log: true
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ get_pods.stdout.split('\n') }}"
- name: Set networking facts

@@ -26,7 +26,7 @@
bin_dir: "/usr/local/bin"
when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
- import_role: # noqa unnamed-task
- import_role: # noqa name[missing]
name: cluster-dump
- name: Wait for netchecker server
@@ -60,7 +60,7 @@
- netchecker-agent-hostnet
when: not nca_pod is success
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
var: nca_pod.stdout_lines
when: inventory_hostname == groups['kube_control_plane'][0]
@@ -96,7 +96,7 @@
when:
- agents.content != '{}'
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
var: ncs_pod
run_once: true
@@ -130,7 +130,7 @@
- agents.content is defined
- agents.content[0] == '{'
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
var: agents_check_result
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true
@@ -147,7 +147,7 @@
- connectivity_check.content is defined
- connectivity_check.content[0] == '{'
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
var: connectivity_check_result
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true