diff --git a/.ansible-lint b/.ansible-lint index ec6a9e0c3..021341d24 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -16,7 +16,6 @@ skip_list: # In Kubespray we use variables that use camelCase to match their k8s counterparts # (Disabled in June 2021) - 'var-naming' - - 'var-spacing' # [fqcn-builtins] # Roles in kubespray don't need fully qualified collection names diff --git a/.ansible-lint-ignore b/.ansible-lint-ignore new file mode 100644 index 000000000..03a371318 --- /dev/null +++ b/.ansible-lint-ignore @@ -0,0 +1,8 @@ +# This file contains ignores rule violations for ansible-lint +inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml jinja[spacing] +roles/kubernetes/control-plane/defaults/main/kube-proxy.yml jinja[spacing] +roles/kubernetes/control-plane/defaults/main/main.yml jinja[spacing] +roles/kubernetes/kubeadm/defaults/main.yml jinja[spacing] +roles/kubernetes/node/defaults/main.yml jinja[spacing] +roles/kubernetes/preinstall/defaults/main.yml jinja[spacing] +roles/kubespray-defaults/defaults/main.yaml jinja[spacing] diff --git a/contrib/azurerm/roles/generate-templates/defaults/main.yml b/contrib/azurerm/roles/generate-templates/defaults/main.yml index 1ba248043..ff6b31326 100644 --- a/contrib/azurerm/roles/generate-templates/defaults/main.yml +++ b/contrib/azurerm/roles/generate-templates/defaults/main.yml @@ -24,14 +24,14 @@ bastionIPAddressName: bastion-pubip disablePasswordAuthentication: true -sshKeyPath: "/home/{{admin_username}}/.ssh/authorized_keys" +sshKeyPath: "/home/{{ admin_username }}/.ssh/authorized_keys" imageReference: publisher: "OpenLogic" offer: "CentOS" sku: "7.5" version: "latest" -imageReferenceJson: "{{imageReference|to_json}}" +imageReferenceJson: "{{ imageReference | to_json }}" -storageAccountName: "sa{{nameSuffix | replace('-', '')}}" +storageAccountName: "sa{{ nameSuffix | replace('-', '') }}" storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}" diff --git 
a/contrib/dind/roles/dind-cluster/tasks/main.yaml b/contrib/dind/roles/dind-cluster/tasks/main.yaml index 59023df3c..2d74f7ea7 100644 --- a/contrib/dind/roles/dind-cluster/tasks/main.yaml +++ b/contrib/dind/roles/dind-cluster/tasks/main.yaml @@ -43,7 +43,7 @@ package: name: "{{ item }}" state: present - with_items: "{{ distro_extra_packages + [ 'rsyslog', 'openssh-server' ] }}" + with_items: "{{ distro_extra_packages + ['rsyslog', 'openssh-server'] }}" - name: Start needed services service: @@ -70,4 +70,4 @@ ansible.posix.authorized_key: user: "{{ distro_user }}" state: present - key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}" + key: "{{ lookup('file', lookup('env', 'HOME') + '/.ssh/id_rsa.pub') }}" diff --git a/contrib/dind/roles/dind-host/tasks/main.yaml b/contrib/dind/roles/dind-host/tasks/main.yaml index 205f77894..030ce7266 100644 --- a/contrib/dind/roles/dind-host/tasks/main.yaml +++ b/contrib/dind/roles/dind-host/tasks/main.yaml @@ -53,7 +53,7 @@ {{ distro_raw_setup_done }} && echo SKIPPED && exit 0 until [ "$(readlink /proc/1/exe)" = "{{ distro_pid1_exe }}" ] ; do sleep 1; done {{ distro_raw_setup }} - delegate_to: "{{ item._ansible_item_label|default(item.item) }}" + delegate_to: "{{ item._ansible_item_label | default(item.item) }}" with_items: "{{ containers.results }}" register: result changed_when: result.stdout.find("SKIPPED") < 0 @@ -63,7 +63,7 @@ until test -S /var/run/dbus/system_bus_socket; do sleep 1; done systemctl disable {{ distro_agetty_svc }} systemctl stop {{ distro_agetty_svc }} - delegate_to: "{{ item._ansible_item_label|default(item.item) }}" + delegate_to: "{{ item._ansible_item_label | default(item.item) }}" with_items: "{{ containers.results }}" changed_when: false @@ -75,13 +75,13 @@ mv -b /etc/machine-id.new /etc/machine-id cmp /etc/machine-id /etc/machine-id~ || true systemctl daemon-reload - delegate_to: "{{ item._ansible_item_label|default(item.item) }}" + delegate_to: "{{ item._ansible_item_label | 
default(item.item) }}" with_items: "{{ containers.results }}" - name: Early hack image install to adapt for DIND raw: | rm -fv /usr/bin/udevadm /usr/sbin/udevadm - delegate_to: "{{ item._ansible_item_label|default(item.item) }}" + delegate_to: "{{ item._ansible_item_label | default(item.item) }}" with_items: "{{ containers.results }}" register: result changed_when: result.stdout.find("removed") >= 0 diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml index 1146188aa..64e7691bb 100644 --- a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml +++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml @@ -58,27 +58,27 @@ name: "{{ gluster_brick_name }}" brick: "{{ gluster_brick_dir }}" replicas: "{{ groups['gfs-cluster'] | length }}" - cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}" + cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}" host: "{{ inventory_hostname }}" force: yes run_once: true - when: groups['gfs-cluster']|length > 1 + when: groups['gfs-cluster'] | length > 1 - name: Configure Gluster volume without replicas gluster.gluster.gluster_volume: state: present name: "{{ gluster_brick_name }}" brick: "{{ gluster_brick_dir }}" - cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}" + cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}" host: "{{ inventory_hostname }}" force: yes run_once: true - when: 
groups['gfs-cluster']|length <= 1 + when: groups['gfs-cluster'] | length <= 1 - name: Mount glusterfs to retrieve disk size ansible.posix.mount: name: "{{ gluster_mount_dir }}" - src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster" + src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster" fstype: glusterfs opts: "defaults,_netdev" state: mounted @@ -92,7 +92,7 @@ - name: Set Gluster disk size to variable set_fact: - gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024*1024*1024)) | int }}" + gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024 * 1024 * 1024)) | int }}" when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0] - name: Create file on GlusterFS @@ -106,6 +106,6 @@ ansible.posix.mount: name: "{{ gluster_mount_dir }}" fstype: glusterfs - src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster" + src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster" state: unmounted when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0] diff --git a/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml b/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml index 82b0acb82..ed62e282e 100644 --- a/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml +++ b/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml @@ -18,6 +18,6 @@ kubectl: "{{ bin_dir }}/kubectl" resource: "{{ item.item.type }}" filename: "{{ kube_config_dir }}/{{ item.item.dest }}" - state: "{{ item.changed | ternary('latest','present') }}" + state: "{{ item.changed | ternary('latest', 'present') }}" with_items: "{{ gluster_pv.results }}" when: inventory_hostname == 
groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml index f0111cec0..7b4330038 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml @@ -7,9 +7,9 @@ - name: "Bootstrap heketi." when: - - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Service']\"))|length == 0" - - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Deployment']\"))|length == 0" - - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod']\"))|length == 0" + - "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Service']\")) | length == 0" + - "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Deployment']\")) | length == 0" + - "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod']\")) | length == 0" include_tasks: "bootstrap/deploy.yml" # Prepare heketi topology @@ -20,11 +20,11 @@ - name: "Ensure heketi bootstrap pod is up." assert: - that: "(initial_heketi_pod.stdout|from_json|json_query('items[*]'))|length == 1" + that: "(initial_heketi_pod.stdout | from_json | json_query('items[*]')) | length == 1" - name: Store the initial heketi pod name set_fact: - initial_heketi_pod_name: "{{ initial_heketi_pod.stdout|from_json|json_query(\"items[*].metadata.name|[0]\") }}" + initial_heketi_pod_name: "{{ initial_heketi_pod.stdout | from_json | json_query(\"items[*].metadata.name | [0]\") }}" - name: "Test heketi topology." changed_when: false @@ -32,7 +32,7 @@ command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json" - name: "Load heketi topology." 
- when: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*]\")|flatten|length == 0" + when: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*]\") | flatten | length == 0" include_tasks: "bootstrap/topology.yml" # Provision heketi database volume @@ -58,7 +58,7 @@ service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']" job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']" when: - - "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0" - - "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0" - - "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0" - - "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0" + - "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0" + - "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0" + - "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0" + - "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0" diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml index 8d03ffc2f..866fe30bf 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml @@ -17,11 +17,11 @@ register: "initial_heketi_state" vars: initial_heketi_state: { stdout: "{}" } - pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]" - deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]" + pods_query: "items[?kind=='Pod'].status.conditions | [0][?type=='Ready'].status | [0]" + deployments_query: "items[?kind=='Deployment'].status.conditions | [0][?type=='Available'].status | [0]" command: "{{ 
bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json" until: - - "initial_heketi_state.stdout|from_json|json_query(pods_query) == 'True'" - - "initial_heketi_state.stdout|from_json|json_query(deployments_query) == 'True'" + - "initial_heketi_state.stdout | from_json | json_query(pods_query) == 'True'" + - "initial_heketi_state.stdout | from_json | json_query(deployments_query) == 'True'" retries: 60 delay: 5 diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml index 63a475a85..650c12d12 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml @@ -15,10 +15,10 @@ service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']" job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']" when: - - "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0" - - "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0" - - "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0" - - "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0" + - "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0" + - "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0" + - "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0" + - "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0" register: "heketi_storage_result" - name: "Get state of heketi database copy job." 
command: "{{ bin_dir }}/kubectl get jobs --output=json" @@ -28,6 +28,6 @@ heketi_storage_state: { stdout: "{}" } job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job' && status.succeeded==1]" until: - - "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 1" + - "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 1" retries: 60 delay: 5 diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml index 0ffd6f469..ad48882b6 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml @@ -5,10 +5,10 @@ changed_when: false - name: "Delete bootstrap Heketi." command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\"" - when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0" + when: "heketi_resources.stdout | from_json | json_query('items[*]') | length > 0" - name: "Ensure there is nothing left over." 
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json" register: "heketi_result" - until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0" + until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0" retries: 60 delay: 5 diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml index e623576d1..2f3efd4dd 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml @@ -22,6 +22,6 @@ changed_when: false register: "heketi_topology" command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json" - until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length" + until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length" retries: 60 delay: 5 diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml index 14ab97793..6d26dfc9a 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml @@ -6,14 +6,14 @@ - name: "Get heketi volumes." 
changed_when: false command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json" - with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}" + with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}" loop_control: { loop_var: "volume_id" } register: "volumes_information" - name: "Test heketi database volume." set_fact: { heketi_database_volume_exists: true } with_items: "{{ volumes_information.results }}" loop_control: { loop_var: "volume_information" } - vars: { volume: "{{ volume_information.stdout|from_json }}" } + vars: { volume: "{{ volume_information.stdout | from_json }}" } when: "volume.name == 'heketidbstorage'" - name: "Provision database volume." command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage" @@ -28,14 +28,14 @@ - name: "Get heketi volumes." changed_when: false command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json" - with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}" + with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}" loop_control: { loop_var: "volume_id" } register: "volumes_information" - name: "Test heketi database volume." set_fact: { heketi_database_volume_created: true } with_items: "{{ volumes_information.results }}" loop_control: { loop_var: "volume_information" } - vars: { volume: "{{ volume_information.stdout|from_json }}" } + vars: { volume: "{{ volume_information.stdout | from_json }}" } when: "volume.name == 'heketidbstorage'" - name: "Ensure heketi database volume exists." assert: { that: "heketi_database_volume_created is defined", msg: "Heketi database volume does not exist." 
} diff --git a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml index 3409cf957..973c66851 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml @@ -23,8 +23,8 @@ changed_when: false vars: daemonset_state: { stdout: "{}" } - ready: "{{ daemonset_state.stdout|from_json|json_query(\"status.numberReady\") }}" - desired: "{{ daemonset_state.stdout|from_json|json_query(\"status.desiredNumberScheduled\") }}" + ready: "{{ daemonset_state.stdout | from_json | json_query(\"status.numberReady\") }}" + desired: "{{ daemonset_state.stdout | from_json | json_query(\"status.desiredNumberScheduled\") }}" until: "ready | int >= 3" retries: 60 delay: 5 diff --git a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml index ae598c3df..4cefd47ac 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml @@ -5,7 +5,7 @@ changed_when: false - name: "Assign storage label" - when: "label_present.stdout_lines|length == 0" + when: "label_present.stdout_lines | length == 0" command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs" - name: Get storage nodes again @@ -15,5 +15,5 @@ - name: Ensure the label has been set assert: - that: "label_present|length > 0" + that: "label_present | length > 0" msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs." 
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml index 9a6ce55b2..a8549df45 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml @@ -24,11 +24,11 @@ deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]" command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json" until: - - "heketi_state.stdout|from_json|json_query(pods_query) == 'True'" - - "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'" + - "heketi_state.stdout | from_json | json_query(pods_query) == 'True'" + - "heketi_state.stdout | from_json | json_query(deployments_query) == 'True'" retries: 60 delay: 5 - name: Set the Heketi pod name set_fact: - heketi_pod_name: "{{ heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}" + heketi_pod_name: "{{ heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}" diff --git a/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml b/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml index 3380a612f..bd4f6666b 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml @@ -12,7 +12,7 @@ - name: "Render storage class configuration." 
become: true vars: - endpoint_address: "{{ (heketi_service.stdout|from_json).spec.clusterIP }}" + endpoint_address: "{{ (heketi_service.stdout | from_json).spec.clusterIP }}" template: src: "storageclass.yml.j2" dest: "{{ kube_config_dir }}/storageclass.yml" diff --git a/contrib/network-storage/heketi/roles/provision/tasks/topology.yml b/contrib/network-storage/heketi/roles/provision/tasks/topology.yml index f5f8e6a94..aa662083e 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/topology.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/topology.yml @@ -21,6 +21,6 @@ register: "heketi_topology" changed_when: false command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json" - until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length" + until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length" retries: 60 delay: 5 diff --git a/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml index 5b3553bf4..5c271e794 100644 --- a/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml +++ b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml @@ -13,13 +13,13 @@ - name: Ensure there is nothing left over. command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json" register: "heketi_result" - until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0" + until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0" retries: 60 delay: 5 - name: Ensure there is nothing left over. 
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json" register: "heketi_result" - until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0" + until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0" retries: 60 delay: 5 - name: Tear down glusterfs. @@ -46,6 +46,6 @@ changed_when: false - name: Remove heketi storage secret vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" } - command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}" + command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout | from_json | json_query(storage_query) }}" when: "storage_query is defined" ignore_errors: true # noqa ignore-errors diff --git a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml index 2699eff2f..70e93776a 100644 --- a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml @@ -117,7 +117,7 @@ kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112 kube_network_node_prefix_ipv6: 120 # The port the API Server will be listening on. -kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" +kube_apiserver_ip: "{{ kube_service_addresses | ipaddr('net') | ipaddr(1) | ipaddr('address') }}" kube_apiserver_port: 6443 # (https) # Kube-proxy proxyMode configuration. 
@@ -141,7 +141,7 @@ kube_proxy_nodeport_addresses: >- # If non-empty, will use this string as identification instead of the actual hostname # kube_override_hostname: >- -# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} +# {%- if cloud_provider is defined and cloud_provider in ['aws'] -%} # {%- else -%} # {{ inventory_hostname }} # {%- endif -%} @@ -165,7 +165,7 @@ ndots: 2 # Custom search domains to be added in addition to the default cluster search domains # searchdomains: # - svc.{{ cluster_name }} -# - default.svc.{{ cluster_name }} +# - default.svc.{{ cluster_name }} # Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``). # remove_default_searchdomains: false # Can be coredns, coredns_dual, manual or none @@ -219,8 +219,8 @@ resolvconf_mode: host_resolvconf # Deploy netchecker app to verify DNS resolve as an HTTP service deploy_netchecker: false # Ip address of the kubernetes skydns service -skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}" -skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}" +skydns_server: "{{ kube_service_addresses | ipaddr('net') | ipaddr(3) | ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses | ipaddr('net') | ipaddr(4) | ipaddr('address') }}" dns_domain: "{{ cluster_name }}" ## Container runtime diff --git a/playbooks/remove_node.yml b/playbooks/remove_node.yml index b9fdb93d6..be346a768 100644 --- a/playbooks/remove_node.yml +++ b/playbooks/remove_node.yml @@ -30,21 +30,21 @@ - name: Gather facts import_playbook: facts.yml - when: reset_nodes|default(True)|bool + when: reset_nodes | default(True) | bool - hosts: "{{ node | default('kube_node') }}" gather_facts: no environment: "{{ proxy_disable_env }}" roles: - - { role: kubespray-defaults, when: reset_nodes|default(True)|bool } + - { role: kubespray-defaults, when: reset_nodes | default(True) | bool } - { role: 
remove-node/pre-remove, tags: pre-remove } - { role: remove-node/remove-etcd-node } - - { role: reset, tags: reset, when: reset_nodes|default(True)|bool } + - { role: reset, tags: reset, when: reset_nodes | default(True) | bool } # Currently cannot remove first master or etcd - hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}" gather_facts: no environment: "{{ proxy_disable_env }}" roles: - - { role: kubespray-defaults, when: reset_nodes|default(True)|bool } + - { role: kubespray-defaults, when: reset_nodes | default(True) | bool } - { role: remove-node/post-remove, tags: post-remove } diff --git a/roles/adduser/defaults/main.yml b/roles/adduser/defaults/main.yml index faf258d79..df3fc2d02 100644 --- a/roles/adduser/defaults/main.yml +++ b/roles/adduser/defaults/main.yml @@ -20,8 +20,8 @@ addusers: adduser: name: "{{ user.name }}" - group: "{{ user.name|default(None) }}" - comment: "{{ user.comment|default(None) }}" - shell: "{{ user.shell|default(None) }}" - system: "{{ user.system|default(None) }}" - create_home: "{{ user.create_home|default(None) }}" + group: "{{ user.name | default(None) }}" + comment: "{{ user.comment | default(None) }}" + shell: "{{ user.shell | default(None) }}" + system: "{{ user.system | default(None) }}" + create_home: "{{ user.create_home | default(None) }}" diff --git a/roles/adduser/tasks/main.yml b/roles/adduser/tasks/main.yml index 51dd5bb06..ba5edd7d0 100644 --- a/roles/adduser/tasks/main.yml +++ b/roles/adduser/tasks/main.yml @@ -1,16 +1,16 @@ --- - name: User | Create User Group group: - name: "{{ user.group|default(user.name) }}" - system: "{{ user.system|default(omit) }}" + name: "{{ user.group | default(user.name) }}" + system: "{{ user.system | default(omit) }}" - name: User | Create User user: - comment: "{{ user.comment|default(omit) }}" - create_home: "{{ user.create_home|default(omit) }}" - group: "{{ user.group|default(user.name) }}" - home: "{{ user.home|default(omit) }}" - shell: "{{ 
user.shell|default(omit) }}" + comment: "{{ user.comment | default(omit) }}" + create_home: "{{ user.create_home | default(omit) }}" + group: "{{ user.group | default(user.name) }}" + home: "{{ user.home | default(omit) }}" + shell: "{{ user.shell | default(omit) }}" name: "{{ user.name }}" - system: "{{ user.system|default(omit) }}" + system: "{{ user.system | default(omit) }}" when: user.name != "root" diff --git a/roles/bootstrap-os/tasks/bootstrap-centos.yml b/roles/bootstrap-os/tasks/bootstrap-centos.yml index aaab37202..5d543aea1 100644 --- a/roles/bootstrap-os/tasks/bootstrap-centos.yml +++ b/roles/bootstrap-os/tasks/bootstrap-centos.yml @@ -6,7 +6,7 @@ - name: Add proxy to yum.conf or dnf.conf if http_proxy is defined community.general.ini_file: - path: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf','/etc/dnf/dnf.conf') }}" + path: "{{ ((ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf', '/etc/dnf/dnf.conf') }}" section: main option: proxy value: "{{ http_proxy | default(omit) }}" @@ -23,7 +23,7 @@ dest: /etc/yum.repos.d/public-yum-ol7.repo mode: 0644 when: - - use_oracle_public_repo|default(true) + - use_oracle_public_repo | default(true) - '''ID="ol"'' in os_release.stdout_lines' - (ansible_distribution_version | float) < 7.6 environment: "{{ proxy_env }}" @@ -40,7 +40,7 @@ - ol7_addons - ol7_developer_EPEL when: - - use_oracle_public_repo|default(true) + - use_oracle_public_repo | default(true) - '''ID="ol"'' in os_release.stdout_lines' - (ansible_distribution_version | float) < 7.6 @@ -49,7 +49,7 @@ name: "oracle-epel-release-el{{ ansible_distribution_major_version }}" state: present when: - - use_oracle_public_repo|default(true) + - use_oracle_public_repo | default(true) - '''ID="ol"'' in os_release.stdout_lines' - (ansible_distribution_version | float) >= 7.6 @@ -65,7 +65,7 @@ - { option: "enabled", value: "1" } - { option: "baseurl", value: "http://yum.oracle.com/repo/OracleLinux/OL{{ 
ansible_distribution_major_version }}/addons/$basearch/" } when: - - use_oracle_public_repo|default(true) + - use_oracle_public_repo | default(true) - '''ID="ol"'' in os_release.stdout_lines' - (ansible_distribution_version | float) >= 7.6 @@ -80,9 +80,9 @@ - { option: "name", value: "CentOS-{{ ansible_distribution_major_version }} - Extras" } - { option: "enabled", value: "1" } - { option: "gpgcheck", value: "0" } - - { option: "baseurl", value: "http://mirror.centos.org/{{ 'altarch' if (ansible_distribution_major_version | int) <= 7 and ansible_architecture == 'aarch64' else 'centos' }}/{{ ansible_distribution_major_version }}/extras/$basearch/{% if ansible_distribution_major_version|int > 7 %}os/{% endif %}" } + - { option: "baseurl", value: "http://mirror.centos.org/{{ 'altarch' if (ansible_distribution_major_version | int) <= 7 and ansible_architecture == 'aarch64' else 'centos' }}/{{ ansible_distribution_major_version }}/extras/$basearch/{% if ansible_distribution_major_version | int > 7 %}os/{% endif %}" } when: - - use_oracle_public_repo|default(true) + - use_oracle_public_repo | default(true) - '''ID="ol"'' in os_release.stdout_lines' - (ansible_distribution_version | float) >= 7.6 - (ansible_distribution_version | float) < 9 @@ -113,6 +113,6 @@ # See https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#managed-node-requirements - name: Install libselinux python package package: - name: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}" + name: "{{ ((ansible_distribution_major_version | int) < 8) | ternary('libselinux-python', 'python3-libselinux') }}" state: present become: true diff --git a/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml b/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml index d3fd1c942..91dc020c4 100644 --- a/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml +++ b/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml @@ -20,7 +20,7 @@ 
when: need_bootstrap.rc != 0 - name: Install required packages on fedora coreos - raw: "export http_proxy={{ http_proxy | default('') }};rpm-ostree install --allow-inactive {{ fedora_coreos_packages|join(' ') }}" + raw: "export http_proxy={{ http_proxy | default('') }};rpm-ostree install --allow-inactive {{ fedora_coreos_packages | join(' ') }}" become: true when: need_bootstrap.rc != 0 diff --git a/roles/bootstrap-os/tasks/bootstrap-redhat.yml b/roles/bootstrap-os/tasks/bootstrap-redhat.yml index a87046165..c3621466e 100644 --- a/roles/bootstrap-os/tasks/bootstrap-redhat.yml +++ b/roles/bootstrap-os/tasks/bootstrap-redhat.yml @@ -6,7 +6,7 @@ - name: Add proxy to yum.conf or dnf.conf if http_proxy is defined community.general.ini_file: - path: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf','/etc/dnf/dnf.conf') }}" + path: "{{ ((ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf', '/etc/dnf/dnf.conf') }}" section: main option: proxy value: "{{ http_proxy | default(omit) }}" @@ -57,7 +57,7 @@ sync: true notify: RHEL auto-attach subscription become: true - no_log: "{{ not (unsafe_show_logs|bool) }}" + no_log: "{{ not (unsafe_show_logs | bool) }}" when: - rh_subscription_username is defined - rh_subscription_status.changed @@ -108,6 +108,6 @@ # See https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#managed-node-requirements - name: Install libselinux python package package: - name: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}" + name: "{{ ((ansible_distribution_major_version | int) < 8) | ternary('libselinux-python', 'python3-libselinux') }}" state: present become: true diff --git a/roles/bootstrap-os/tasks/main.yml b/roles/bootstrap-os/tasks/main.yml index 853ce095f..42321fd37 100644 --- a/roles/bootstrap-os/tasks/main.yml +++ b/roles/bootstrap-os/tasks/main.yml @@ -89,7 +89,7 @@ name: - ceph-common state: present - 
when: rbd_provisioner_enabled|default(false) + when: rbd_provisioner_enabled | default(false) - name: Ensure bash_completion.d folder exists file: diff --git a/roles/container-engine/containerd-common/tasks/main.yml b/roles/container-engine/containerd-common/tasks/main.yml index cfd78f3a3..fcca4fb64 100644 --- a/roles/container-engine/containerd-common/tasks/main.yml +++ b/roles/container-engine/containerd-common/tasks/main.yml @@ -15,14 +15,14 @@ include_vars: "{{ item }}" with_first_found: - files: - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}-{{ host_architecture }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}-{{ host_architecture }}.yml" - - "{{ ansible_distribution|lower }}.yml" - - "{{ ansible_os_family|lower }}-{{ host_architecture }}.yml" - - "{{ ansible_os_family|lower }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release | lower }}-{{ host_architecture }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release | lower }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml" + - "{{ ansible_distribution | lower }}-{{ host_architecture }}.yml" + - "{{ ansible_distribution | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ host_architecture }}.yml" + - "{{ ansible_os_family | lower }}.yml" - defaults.yml paths: - ../vars diff --git a/roles/container-engine/containerd/defaults/main.yml b/roles/container-engine/containerd/defaults/main.yml index e763d91b1..4c2df2aba 100644 --- 
a/roles/container-engine/containerd/defaults/main.yml +++ b/roles/container-engine/containerd/defaults/main.yml @@ -36,7 +36,7 @@ containerd_default_base_runtime_spec_patch: soft: "{{ containerd_base_runtime_spec_rlimit_nofile }}" containerd_base_runtime_specs: - cri-base.json: "{{ containerd_default_base_runtime_spec | combine(containerd_default_base_runtime_spec_patch,recursive=1) }}" + cri-base.json: "{{ containerd_default_base_runtime_spec | combine(containerd_default_base_runtime_spec_patch, recursive=1) }}" containerd_grpc_max_recv_message_size: 16777216 containerd_grpc_max_send_message_size: 16777216 diff --git a/roles/container-engine/containerd/tasks/main.yml b/roles/container-engine/containerd/tasks/main.yml index 5ec9c28ac..e3ee58643 100644 --- a/roles/container-engine/containerd/tasks/main.yml +++ b/roles/container-engine/containerd/tasks/main.yml @@ -130,7 +130,7 @@ capabilities = ["pull", "resolve", "push"] skip_verify = true with_dict: "{{ containerd_insecure_registries }}" - when: containerd_use_config_path is defined and containerd_use_config_path|bool and containerd_insecure_registries is defined + when: containerd_use_config_path is defined and containerd_use_config_path | bool and containerd_insecure_registries is defined # you can sometimes end up in a state where everything is installed # but containerd was not started / enabled diff --git a/roles/container-engine/containerd/vars/debian.yml b/roles/container-engine/containerd/vars/debian.yml index 99dc4a50c..8b18d9a9f 100644 --- a/roles/container-engine/containerd/vars/debian.yml +++ b/roles/container-engine/containerd/vars/debian.yml @@ -3,5 +3,5 @@ containerd_repo_info: repos: - > deb {{ containerd_debian_repo_base_url }} - {{ ansible_distribution_release|lower }} + {{ ansible_distribution_release | lower }} {{ containerd_debian_repo_component }} diff --git a/roles/container-engine/containerd/vars/ubuntu.yml b/roles/container-engine/containerd/vars/ubuntu.yml index ccce96d0e..dd775323d 
100644 --- a/roles/container-engine/containerd/vars/ubuntu.yml +++ b/roles/container-engine/containerd/vars/ubuntu.yml @@ -3,5 +3,5 @@ containerd_repo_info: repos: - > deb {{ containerd_ubuntu_repo_base_url }} - {{ ansible_distribution_release|lower }} + {{ ansible_distribution_release | lower }} {{ containerd_ubuntu_repo_component }} diff --git a/roles/container-engine/cri-o/defaults/main.yml b/roles/container-engine/cri-o/defaults/main.yml index d2c087b8d..949ed69ed 100644 --- a/roles/container-engine/cri-o/defaults/main.yml +++ b/roles/container-engine/cri-o/defaults/main.yml @@ -27,7 +27,7 @@ crio_registry_auth: [] # password: pass crio_seccomp_profile: "" -crio_selinux: "{{ (preinstall_selinux_state == 'enforcing')|lower }}" +crio_selinux: "{{ (preinstall_selinux_state == 'enforcing') | lower }}" crio_signature_policy: "{% if ansible_os_family == 'ClearLinux' %}/usr/share/defaults/crio/policy.json{% endif %}" # Override system default for storage driver diff --git a/roles/container-engine/cri-o/tasks/cleanup.yaml b/roles/container-engine/cri-o/tasks/cleanup.yaml index fd2f119af..2c3872229 100644 --- a/roles/container-engine/cri-o/tasks/cleanup.yaml +++ b/roles/container-engine/cri-o/tasks/cleanup.yaml @@ -2,7 +2,7 @@ # TODO(cristicalin): drop this file after 2.21 - name: CRI-O kubic repo name for debian os family set_fact: - crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x','')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}" + crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x', '')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}" when: ansible_os_family == "Debian" - name: Remove legacy CRI-O kubic apt repo key diff --git a/roles/container-engine/cri-o/tasks/main.yaml b/roles/container-engine/cri-o/tasks/main.yaml index 23cab0e8a..4a667ac9a 100644 --- a/roles/container-engine/cri-o/tasks/main.yaml +++ b/roles/container-engine/cri-o/tasks/main.yaml @@ -32,7 +32,7 
@@ - name: cri-o | build a list of crio runtimes with Katacontainers runtimes set_fact: - crio_runtimes: "{{ crio_runtimes + kata_runtimes }}" + crio_runtimes: "{{ crio_runtimes + kata_runtimes }}" when: - kata_containers_enabled diff --git a/roles/container-engine/cri-o/tasks/reset.yml b/roles/container-engine/cri-o/tasks/reset.yml index 0005a38a6..65ee0026a 100644 --- a/roles/container-engine/cri-o/tasks/reset.yml +++ b/roles/container-engine/cri-o/tasks/reset.yml @@ -1,7 +1,7 @@ --- - name: CRI-O | Kubic repo name for debian os family set_fact: - crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x','')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}" + crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x', '')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}" when: ansible_os_family == "Debian" tags: - reset_crio diff --git a/roles/container-engine/docker/tasks/main.yml b/roles/container-engine/docker/tasks/main.yml index 314430f27..9413ba914 100644 --- a/roles/container-engine/docker/tasks/main.yml +++ b/roles/container-engine/docker/tasks/main.yml @@ -22,16 +22,16 @@ include_vars: "{{ item }}" with_first_found: - files: - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}-{{ host_architecture }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}-{{ host_architecture }}.yml" - - "{{ ansible_distribution|lower }}.yml" - - "{{ ansible_distribution.split(' ')[0]|lower }}.yml" - - "{{ ansible_os_family|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_os_family|lower }}-{{ host_architecture }}.yml" - - "{{ 
ansible_os_family|lower }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release | lower }}-{{ host_architecture }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release | lower }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml" + - "{{ ansible_distribution | lower }}-{{ host_architecture }}.yml" + - "{{ ansible_distribution | lower }}.yml" + - "{{ ansible_distribution.split(' ')[0] | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml" + - "{{ ansible_os_family | lower }}-{{ host_architecture }}.yml" + - "{{ ansible_os_family | lower }}.yml" - defaults.yml paths: - ../vars @@ -121,7 +121,7 @@ when: - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] - not is_ostree - - docker_package_info.pkgs|length > 0 + - docker_package_info.pkgs | length > 0 # This is required to ensure any apt upgrade will not break kubernetes - name: Tell Debian hosts not to change the docker version with apt upgrade diff --git a/roles/container-engine/docker/tasks/reset.yml b/roles/container-engine/docker/tasks/reset.yml index fb4f02c9b..51b79e5a7 100644 --- a/roles/container-engine/docker/tasks/reset.yml +++ b/roles/container-engine/docker/tasks/reset.yml @@ -19,7 +19,7 @@ changed_when: true delay: 5 ignore_errors: true # noqa ignore-errors - when: docker_packages_list|length>0 + when: docker_packages_list | length>0 - name: reset | remove all containers shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv" @@ -29,7 +29,7 @@ retries: 4 until: remove_all_containers.rc == 0 delay: 5 - when: docker_packages_list|length>0 + when: docker_packages_list | length>0 - name: Docker | Stop docker service service: @@ -40,7 +40,7 @@ - docker - 
docker.socket - containerd - when: docker_packages_list|length>0 + when: docker_packages_list | length>0 - name: Docker | Remove dpkg hold dpkg_selections: @@ -63,7 +63,7 @@ when: - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] - not is_ostree - - docker_packages_list|length > 0 + - docker_packages_list | length > 0 - name: Docker | ensure docker-ce repository is removed apt_repository: diff --git a/roles/container-engine/docker/tasks/set_facts_dns.yml b/roles/container-engine/docker/tasks/set_facts_dns.yml index 3b7b67cf5..9d563a259 100644 --- a/roles/container-engine/docker/tasks/set_facts_dns.yml +++ b/roles/container-engine/docker/tasks/set_facts_dns.yml @@ -10,12 +10,12 @@ - name: add upstream dns servers set_fact: - docker_dns_servers: "{{ docker_dns_servers + upstream_dns_servers|default([]) }}" + docker_dns_servers: "{{ docker_dns_servers + upstream_dns_servers | default([]) }}" when: dns_mode in ['coredns', 'coredns_dual'] - name: add global searchdomains set_fact: - docker_dns_search_domains: "{{ docker_dns_search_domains + searchdomains|default([]) }}" + docker_dns_search_domains: "{{ docker_dns_search_domains + searchdomains | default([]) }}" - name: check system nameservers shell: set -o pipefail && grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/' @@ -42,25 +42,25 @@ - name: add system search domains to docker options set_fact: - docker_dns_search_domains: "{{ docker_dns_search_domains | union(system_search_domains.stdout.split()|default([])) | unique }}" + docker_dns_search_domains: "{{ docker_dns_search_domains | union(system_search_domains.stdout.split() | default([])) | unique }}" when: system_search_domains.stdout - name: check number of nameservers fail: msg: "Too many nameservers. You can relax this check by set docker_dns_servers_strict=false in docker.yml and we will only use the first 3." 
- when: docker_dns_servers|length > 3 and docker_dns_servers_strict|bool + when: docker_dns_servers | length > 3 and docker_dns_servers_strict | bool - name: rtrim number of nameservers to 3 set_fact: docker_dns_servers: "{{ docker_dns_servers[0:3] }}" - when: docker_dns_servers|length > 3 and not docker_dns_servers_strict|bool + when: docker_dns_servers | length > 3 and not docker_dns_servers_strict | bool - name: check number of search domains fail: msg: "Too many search domains" - when: docker_dns_search_domains|length > 6 + when: docker_dns_search_domains | length > 6 - name: check length of search domains fail: msg: "Search domains exceeded limit of 256 characters" - when: docker_dns_search_domains|join(' ')|length > 256 + when: docker_dns_search_domains | join(' ') | length > 256 diff --git a/roles/container-engine/docker/vars/debian-bookworm.yml b/roles/container-engine/docker/vars/debian-bookworm.yml index db20d0b31..74a66ccb3 100644 --- a/roles/container-engine/docker/vars/debian-bookworm.yml +++ b/roles/container-engine/docker/vars/debian-bookworm.yml @@ -17,17 +17,17 @@ containerd_versioned_pkg: # https://download.docker.com/linux/debian/ docker_versioned_pkg: 'latest': docker-ce - '23.0': docker-ce=5:23.0.6-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }} - '24.0': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }} - 'stable': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }} - 'edge': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }} + '23.0': docker-ce=5:23.0.6-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} + '24.0': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} + 'stable': docker-ce=5:24.0.2-1~debian.{{ 
ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} + 'edge': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} docker_cli_versioned_pkg: 'latest': docker-ce-cli - '23.0': docker-ce=5:23.0.6-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }} - '24.0': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }} - 'stable': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }} - 'edge': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }} + '23.0': docker-ce=5:23.0.6-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} + '24.0': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} + 'stable': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} + 'edge': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} docker_package_info: pkgs: @@ -44,5 +44,5 @@ docker_repo_info: repos: - > deb {{ docker_debian_repo_base_url }} - {{ ansible_distribution_release|lower }} + {{ ansible_distribution_release | lower }} stable diff --git a/roles/container-engine/docker/vars/debian.yml b/roles/container-engine/docker/vars/debian.yml index d46bfa8b8..f42b001bb 100644 --- a/roles/container-engine/docker/vars/debian.yml +++ b/roles/container-engine/docker/vars/debian.yml @@ -16,19 +16,19 @@ containerd_versioned_pkg: # https://download.docker.com/linux/debian/ docker_versioned_pkg: 'latest': docker-ce - '18.09': docker-ce=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }} - '19.03': docker-ce=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }} - '20.10': 
docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }} - 'stable': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }} - 'edge': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }} + '18.09': docker-ce=5:18.09.9~3-0~debian-{{ ansible_distribution_release | lower }} + '19.03': docker-ce=5:19.03.15~3-0~debian-{{ ansible_distribution_release | lower }} + '20.10': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release | lower }} + 'stable': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release | lower }} + 'edge': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release | lower }} docker_cli_versioned_pkg: 'latest': docker-ce-cli - '18.09': docker-ce-cli=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }} - '19.03': docker-ce-cli=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }} - '20.10': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }} - 'stable': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }} - 'edge': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }} + '18.09': docker-ce-cli=5:18.09.9~3-0~debian-{{ ansible_distribution_release | lower }} + '19.03': docker-ce-cli=5:19.03.15~3-0~debian-{{ ansible_distribution_release | lower }} + '20.10': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release | lower }} + 'stable': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release | lower }} + 'edge': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release | lower }} docker_package_info: pkgs: @@ -45,5 +45,5 @@ docker_repo_info: repos: - > deb {{ docker_debian_repo_base_url }} - {{ ansible_distribution_release|lower }} + {{ ansible_distribution_release | lower }} stable diff --git a/roles/container-engine/docker/vars/ubuntu.yml b/roles/container-engine/docker/vars/ubuntu.yml index cced07e11..4b9398d26 100644 --- 
a/roles/container-engine/docker/vars/ubuntu.yml +++ b/roles/container-engine/docker/vars/ubuntu.yml @@ -16,19 +16,19 @@ containerd_versioned_pkg: # https://download.docker.com/linux/ubuntu/ docker_versioned_pkg: 'latest': docker-ce - '18.09': docker-ce=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }} - '19.03': docker-ce=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release|lower }} - '20.10': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }} - 'stable': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }} - 'edge': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '18.09': docker-ce=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release | lower }} + '19.03': docker-ce=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release | lower }} + '20.10': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release | lower }} + 'stable': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release | lower }} + 'edge': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release | lower }} docker_cli_versioned_pkg: 'latest': docker-ce-cli - '18.09': docker-ce-cli=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }} - '19.03': docker-ce-cli=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release|lower }} - '20.10': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }} - 'stable': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }} - 'edge': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '18.09': docker-ce-cli=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release | lower }} + '19.03': docker-ce-cli=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release | lower }} + '20.10': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release | lower }} + 'stable': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release | lower }} + 'edge': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ 
ansible_distribution_release | lower }} docker_package_info: pkgs: @@ -45,5 +45,5 @@ docker_repo_info: repos: - > deb [arch={{ host_architecture }}] {{ docker_ubuntu_repo_base_url }} - {{ ansible_distribution_release|lower }} + {{ ansible_distribution_release | lower }} stable diff --git a/roles/download/defaults/main/main.yml b/roles/download/defaults/main/main.yml index 828d80457..b3bc368ae 100644 --- a/roles/download/defaults/main/main.yml +++ b/roles/download/defaults/main/main.yml @@ -70,10 +70,10 @@ image_pull_command_on_localhost: "{{ lookup('vars', image_command_tool_on_localh image_info_command_on_localhost: "{{ lookup('vars', image_command_tool_on_localhost + '_image_info_command') }}" # Arch of Docker images and needed packages -image_arch: "{{host_architecture | default('amd64')}}" +image_arch: "{{ host_architecture | default('amd64') }}" # Nerdctl insecure flag set -nerdctl_extra_flags: '{%- if containerd_insecure_registries is defined and containerd_insecure_registries|length>0 -%}--insecure-registry{%- else -%}{%- endif -%}' +nerdctl_extra_flags: '{%- if containerd_insecure_registries is defined and containerd_insecure_registries | length > 0 -%}--insecure-registry{%- else -%}{%- endif -%}' # Versions kubeadm_version: "{{ kube_version }}" @@ -277,10 +277,10 @@ haproxy_image_tag: 2.6.6-alpine # Coredns version should be supported by corefile-migration (or at least work with) # bundle with kubeadm; if not 'basic' upgrade can sometimes fail -coredns_version: "{{ 'v1.10.1' if (kube_version is version('v1.27.0','>=')) else 'v1.9.3' }}" -coredns_image_is_namespaced: "{{ (coredns_version is version('v1.7.1','>=')) }}" +coredns_version: "{{ 'v1.10.1' if (kube_version is version('v1.27.0', '>=')) else 'v1.9.3' }}" +coredns_image_is_namespaced: "{{ (coredns_version is version('v1.7.1', '>=')) }}" -coredns_image_repo: "{{ kube_image_repo }}{{'/coredns/coredns' if (coredns_image_is_namespaced | bool) else '/coredns' }}" +coredns_image_repo: "{{ kube_image_repo 
}}{{ '/coredns/coredns' if (coredns_image_is_namespaced | bool) else '/coredns' }}" coredns_image_tag: "{{ coredns_version if (coredns_image_is_namespaced | bool) else (coredns_version | regex_replace('^v', '')) }}" nodelocaldns_version: "1.22.20" @@ -389,7 +389,7 @@ downloads: container: true repo: "{{ netcheck_server_image_repo }}" tag: "{{ netcheck_server_image_tag }}" - sha256: "{{ netcheck_server_digest_checksum|default(None) }}" + sha256: "{{ netcheck_server_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -398,7 +398,7 @@ downloads: container: true repo: "{{ netcheck_agent_image_repo }}" tag: "{{ netcheck_agent_image_tag }}" - sha256: "{{ netcheck_agent_digest_checksum|default(None) }}" + sha256: "{{ netcheck_agent_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -412,7 +412,7 @@ downloads: tag: "{{ etcd_image_tag }}" sha256: >- {{ etcd_binary_checksum if (etcd_deployment_type == 'host') - else etcd_digest_checksum|d(None) }} + else etcd_digest_checksum | d(None) }} url: "{{ etcd_download_url }}" unarchive: "{{ etcd_deployment_type == 'host' }}" owner: "root" @@ -635,7 +635,7 @@ downloads: container: true repo: "{{ cilium_image_repo }}" tag: "{{ cilium_image_tag }}" - sha256: "{{ cilium_digest_checksum|default(None) }}" + sha256: "{{ cilium_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -644,7 +644,7 @@ downloads: container: true repo: "{{ cilium_operator_image_repo }}" tag: "{{ cilium_operator_image_tag }}" - sha256: "{{ cilium_operator_digest_checksum|default(None) }}" + sha256: "{{ cilium_operator_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -653,7 +653,7 @@ downloads: container: true repo: "{{ cilium_hubble_relay_image_repo }}" tag: "{{ cilium_hubble_relay_image_tag }}" - sha256: "{{ cilium_hubble_relay_digest_checksum|default(None) }}" + sha256: "{{ cilium_hubble_relay_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -662,7 +662,7 @@ downloads: container: true repo: "{{ 
cilium_hubble_certgen_image_repo }}" tag: "{{ cilium_hubble_certgen_image_tag }}" - sha256: "{{ cilium_hubble_certgen_digest_checksum|default(None) }}" + sha256: "{{ cilium_hubble_certgen_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -671,7 +671,7 @@ downloads: container: true repo: "{{ cilium_hubble_ui_image_repo }}" tag: "{{ cilium_hubble_ui_image_tag }}" - sha256: "{{ cilium_hubble_ui_digest_checksum|default(None) }}" + sha256: "{{ cilium_hubble_ui_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -680,7 +680,7 @@ downloads: container: true repo: "{{ cilium_hubble_ui_backend_image_repo }}" tag: "{{ cilium_hubble_ui_backend_image_tag }}" - sha256: "{{ cilium_hubble_ui_backend_digest_checksum|default(None) }}" + sha256: "{{ cilium_hubble_ui_backend_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -689,7 +689,7 @@ downloads: container: true repo: "{{ cilium_hubble_envoy_image_repo }}" tag: "{{ cilium_hubble_envoy_image_tag }}" - sha256: "{{ cilium_hubble_envoy_digest_checksum|default(None) }}" + sha256: "{{ cilium_hubble_envoy_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -711,7 +711,7 @@ downloads: container: true repo: "{{ multus_image_repo }}" tag: "{{ multus_image_tag }}" - sha256: "{{ multus_digest_checksum|default(None) }}" + sha256: "{{ multus_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -720,7 +720,7 @@ downloads: container: true repo: "{{ flannel_image_repo }}" tag: "{{ flannel_image_tag }}" - sha256: "{{ flannel_digest_checksum|default(None) }}" + sha256: "{{ flannel_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -729,7 +729,7 @@ downloads: container: true repo: "{{ flannel_init_image_repo }}" tag: "{{ flannel_init_image_tag }}" - sha256: "{{ flannel_init_digest_checksum|default(None) }}" + sha256: "{{ flannel_init_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -753,7 +753,7 @@ downloads: container: true repo: "{{ calico_node_image_repo }}" tag: "{{ 
calico_node_image_tag }}" - sha256: "{{ calico_node_digest_checksum|default(None) }}" + sha256: "{{ calico_node_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -762,7 +762,7 @@ downloads: container: true repo: "{{ calico_cni_image_repo }}" tag: "{{ calico_cni_image_tag }}" - sha256: "{{ calico_cni_digest_checksum|default(None) }}" + sha256: "{{ calico_cni_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -771,7 +771,7 @@ downloads: container: true repo: "{{ calico_flexvol_image_repo }}" tag: "{{ calico_flexvol_image_tag }}" - sha256: "{{ calico_flexvol_digest_checksum|default(None) }}" + sha256: "{{ calico_flexvol_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -780,7 +780,7 @@ downloads: container: true repo: "{{ calico_policy_image_repo }}" tag: "{{ calico_policy_image_tag }}" - sha256: "{{ calico_policy_digest_checksum|default(None) }}" + sha256: "{{ calico_policy_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -789,7 +789,7 @@ downloads: container: true repo: "{{ calico_typha_image_repo }}" tag: "{{ calico_typha_image_tag }}" - sha256: "{{ calico_typha_digest_checksum|default(None) }}" + sha256: "{{ calico_typha_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -798,7 +798,7 @@ downloads: container: true repo: "{{ calico_apiserver_image_repo }}" tag: "{{ calico_apiserver_image_tag }}" - sha256: "{{ calico_apiserver_digest_checksum|default(None) }}" + sha256: "{{ calico_apiserver_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -811,9 +811,9 @@ downloads: url: "{{ calico_crds_download_url }}" unarchive: true unarchive_extra_opts: - - "{{ '--strip=6' if (calico_version is version('v3.22.3','<')) else '--strip=3' }}" + - "{{ '--strip=6' if (calico_version is version('v3.22.3', '<')) else '--strip=3' }}" - "--wildcards" - - "{{ '*/_includes/charts/calico/crds/kdd/' if (calico_version is version('v3.22.3','<')) else '*/libcalico-go/config/crd/' }}" + - "{{ '*/_includes/charts/calico/crds/kdd/' 
if (calico_version is version('v3.22.3', '<')) else '*/libcalico-go/config/crd/' }}" owner: "root" mode: "0755" groups: @@ -824,7 +824,7 @@ downloads: container: true repo: "{{ weave_kube_image_repo }}" tag: "{{ weave_kube_image_tag }}" - sha256: "{{ weave_kube_digest_checksum|default(None) }}" + sha256: "{{ weave_kube_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -833,7 +833,7 @@ downloads: container: true repo: "{{ weave_npc_image_repo }}" tag: "{{ weave_npc_image_tag }}" - sha256: "{{ weave_npc_digest_checksum|default(None) }}" + sha256: "{{ weave_npc_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -842,7 +842,7 @@ downloads: container: true repo: "{{ kube_ovn_container_image_repo }}" tag: "{{ kube_ovn_container_image_tag }}" - sha256: "{{ kube_ovn_digest_checksum|default(None) }}" + sha256: "{{ kube_ovn_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -851,7 +851,7 @@ downloads: container: true repo: "{{ kube_router_image_repo }}" tag: "{{ kube_router_image_tag }}" - sha256: "{{ kube_router_digest_checksum|default(None) }}" + sha256: "{{ kube_router_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -860,7 +860,7 @@ downloads: container: true repo: "{{ pod_infra_image_repo }}" tag: "{{ pod_infra_image_tag }}" - sha256: "{{ pod_infra_digest_checksum|default(None) }}" + sha256: "{{ pod_infra_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -869,7 +869,7 @@ downloads: container: true repo: "{{ kube_vip_image_repo }}" tag: "{{ kube_vip_image_tag }}" - sha256: "{{ kube_vip_digest_checksum|default(None) }}" + sha256: "{{ kube_vip_digest_checksum | default(None) }}" groups: - kube_control_plane @@ -878,7 +878,7 @@ downloads: container: true repo: "{{ nginx_image_repo }}" tag: "{{ nginx_image_tag }}" - sha256: "{{ nginx_digest_checksum|default(None) }}" + sha256: "{{ nginx_digest_checksum | default(None) }}" groups: - kube_node @@ -887,7 +887,7 @@ downloads: container: true repo: "{{ haproxy_image_repo }}" 
tag: "{{ haproxy_image_tag }}" - sha256: "{{ haproxy_digest_checksum|default(None) }}" + sha256: "{{ haproxy_digest_checksum | default(None) }}" groups: - kube_node @@ -896,7 +896,7 @@ downloads: container: true repo: "{{ coredns_image_repo }}" tag: "{{ coredns_image_tag }}" - sha256: "{{ coredns_digest_checksum|default(None) }}" + sha256: "{{ coredns_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -905,7 +905,7 @@ downloads: container: true repo: "{{ nodelocaldns_image_repo }}" tag: "{{ nodelocaldns_image_tag }}" - sha256: "{{ nodelocaldns_digest_checksum|default(None) }}" + sha256: "{{ nodelocaldns_digest_checksum | default(None) }}" groups: - k8s_cluster @@ -914,7 +914,7 @@ downloads: container: true repo: "{{ dnsautoscaler_image_repo }}" tag: "{{ dnsautoscaler_image_tag }}" - sha256: "{{ dnsautoscaler_digest_checksum|default(None) }}" + sha256: "{{ dnsautoscaler_digest_checksum | default(None) }}" groups: - kube_control_plane @@ -949,7 +949,7 @@ downloads: container: true repo: "{{ registry_image_repo }}" tag: "{{ registry_image_tag }}" - sha256: "{{ registry_digest_checksum|default(None) }}" + sha256: "{{ registry_digest_checksum | default(None) }}" groups: - kube_node @@ -958,7 +958,7 @@ downloads: container: true repo: "{{ metrics_server_image_repo }}" tag: "{{ metrics_server_image_tag }}" - sha256: "{{ metrics_server_digest_checksum|default(None) }}" + sha256: "{{ metrics_server_digest_checksum | default(None) }}" groups: - kube_control_plane @@ -967,7 +967,7 @@ downloads: container: true repo: "{{ local_volume_provisioner_image_repo }}" tag: "{{ local_volume_provisioner_image_tag }}" - sha256: "{{ local_volume_provisioner_digest_checksum|default(None) }}" + sha256: "{{ local_volume_provisioner_digest_checksum | default(None) }}" groups: - kube_node @@ -976,7 +976,7 @@ downloads: container: true repo: "{{ cephfs_provisioner_image_repo }}" tag: "{{ cephfs_provisioner_image_tag }}" - sha256: "{{ cephfs_provisioner_digest_checksum|default(None) 
}}" + sha256: "{{ cephfs_provisioner_digest_checksum | default(None) }}" groups: - kube_node @@ -985,7 +985,7 @@ downloads: container: true repo: "{{ rbd_provisioner_image_repo }}" tag: "{{ rbd_provisioner_image_tag }}" - sha256: "{{ rbd_provisioner_digest_checksum|default(None) }}" + sha256: "{{ rbd_provisioner_digest_checksum | default(None) }}" groups: - kube_node @@ -994,7 +994,7 @@ downloads: container: true repo: "{{ local_path_provisioner_image_repo }}" tag: "{{ local_path_provisioner_image_tag }}" - sha256: "{{ local_path_provisioner_digest_checksum|default(None) }}" + sha256: "{{ local_path_provisioner_digest_checksum | default(None) }}" groups: - kube_node @@ -1003,7 +1003,7 @@ downloads: container: true repo: "{{ ingress_nginx_controller_image_repo }}" tag: "{{ ingress_nginx_controller_image_tag }}" - sha256: "{{ ingress_nginx_controller_digest_checksum|default(None) }}" + sha256: "{{ ingress_nginx_controller_digest_checksum | default(None) }}" groups: - kube_node @@ -1012,7 +1012,7 @@ downloads: container: true repo: "{{ alb_ingress_image_repo }}" tag: "{{ alb_ingress_image_tag }}" - sha256: "{{ ingress_alb_controller_digest_checksum|default(None) }}" + sha256: "{{ ingress_alb_controller_digest_checksum | default(None) }}" groups: - kube_node @@ -1021,7 +1021,7 @@ downloads: container: true repo: "{{ cert_manager_controller_image_repo }}" tag: "{{ cert_manager_controller_image_tag }}" - sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}" + sha256: "{{ cert_manager_controller_digest_checksum | default(None) }}" groups: - kube_node @@ -1030,7 +1030,7 @@ downloads: container: true repo: "{{ cert_manager_cainjector_image_repo }}" tag: "{{ cert_manager_cainjector_image_tag }}" - sha256: "{{ cert_manager_cainjector_digest_checksum|default(None) }}" + sha256: "{{ cert_manager_cainjector_digest_checksum | default(None) }}" groups: - kube_node @@ -1039,7 +1039,7 @@ downloads: container: true repo: "{{ cert_manager_webhook_image_repo }}" tag: 
"{{ cert_manager_webhook_image_tag }}" - sha256: "{{ cert_manager_webhook_digest_checksum|default(None) }}" + sha256: "{{ cert_manager_webhook_digest_checksum | default(None) }}" groups: - kube_node @@ -1048,7 +1048,7 @@ downloads: container: true repo: "{{ csi_attacher_image_repo }}" tag: "{{ csi_attacher_image_tag }}" - sha256: "{{ csi_attacher_digest_checksum|default(None) }}" + sha256: "{{ csi_attacher_digest_checksum | default(None) }}" groups: - kube_node @@ -1057,7 +1057,7 @@ downloads: container: true repo: "{{ csi_provisioner_image_repo }}" tag: "{{ csi_provisioner_image_tag }}" - sha256: "{{ csi_provisioner_digest_checksum|default(None) }}" + sha256: "{{ csi_provisioner_digest_checksum | default(None) }}" groups: - kube_node @@ -1066,7 +1066,7 @@ downloads: container: true repo: "{{ csi_snapshotter_image_repo }}" tag: "{{ csi_snapshotter_image_tag }}" - sha256: "{{ csi_snapshotter_digest_checksum|default(None) }}" + sha256: "{{ csi_snapshotter_digest_checksum | default(None) }}" groups: - kube_node @@ -1075,7 +1075,7 @@ downloads: container: true repo: "{{ snapshot_controller_image_repo }}" tag: "{{ snapshot_controller_image_tag }}" - sha256: "{{ snapshot_controller_digest_checksum|default(None) }}" + sha256: "{{ snapshot_controller_digest_checksum | default(None) }}" groups: - kube_node @@ -1084,7 +1084,7 @@ downloads: container: true repo: "{{ csi_resizer_image_repo }}" tag: "{{ csi_resizer_image_tag }}" - sha256: "{{ csi_resizer_digest_checksum|default(None) }}" + sha256: "{{ csi_resizer_digest_checksum | default(None) }}" groups: - kube_node @@ -1093,7 +1093,7 @@ downloads: container: true repo: "{{ csi_node_driver_registrar_image_repo }}" tag: "{{ csi_node_driver_registrar_image_tag }}" - sha256: "{{ csi_node_driver_registrar_digest_checksum|default(None) }}" + sha256: "{{ csi_node_driver_registrar_digest_checksum | default(None) }}" groups: - kube_node @@ -1102,7 +1102,7 @@ downloads: container: true repo: "{{ cinder_csi_plugin_image_repo }}" tag: 
"{{ cinder_csi_plugin_image_tag }}" - sha256: "{{ cinder_csi_plugin_digest_checksum|default(None) }}" + sha256: "{{ cinder_csi_plugin_digest_checksum | default(None) }}" groups: - kube_node @@ -1111,7 +1111,7 @@ downloads: container: true repo: "{{ aws_ebs_csi_plugin_image_repo }}" tag: "{{ aws_ebs_csi_plugin_image_tag }}" - sha256: "{{ aws_ebs_csi_plugin_digest_checksum|default(None) }}" + sha256: "{{ aws_ebs_csi_plugin_digest_checksum | default(None) }}" groups: - kube_node @@ -1120,7 +1120,7 @@ downloads: container: true repo: "{{ dashboard_image_repo }}" tag: "{{ dashboard_image_tag }}" - sha256: "{{ dashboard_digest_checksum|default(None) }}" + sha256: "{{ dashboard_digest_checksum | default(None) }}" groups: - kube_control_plane @@ -1129,7 +1129,7 @@ downloads: container: true repo: "{{ dashboard_metrics_scraper_repo }}" tag: "{{ dashboard_metrics_scraper_tag }}" - sha256: "{{ dashboard_digest_checksum|default(None) }}" + sha256: "{{ dashboard_digest_checksum | default(None) }}" groups: - kube_control_plane @@ -1138,7 +1138,7 @@ downloads: container: true repo: "{{ metallb_speaker_image_repo }}" tag: "{{ metallb_version }}" - sha256: "{{ metallb_speaker_digest_checksum|default(None) }}" + sha256: "{{ metallb_speaker_digest_checksum | default(None) }}" groups: - kube_control_plane @@ -1147,7 +1147,7 @@ downloads: container: true repo: "{{ metallb_controller_image_repo }}" tag: "{{ metallb_version }}" - sha256: "{{ metallb_controller_digest_checksum|default(None) }}" + sha256: "{{ metallb_controller_digest_checksum | default(None) }}" groups: - kube_control_plane @@ -1156,7 +1156,7 @@ downloads: file: true version: "{{ yq_version }}" dest: "{{ local_release_dir }}/yq-{{ yq_version }}-{{ image_arch }}" - sha256: "{{ yq_binary_checksum|default(None) }}" + sha256: "{{ yq_binary_checksum | default(None) }}" url: "{{ yq_download_url }}" unarchive: false owner: "root" diff --git a/roles/download/tasks/check_pull_required.yml 
b/roles/download/tasks/check_pull_required.yml index 449589b4c..c0681a7ec 100644 --- a/roles/download/tasks/check_pull_required.yml +++ b/roles/download/tasks/check_pull_required.yml @@ -11,7 +11,7 @@ - name: check_pull_required | Set pull_required if the desired image is not yet loaded set_fact: pull_required: >- - {%- if image_reponame | regex_replace('^docker\.io/(library/)?','') in docker_images.stdout.split(',') %}false{%- else -%}true{%- endif -%} + {%- if image_reponame | regex_replace('^docker\.io/(library/)?', '') in docker_images.stdout.split(',') %}false{%- else -%}true{%- endif -%} when: not download_always_pull - name: check_pull_required | Check that the local digest sha256 corresponds to the given image tag diff --git a/roles/download/tasks/download_file.yml b/roles/download/tasks/download_file.yml index fba76405e..0db1eec68 100644 --- a/roles/download/tasks/download_file.yml +++ b/roles/download/tasks/download_file.yml @@ -68,7 +68,7 @@ retries: "{{ download_retries }}" delay: "{{ retry_stagger | default(5) }}" environment: "{{ proxy_env }}" - no_log: "{{ not (unsafe_show_logs|bool) }}" + no_log: "{{ not (unsafe_show_logs | bool) }}" loop: "{{ download.mirrors | default([download.url]) }}" loop_control: loop_var: mirror @@ -102,7 +102,7 @@ retries: "{{ download_retries }}" delay: "{{ retry_stagger | default(5) }}" environment: "{{ proxy_env }}" - no_log: "{{ not (unsafe_show_logs|bool) }}" + no_log: "{{ not (unsafe_show_logs | bool) }}" - name: download_file | Copy file back to ansible host file cache ansible.posix.synchronize: diff --git a/roles/download/tasks/extract_file.yml b/roles/download/tasks/extract_file.yml index 81858dd3a..94f240edb 100644 --- a/roles/download/tasks/extract_file.yml +++ b/roles/download/tasks/extract_file.yml @@ -6,6 +6,6 @@ owner: "{{ download.owner | default(omit) }}" mode: "{{ download.mode | default(omit) }}" copy: no - extra_opts: "{{ download.unarchive_extra_opts|default(omit) }}" + extra_opts: "{{ 
download.unarchive_extra_opts | default(omit) }}" when: - download.unarchive | default(false) diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml index 536c293a7..92313a58a 100644 --- a/roles/download/tasks/main.yml +++ b/roles/download/tasks/main.yml @@ -2,7 +2,7 @@ - name: download | Prepare working directories and variables import_tasks: prep_download.yml when: - - not skip_downloads|default(false) + - not skip_downloads | default(false) tags: - download - upload @@ -10,7 +10,7 @@ - name: download | Get kubeadm binary and list of required images include_tasks: prep_kubeadm_images.yml when: - - not skip_downloads|default(false) + - not skip_downloads | default(false) - inventory_hostname in groups['kube_control_plane'] tags: - download diff --git a/roles/download/tasks/prep_download.yml b/roles/download/tasks/prep_download.yml index 587810d48..0554d1b29 100644 --- a/roles/download/tasks/prep_download.yml +++ b/roles/download/tasks/prep_download.yml @@ -58,7 +58,7 @@ - name: prep_download | Register docker images info shell: "{{ image_info_command }}" # noqa command-instead-of-shell - image_info_command contains pipe therefore requires shell - no_log: "{{ not (unsafe_show_logs|bool) }}" + no_log: "{{ not (unsafe_show_logs | bool) }}" register: docker_images failed_when: false changed_when: false diff --git a/roles/download/tasks/prep_kubeadm_images.yml b/roles/download/tasks/prep_kubeadm_images.yml index 2ab216a76..e1dc3af4c 100644 --- a/roles/download/tasks/prep_kubeadm_images.yml +++ b/roles/download/tasks/prep_kubeadm_images.yml @@ -20,7 +20,7 @@ dest: "{{ kube_config_dir }}/kubeadm-images.yaml" mode: 0644 when: - - not skip_kubeadm_images|default(false) + - not skip_kubeadm_images | default(false) - name: prep_kubeadm_images | Copy kubeadm binary from download dir to system path copy: @@ -36,36 +36,36 @@ state: file - name: prep_kubeadm_images | Generate list of required images - shell: "set -o pipefail && {{ bin_dir }}/kubeadm config 
images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -Ev 'coredns|pause'" + shell: "set -o pipefail && {{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -Ev 'coredns|pause'" args: executable: /bin/bash register: kubeadm_images_raw run_once: true changed_when: false when: - - not skip_kubeadm_images|default(false) + - not skip_kubeadm_images | default(false) - name: prep_kubeadm_images | Parse list of images vars: kubeadm_images_list: "{{ kubeadm_images_raw.stdout_lines }}" set_fact: kubeadm_image: - key: "kubeadm_{{ (item | regex_replace('^(?:.*\\/)*','')).split(':')[0] }}" + key: "kubeadm_{{ (item | regex_replace('^(?:.*\\/)*', '')).split(':')[0] }}" value: enabled: true container: true - repo: "{{ item | regex_replace('^(.*):.*$','\\1') }}" - tag: "{{ item | regex_replace('^.*:(.*)$','\\1') }}" + repo: "{{ item | regex_replace('^(.*):.*$', '\\1') }}" + tag: "{{ item | regex_replace('^.*:(.*)$', '\\1') }}" groups: k8s_cluster loop: "{{ kubeadm_images_list | flatten(levels=1) }}" register: kubeadm_images_cooked run_once: true when: - - not skip_kubeadm_images|default(false) + - not skip_kubeadm_images | default(false) - name: prep_kubeadm_images | Convert list of images to dict for later use set_fact: kubeadm_images: "{{ kubeadm_images_cooked.results | map(attribute='ansible_facts.kubeadm_image') | list | items2dict }}" run_once: true when: - - not skip_kubeadm_images|default(false) + - not skip_kubeadm_images | default(false) diff --git a/roles/download/templates/kubeadm-images.yaml.j2 b/roles/download/templates/kubeadm-images.yaml.j2 index 3a9121def..36154b31a 100644 --- a/roles/download/templates/kubeadm-images.yaml.j2 +++ b/roles/download/templates/kubeadm-images.yaml.j2 @@ -21,5 +21,5 @@ etcd: {% endif %} dns: type: CoreDNS - imageRepository: {{ coredns_image_repo | regex_replace('/coredns(?!/coredns).*$','') }} + imageRepository: {{ coredns_image_repo | 
regex_replace('/coredns(?!/coredns).*$', '') }} imageTag: {{ coredns_image_tag }} diff --git a/roles/etcd/tasks/check_certs.yml b/roles/etcd/tasks/check_certs.yml index b67c6b2c9..2cb802d4e 100644 --- a/roles/etcd/tasks/check_certs.yml +++ b/roles/etcd/tasks/check_certs.yml @@ -42,7 +42,7 @@ - name: "Check_certs | Set 'gen_certs' to true if expected certificates are not on the first etcd node(1/2)" set_fact: gen_certs: true - when: force_etcd_cert_refresh or not item in etcdcert_master.files|map(attribute='path') | list + when: force_etcd_cert_refresh or not item in etcdcert_master.files | map(attribute='path') | list run_once: true with_items: "{{ expected_files }}" vars: @@ -59,7 +59,7 @@ {% for host in k8s_nodes %} '{{ etcd_cert_dir }}/node-{{ host }}.pem', '{{ etcd_cert_dir }}/node-{{ host }}-key.pem' - {% if not loop.last %}{{','}}{% endif %} + {% if not loop.last %}{{ ',' }}{% endif %} {% endfor %}] - name: "Check_certs | Set 'gen_certs' to true if expected certificates are not on the first etcd node(2/2)" @@ -77,28 +77,29 @@ '{{ etcd_cert_dir }}/member-{{ host }}.pem', '{{ etcd_cert_dir }}/member-{{ host }}-key.pem', {% endfor %} - {% set k8s_nodes = groups['k8s_cluster']|unique|sort %} + {% set k8s_nodes = groups['k8s_cluster'] | unique | sort %} {% for host in k8s_nodes %} '{{ etcd_cert_dir }}/node-{{ host }}.pem', '{{ etcd_cert_dir }}/node-{{ host }}-key.pem' - {% if not loop.last %}{{','}}{% endif %} + {% if not loop.last %}{{ ',' }}{% endif %} {% endfor %}] when: - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool - kube_network_plugin != "calico" or calico_datastore == "etcd" - - force_etcd_cert_refresh or not item in etcdcert_master.files|map(attribute='path') | list + - force_etcd_cert_refresh or not item in etcdcert_master.files | map(attribute='path') | list - name: "Check_certs | Set 'gen_master_certs' object to track whether member and admin certs exist on first etcd node" set_fact: + # 
noqa: jinja[spacing] gen_master_certs: |- { {% set etcd_members = groups['etcd'] -%} - {% set existing_certs = etcdcert_master.files|map(attribute='path')|list|sort %} + {% set existing_certs = etcdcert_master.files | map(attribute='path') | list | sort %} {% for host in etcd_members -%} - {% set member_cert = "%s/member-%s.pem"|format(etcd_cert_dir, host) %} - {% set member_key = "%s/member-%s-key.pem"|format(etcd_cert_dir, host) %} - {% set admin_cert = "%s/admin-%s.pem"|format(etcd_cert_dir, host) %} - {% set admin_key = "%s/admin-%s-key.pem"|format(etcd_cert_dir, host) %} + {% set member_cert = "%s/member-%s.pem" | format(etcd_cert_dir, host) %} + {% set member_key = "%s/member-%s-key.pem" | format(etcd_cert_dir, host) %} + {% set admin_cert = "%s/admin-%s.pem" | format(etcd_cert_dir, host) %} + {% set admin_key = "%s/admin-%s-key.pem" | format(etcd_cert_dir, host) %} {% if force_etcd_cert_refresh -%} "{{ host }}": True, {% elif member_cert in existing_certs and member_key in existing_certs and admin_cert in existing_certs and admin_key in existing_certs -%} @@ -112,13 +113,14 @@ - name: "Check_certs | Set 'gen_node_certs' object to track whether node certs exist on first etcd node" set_fact: + # noqa: jinja[spacing] gen_node_certs: |- { {% set k8s_nodes = groups['k8s_cluster'] -%} - {% set existing_certs = etcdcert_master.files|map(attribute='path')|list|sort %} + {% set existing_certs = etcdcert_master.files | map(attribute='path') | list | sort %} {% for host in k8s_nodes -%} - {% set host_cert = "%s/node-%s.pem"|format(etcd_cert_dir, host) %} - {% set host_key = "%s/node-%s-key.pem"|format(etcd_cert_dir, host) %} + {% set host_cert = "%s/node-%s.pem" | format(etcd_cert_dir, host) %} + {% set host_key = "%s/node-%s-key.pem" | format(etcd_cert_dir, host) %} {% if force_etcd_cert_refresh -%} "{{ host }}": True, {% elif host_cert in existing_certs and host_key in existing_certs -%} @@ -135,16 +137,16 @@ etcd_member_requires_sync: true when: - inventory_hostname 
in groups['etcd'] - - (not etcd_member_certs.results[0].stat.exists|default(false)) or - (not etcd_member_certs.results[1].stat.exists|default(false)) or - (not etcd_member_certs.results[2].stat.exists|default(false)) or - (not etcd_member_certs.results[3].stat.exists|default(false)) or - (not etcd_member_certs.results[4].stat.exists|default(false)) or - (etcd_member_certs.results[0].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[0].stat.path)|map(attribute="checksum")|first|default('')) or - (etcd_member_certs.results[1].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[1].stat.path)|map(attribute="checksum")|first|default('')) or - (etcd_member_certs.results[2].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[2].stat.path)|map(attribute="checksum")|first|default('')) or - (etcd_member_certs.results[3].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[3].stat.path)|map(attribute="checksum")|first|default('')) or - (etcd_member_certs.results[4].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[4].stat.path)|map(attribute="checksum")|first|default('')) + - (not etcd_member_certs.results[0].stat.exists | default(false)) or + (not etcd_member_certs.results[1].stat.exists | default(false)) or + (not etcd_member_certs.results[2].stat.exists | default(false)) or + (not etcd_member_certs.results[3].stat.exists | default(false)) or + (not etcd_member_certs.results[4].stat.exists | default(false)) or + (etcd_member_certs.results[0].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_member_certs.results[0].stat.path) | map(attribute="checksum") | first | default('')) or + (etcd_member_certs.results[1].stat.checksum | default('') != etcdcert_master.files 
| selectattr("path", "equalto", etcd_member_certs.results[1].stat.path) | map(attribute="checksum") | first | default('')) or + (etcd_member_certs.results[2].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_member_certs.results[2].stat.path) | map(attribute="checksum") | first | default('')) or + (etcd_member_certs.results[3].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_member_certs.results[3].stat.path) | map(attribute="checksum") | first | default('')) or + (etcd_member_certs.results[4].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_member_certs.results[4].stat.path) | map(attribute="checksum") | first | default('')) - name: "Check_certs | Set 'kubernetes_host_requires_sync' to true if ca or node cert and key don't exist on kubernetes host or checksum doesn't match" set_fact: @@ -152,18 +154,18 @@ when: - inventory_hostname in groups['k8s_cluster'] and inventory_hostname not in groups['etcd'] - - (not etcd_node_certs.results[0].stat.exists|default(false)) or - (not etcd_node_certs.results[1].stat.exists|default(false)) or - (not etcd_node_certs.results[2].stat.exists|default(false)) or - (etcd_node_certs.results[0].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_node_certs.results[0].stat.path)|map(attribute="checksum")|first|default('')) or - (etcd_node_certs.results[1].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_node_certs.results[1].stat.path)|map(attribute="checksum")|first|default('')) or - (etcd_node_certs.results[2].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_node_certs.results[2].stat.path)|map(attribute="checksum")|first|default('')) + - (not etcd_node_certs.results[0].stat.exists | default(false)) or + (not etcd_node_certs.results[1].stat.exists | default(false)) or + (not 
etcd_node_certs.results[2].stat.exists | default(false)) or + (etcd_node_certs.results[0].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_node_certs.results[0].stat.path) | map(attribute="checksum") | first | default('')) or + (etcd_node_certs.results[1].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_node_certs.results[1].stat.path) | map(attribute="checksum") | first | default('')) or + (etcd_node_certs.results[2].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_node_certs.results[2].stat.path) | map(attribute="checksum") | first | default('')) - name: "Check_certs | Set 'sync_certs' to true" set_fact: sync_certs: true when: - - etcd_member_requires_sync|default(false) or - kubernetes_host_requires_sync|default(false) or + - etcd_member_requires_sync | default(false) or + kubernetes_host_requires_sync | default(false) or (inventory_hostname in gen_master_certs and gen_master_certs[inventory_hostname]) or (inventory_hostname in gen_node_certs and gen_node_certs[inventory_hostname]) diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml index 7beda4d78..cd66de7eb 100644 --- a/roles/etcd/tasks/gen_certs_script.yml +++ b/roles/etcd/tasks/gen_certs_script.yml @@ -25,7 +25,7 @@ run_once: yes delegate_to: "{{ groups['etcd'][0] }}" when: - - gen_certs|default(false) + - gen_certs | default(false) - inventory_hostname == groups['etcd'][0] - name: Gen_certs | copy certs generation script @@ -35,7 +35,7 @@ mode: 0700 run_once: yes when: - - gen_certs|default(false) + - gen_certs | default(false) - inventory_hostname == groups['etcd'][0] - name: Gen_certs | run cert generation script for etcd and kube control plane nodes @@ -55,7 +55,7 @@ {% endfor %} run_once: yes delegate_to: "{{ groups['etcd'][0] }}" - when: gen_certs|default(false) + when: gen_certs | default(false) notify: set etcd_secret_changed - name: 
Gen_certs | run cert generation script for all clients @@ -72,7 +72,7 @@ when: - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool - kube_network_plugin != "calico" or calico_datastore == "etcd" - - gen_certs|default(false) + - gen_certs | default(false) notify: set etcd_secret_changed - name: Gen_certs | Gather etcd member/admin and kube_control_plane client certs from first etcd node @@ -95,7 +95,7 @@ delegate_to: "{{ groups['etcd'][0] }}" when: - inventory_hostname in groups['etcd'] - - sync_certs|default(false) + - sync_certs | default(false) - inventory_hostname != groups['etcd'][0] notify: set etcd_secret_changed @@ -109,7 +109,7 @@ with_items: "{{ etcd_master_certs.results }}" when: - inventory_hostname in groups['etcd'] - - sync_certs|default(false) + - sync_certs | default(false) - inventory_hostname != groups['etcd'][0] loop_control: label: "{{ item.item }}" @@ -150,14 +150,14 @@ - include_tasks: gen_nodes_certs_script.yml when: - inventory_hostname in groups['kube_control_plane'] and - sync_certs|default(false) and inventory_hostname not in groups['etcd'] + sync_certs | default(false) and inventory_hostname not in groups['etcd'] - include_tasks: gen_nodes_certs_script.yml when: - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool - kube_network_plugin != "calico" or calico_datastore == "etcd" - inventory_hostname in groups['k8s_cluster'] and - sync_certs|default(false) and inventory_hostname not in groups['etcd'] + sync_certs | default(false) and inventory_hostname not in groups['etcd'] - name: Gen_certs | check certificate permissions file: diff --git a/roles/etcd/tasks/gen_nodes_certs_script.yml b/roles/etcd/tasks/gen_nodes_certs_script.yml index 73e64c29f..a7b31db26 100644 --- a/roles/etcd/tasks/gen_nodes_certs_script.yml +++ b/roles/etcd/tasks/gen_nodes_certs_script.yml @@ -14,18 +14,18 @@ - "{{ my_etcd_node_certs }}" - name: Gen_certs 
| Gather node certs - shell: "set -o pipefail && tar cfz - -C {{ etcd_cert_dir }} {{ my_etcd_node_certs|join(' ') }} | base64 --wrap=0" + shell: "set -o pipefail && tar cfz - -C {{ etcd_cert_dir }} {{ my_etcd_node_certs | join(' ') }} | base64 --wrap=0" args: executable: /bin/bash - no_log: "{{ not (unsafe_show_logs|bool) }}" + no_log: "{{ not (unsafe_show_logs | bool) }}" register: etcd_node_certs check_mode: no delegate_to: "{{ groups['etcd'][0] }}" changed_when: false - name: Gen_certs | Copy certs on nodes - shell: "set -o pipefail && base64 -d <<< '{{ etcd_node_certs.stdout|quote }}' | tar xz -C {{ etcd_cert_dir }}" + shell: "set -o pipefail && base64 -d <<< '{{ etcd_node_certs.stdout | quote }}' | tar xz -C {{ etcd_cert_dir }}" args: executable: /bin/bash - no_log: "{{ not (unsafe_show_logs|bool) }}" + no_log: "{{ not (unsafe_show_logs | bool) }}" changed_when: false diff --git a/roles/etcd/tasks/install_docker.yml b/roles/etcd/tasks/install_docker.yml index 025a0ba17..4c0923b6e 100644 --- a/roles/etcd/tasks/install_docker.yml +++ b/roles/etcd/tasks/install_docker.yml @@ -17,14 +17,14 @@ notify: restart etcd when: - etcd_cluster_setup - - etcd_image_tag not in etcd_current_docker_image.stdout|default('') + - etcd_image_tag not in etcd_current_docker_image.stdout | default('') - name: Restart etcd-events if necessary command: /bin/true notify: restart etcd-events when: - etcd_events_cluster_setup - - etcd_image_tag not in etcd_events_current_docker_image.stdout|default('') + - etcd_image_tag not in etcd_events_current_docker_image.stdout | default('') - name: Install etcd launch script template: diff --git a/roles/etcd/tasks/install_host.yml b/roles/etcd/tasks/install_host.yml index 14a75b48b..6abea352b 100644 --- a/roles/etcd/tasks/install_host.yml +++ b/roles/etcd/tasks/install_host.yml @@ -11,14 +11,14 @@ notify: restart etcd when: - etcd_cluster_setup - - etcd_version.lstrip('v') not in etcd_current_host_version.stdout|default('') + - 
etcd_version.lstrip('v') not in etcd_current_host_version.stdout | default('') - name: Restart etcd-events if necessary command: /bin/true notify: restart etcd-events when: - etcd_events_cluster_setup - - etcd_version.lstrip('v') not in etcd_current_host_version.stdout|default('') + - etcd_version.lstrip('v') not in etcd_current_host_version.stdout | default('') - name: install | Download etcd and etcdctl include_tasks: "../../download/tasks/download_file.yml" diff --git a/roles/etcd/tasks/join_etcd-events_member.yml b/roles/etcd/tasks/join_etcd-events_member.yml index cfd0a33b0..205549bad 100644 --- a/roles/etcd/tasks/join_etcd-events_member.yml +++ b/roles/etcd/tasks/join_etcd-events_member.yml @@ -14,10 +14,11 @@ - include_tasks: refresh_config.yml vars: + # noqa: jinja[spacing] etcd_events_peer_addresses: >- {% for host in groups['etcd'] -%} {%- if hostvars[host]['etcd_events_member_in_cluster'].rc == 0 -%} - {{ "etcd"+loop.index|string }}=https://{{ hostvars[host].etcd_events_access_address | default(hostvars[host].ip | default(fallback_ips[host])) }}:2382, + {{ "etcd" + loop.index | string }}=https://{{ hostvars[host].etcd_events_access_address | default(hostvars[host].ip | default(fallback_ips[host])) }}:2382, {%- endif -%} {%- if loop.last -%} {{ etcd_member_name }}={{ etcd_events_peer_url }} diff --git a/roles/etcd/tasks/join_etcd_member.yml b/roles/etcd/tasks/join_etcd_member.yml index 1cc2abf4f..b3d8e013c 100644 --- a/roles/etcd/tasks/join_etcd_member.yml +++ b/roles/etcd/tasks/join_etcd_member.yml @@ -15,10 +15,11 @@ - include_tasks: refresh_config.yml vars: + # noqa: jinja[spacing] etcd_peer_addresses: >- {% for host in groups['etcd'] -%} {%- if hostvars[host]['etcd_member_in_cluster'].rc == 0 -%} - {{ "etcd"+loop.index|string }}=https://{{ hostvars[host].etcd_access_address | default(hostvars[host].ip | default(fallback_ips[host])) }}:2380, + {{ "etcd" + loop.index | string }}=https://{{ hostvars[host].etcd_access_address | default(hostvars[host].ip | 
default(fallback_ips[host])) }}:2380, {%- endif -%} {%- if loop.last -%} {{ etcd_member_name }}={{ etcd_peer_url }} diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 432d5e20d..f3d304bb8 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -7,13 +7,13 @@ - include_tasks: "gen_certs_script.yml" when: - - cert_management |d('script') == "script" + - cert_management | d('script') == "script" tags: - etcd-secrets - include_tasks: upd_ca_trust.yml when: - - inventory_hostname in groups['etcd']|union(groups['kube_control_plane'])|unique|sort + - inventory_hostname in groups['etcd'] | union(groups['kube_control_plane']) | unique | sort tags: - etcd-secrets @@ -63,12 +63,12 @@ - name: Restart etcd if certs changed command: /bin/true notify: restart etcd - when: is_etcd_master and etcd_cluster_setup and etcd_secret_changed|default(false) + when: is_etcd_master and etcd_cluster_setup and etcd_secret_changed | default(false) - name: Restart etcd-events if certs changed command: /bin/true notify: restart etcd - when: is_etcd_master and etcd_events_cluster_setup and etcd_secret_changed|default(false) + when: is_etcd_master and etcd_events_cluster_setup and etcd_secret_changed | default(false) # After etcd cluster is assembled, make sure that # initial state of the cluster is in `existing` diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml index e743037f9..4de1fe916 100644 --- a/roles/kubernetes-apps/ansible/defaults/main.yml +++ b/roles/kubernetes-apps/ansible/defaults/main.yml @@ -6,10 +6,10 @@ dns_memory_limit: 300Mi dns_cpu_requests: 100m dns_memory_requests: 70Mi -dns_min_replicas: "{{ [ 2, groups['k8s_cluster'] | length ] | min }}" +dns_min_replicas: "{{ [2, groups['k8s_cluster'] | length] | min }}" dns_nodes_per_replica: 16 dns_cores_per_replica: 256 -dns_prevent_single_point_failure: "{{ 'true' if dns_min_replicas|int > 1 else 'false' }}" 
+dns_prevent_single_point_failure: "{{ 'true' if dns_min_replicas | int > 1 else 'false' }}" enable_coredns_reverse_dns_lookups: true coredns_ordinal_suffix: "" # dns_extra_tolerations: [{effect: NoSchedule, operator: "Exists"}] diff --git a/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml b/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml index b94509f45..b438afb88 100644 --- a/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml +++ b/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml @@ -1,6 +1,7 @@ --- - name: Kubernetes Apps | set up necessary nodelocaldns parameters set_fact: + # noqa: jinja[spacing] primaryClusterIP: >- {%- if dns_mode in ['coredns', 'coredns_dual'] -%} {{ skydns_server }} @@ -26,6 +27,7 @@ - { name: nodelocaldns, file: nodelocaldns-daemonset.yml, type: daemonset } register: nodelocaldns_manifests vars: + # noqa: jinja[spacing] forwardTarget: >- {%- if secondaryclusterIP is defined and dns_mode == 'coredns_dual' -%} {{ primaryClusterIP }} {{ secondaryclusterIP }} @@ -33,8 +35,8 @@ {{ primaryClusterIP }} {%- endif -%} upstreamForwardTarget: >- - {%- if upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%} - {{ upstream_dns_servers|join(' ') }} + {%- if upstream_dns_servers is defined and upstream_dns_servers | length > 0 -%} + {{ upstream_dns_servers | join(' ') }} {%- else -%} /etc/resolv.conf {%- endif -%} @@ -54,15 +56,17 @@ - { name: nodelocaldns, file: nodelocaldns-second-daemonset.yml, type: daemonset } register: nodelocaldns_second_manifests vars: + # noqa: jinja[spacing] forwardTarget: >- {%- if secondaryclusterIP is defined and dns_mode == 'coredns_dual' -%} {{ primaryClusterIP }} {{ secondaryclusterIP }} {%- else -%} {{ primaryClusterIP }} {%- endif -%} + # noqa: jinja[spacing] upstreamForwardTarget: >- - {%- if upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%} - {{ upstream_dns_servers|join(' ') }} + {%- if upstream_dns_servers is defined and upstream_dns_servers | length > 0 -%} + 
{{ upstream_dns_servers | join(' ') }} {%- else -%} /etc/resolv.conf {%- endif -%} diff --git a/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 index 037378042..7a06023e8 100644 --- a/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 @@ -8,12 +8,12 @@ metadata: addonmanager.kubernetes.io/mode: EnsureExists data: Corefile: | -{% if coredns_external_zones is defined and coredns_external_zones|length > 0 %} +{% if coredns_external_zones is defined and coredns_external_zones | length > 0 %} {% for block in coredns_external_zones %} {{ block['zones'] | join(' ') }} { log errors -{% if block['rewrite'] is defined and block['rewrite']|length > 0 %} +{% if block['rewrite'] is defined and block['rewrite'] | length > 0 %} {% for rewrite_match in block['rewrite'] %} rewrite {{ rewrite_match }} {% endfor %} @@ -57,7 +57,7 @@ data: {% endif %} } prometheus :9153 - forward . {{ upstream_dns_servers|join(' ') if upstream_dns_servers is defined and upstream_dns_servers|length > 0 else '/etc/resolv.conf' }} { + forward . 
{{ upstream_dns_servers | join(' ') if upstream_dns_servers is defined and upstream_dns_servers | length > 0 else '/etc/resolv.conf' }} { prefer_udp max_concurrent 1000 {% if dns_upstream_forward_extra_opts is defined %} diff --git a/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2 b/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2 index 9704155bb..c08540534 100644 --- a/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2 @@ -32,7 +32,7 @@ spec: annotations: spec: nodeSelector: - {{ dns_autoscaler_deployment_nodeselector}} + {{ dns_autoscaler_deployment_nodeselector }} priorityClassName: system-cluster-critical securityContext: seccompProfile: diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 index 47dbf70a3..40dd199e0 100644 --- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 @@ -15,7 +15,7 @@ spec: labels: app: netchecker-agent spec: - priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }} tolerations: - effect: NoSchedule operator: Exists diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 index 8b2e51a42..50e279335 100644 --- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 @@ -19,7 +19,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet nodeSelector: kubernetes.io/os: linux - priorityClassName: {% if netcheck_namespace == 'kube-system' 
%}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }} tolerations: - effect: NoSchedule operator: Exists diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 index edda5c5b2..02fd6b680 100644 --- a/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 @@ -16,7 +16,7 @@ spec: labels: app: netchecker-server spec: - priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }} volumes: - name: etcd-data emptyDir: {} diff --git a/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2 b/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2 index 231c8bac1..b15ea89e9 100644 --- a/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2 @@ -8,13 +8,13 @@ metadata: data: Corefile: | -{% if nodelocaldns_external_zones is defined and nodelocaldns_external_zones|length > 0 %} +{% if nodelocaldns_external_zones is defined and nodelocaldns_external_zones | length > 0 %} {% for block in nodelocaldns_external_zones %} {{ block['zones'] | join(' ') }} { errors cache {{ block['cache'] | default(30) }} reload -{% if block['rewrite'] is defined and block['rewrite']|length > 0 %} +{% if block['rewrite'] is defined and block['rewrite'] | length > 0 %} {% for rewrite_match in block['rewrite'] %} rewrite {{ rewrite_match }} {% endfor %} @@ -95,7 +95,7 @@ data: } {% if enable_nodelocaldns_secondary %} 
Corefile-second: | -{% if nodelocaldns_external_zones is defined and nodelocaldns_external_zones|length > 0 %} +{% if nodelocaldns_external_zones is defined and nodelocaldns_external_zones | length > 0 %} {% for block in nodelocaldns_external_zones %} {{ block['zones'] | join(' ') }} { errors diff --git a/roles/kubernetes-apps/cloud_controller/oci/templates/controller-manager-config.yml.j2 b/roles/kubernetes-apps/cloud_controller/oci/templates/controller-manager-config.yml.j2 index de7709354..d585de1f0 100644 --- a/roles/kubernetes-apps/cloud_controller/oci/templates/controller-manager-config.yml.j2 +++ b/roles/kubernetes-apps/cloud_controller/oci/templates/controller-manager-config.yml.j2 @@ -63,7 +63,7 @@ loadBalancer: # inbound traffic to load balancers. securityListManagementMode: {{ oci_security_list_management }} -{% if oci_security_lists is defined and oci_security_lists|length > 0 %} +{% if oci_security_lists is defined and oci_security_lists | length > 0 %} # Optional specification of which security lists to modify per subnet. This does not apply if security list management is off. 
securityLists: {% for subnet_ocid, list_ocid in oci_security_lists.items() %} @@ -71,7 +71,7 @@ loadBalancer: {% endfor %} {% endif %} -{% if oci_rate_limit is defined and oci_rate_limit|length > 0 %} +{% if oci_rate_limit is defined and oci_rate_limit | length > 0 %} # Optional rate limit controls for accessing OCI API rateLimiter: {% if oci_rate_limit.rate_limit_qps_read %} diff --git a/roles/kubernetes-apps/cloud_controller/oci/templates/oci-cloud-provider.yml.j2 b/roles/kubernetes-apps/cloud_controller/oci/templates/oci-cloud-provider.yml.j2 index d50f1393f..6b45d818c 100644 --- a/roles/kubernetes-apps/cloud_controller/oci/templates/oci-cloud-provider.yml.j2 +++ b/roles/kubernetes-apps/cloud_controller/oci/templates/oci-cloud-provider.yml.j2 @@ -30,7 +30,7 @@ spec: spec: {% if oci_cloud_controller_pull_secret is defined %} imagePullSecrets: - - name: {{oci_cloud_controller_pull_secret}} + - name: {{ oci_cloud_controller_pull_secret }} {% endif %} serviceAccountName: cloud-controller-manager hostNetwork: true @@ -56,7 +56,7 @@ spec: path: /etc/kubernetes containers: - name: oci-cloud-controller-manager - image: {{oci_cloud_controller_pull_source}}:{{oci_cloud_controller_version}} + image: {{ oci_cloud_controller_pull_source }}:{{ oci_cloud_controller_version }} command: ["/usr/local/bin/oci-cloud-controller-manager"] args: - --cloud-config=/etc/oci/cloud-provider.yaml diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml index 643c0ce45..668f18afd 100644 --- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml +++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml @@ -70,7 +70,7 @@ src: k8s-cluster-critical-pc.yml dest: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml" mode: 0640 - when: inventory_hostname == groups['kube_control_plane']|last + when: inventory_hostname == groups['kube_control_plane'] | last - name: PriorityClass | Create k8s-cluster-critical kube: @@ -79,4 +79,4 @@ resource: 
"PriorityClass" filename: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml" state: latest - when: inventory_hostname == groups['kube_control_plane']|last + when: inventory_hostname == groups['kube_control_plane'] | last diff --git a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml index 62ecaf90f..8cba9bf37 100644 --- a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml +++ b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml @@ -1,25 +1,25 @@ --- -- name: Container Engine Acceleration Nvidia GPU| gather os specific variables +- name: Container Engine Acceleration Nvidia GPU | gather os specific variables include_vars: "{{ item }}" with_first_found: - files: - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}.yml" - - "{{ ansible_os_family|lower }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml" + - "{{ ansible_distribution | lower }}.yml" + - "{{ ansible_os_family | lower }}.yml" skip: true - name: Container Engine Acceleration Nvidia GPU | Set fact of download url Tesla set_fact: nvidia_driver_download_url_default: "{{ nvidia_gpu_tesla_base_url }}{{ nvidia_url_end }}" - when: nvidia_gpu_flavor|lower == "tesla" + when: nvidia_gpu_flavor | lower == "tesla" - name: Container Engine Acceleration Nvidia GPU | Set fact of download url GTX set_fact: nvidia_driver_download_url_default: "{{ 
nvidia_gpu_gtx_base_url }}{{ nvidia_url_end }}" - when: nvidia_gpu_flavor|lower == "gtx" + when: nvidia_gpu_flavor | lower == "gtx" - name: Container Engine Acceleration Nvidia GPU | Create addon dir file: diff --git a/roles/kubernetes-apps/csi_driver/cinder/defaults/main.yml b/roles/kubernetes-apps/csi_driver/cinder/defaults/main.yml index 6a13e8612..501f3689c 100644 --- a/roles/kubernetes-apps/csi_driver/cinder/defaults/main.yml +++ b/roles/kubernetes-apps/csi_driver/cinder/defaults/main.yml @@ -2,18 +2,18 @@ # To access Cinder, the CSI controller will need credentials to access # openstack apis. Per default this values will be # read from the environment. -cinder_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" -cinder_username: "{{ lookup('env','OS_USERNAME') }}" -cinder_password: "{{ lookup('env','OS_PASSWORD') }}" -cinder_application_credential_id: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_ID') }}" -cinder_application_credential_name: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_NAME') }}" -cinder_application_credential_secret: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_SECRET') }}" -cinder_region: "{{ lookup('env','OS_REGION_NAME') }}" -cinder_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID'),true) }}" -cinder_tenant_name: "{{ lookup('env','OS_TENANT_NAME')| default(lookup('env','OS_PROJECT_NAME'),true) }}" -cinder_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}" -cinder_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}" -cinder_cacert: "{{ lookup('env','OS_CACERT') }}" +cinder_auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}" +cinder_username: "{{ lookup('env', 'OS_USERNAME') }}" +cinder_password: "{{ lookup('env', 'OS_PASSWORD') }}" +cinder_application_credential_id: "{{ lookup('env', 'OS_APPLICATION_CREDENTIAL_ID') }}" +cinder_application_credential_name: "{{ lookup('env', 'OS_APPLICATION_CREDENTIAL_NAME') }}" +cinder_application_credential_secret: "{{ lookup('env', 'OS_APPLICATION_CREDENTIAL_SECRET') }}" 
+cinder_region: "{{ lookup('env', 'OS_REGION_NAME') }}" +cinder_tenant_id: "{{ lookup('env', 'OS_TENANT_ID') | default(lookup('env', 'OS_PROJECT_ID'), true) }}" +cinder_tenant_name: "{{ lookup('env', 'OS_TENANT_NAME') | default(lookup('env', 'OS_PROJECT_NAME'), true) }}" +cinder_domain_name: "{{ lookup('env', 'OS_USER_DOMAIN_NAME') }}" +cinder_domain_id: "{{ lookup('env', 'OS_USER_DOMAIN_ID') }}" +cinder_cacert: "{{ lookup('env', 'OS_CACERT') }}" # For now, only Cinder v3 is supported in Cinder CSI driver cinder_blockstorage_version: "v3" diff --git a/roles/kubernetes-apps/csi_driver/cinder/tasks/cinder-credential-check.yml b/roles/kubernetes-apps/csi_driver/cinder/tasks/cinder-credential-check.yml index cb65f42b0..d7977326b 100644 --- a/roles/kubernetes-apps/csi_driver/cinder/tasks/cinder-credential-check.yml +++ b/roles/kubernetes-apps/csi_driver/cinder/tasks/cinder-credential-check.yml @@ -16,7 +16,7 @@ msg: "cinder_application_credential_id is missing" when: - cinder_application_credential_name is defined - - cinder_application_credential_name|length > 0 + - cinder_application_credential_name | length > 0 - cinder_application_credential_id is not defined or not cinder_application_credential_id - name: Cinder CSI Driver | check cinder_application_credential_secret value @@ -24,7 +24,7 @@ msg: "cinder_application_credential_secret is missing" when: - cinder_application_credential_name is defined - - cinder_application_credential_name|length > 0 + - cinder_application_credential_name | length > 0 - cinder_application_credential_secret is not defined or not cinder_application_credential_secret - name: Cinder CSI Driver | check cinder_password value @@ -32,7 +32,7 @@ msg: "cinder_password is missing" when: - cinder_username is defined - - cinder_username|length > 0 + - cinder_username | length > 0 - cinder_application_credential_name is not defined or not cinder_application_credential_name - cinder_application_credential_secret is not defined or not 
cinder_application_credential_secret - cinder_password is not defined or not cinder_password diff --git a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2 b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2 index a4db64215..b0b8f78fd 100644 --- a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2 +++ b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2 @@ -133,7 +133,7 @@ spec: - name: ca-certs mountPath: /etc/ssl/certs readOnly: true -{% if ssl_ca_dirs|length %} +{% if ssl_ca_dirs | length %} {% for dir in ssl_ca_dirs %} - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }} mountPath: {{ dir }} @@ -155,7 +155,7 @@ spec: hostPath: path: /etc/ssl/certs type: DirectoryOrCreate -{% if ssl_ca_dirs|length %} +{% if ssl_ca_dirs | length %} {% for dir in ssl_ca_dirs %} - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }} hostPath: diff --git a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin.yml.j2 b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin.yml.j2 index 41f922a2f..289b16830 100644 --- a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin.yml.j2 +++ b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin.yml.j2 @@ -89,7 +89,7 @@ spec: - name: ca-certs mountPath: /etc/ssl/certs readOnly: true -{% if ssl_ca_dirs|length %} +{% if ssl_ca_dirs | length %} {% for dir in ssl_ca_dirs %} - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }} mountPath: {{ dir }} @@ -125,7 +125,7 @@ spec: hostPath: path: /etc/ssl/certs type: DirectoryOrCreate -{% if ssl_ca_dirs|length %} +{% if ssl_ca_dirs | length %} {% for dir in ssl_ca_dirs %} - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }} hostPath: diff --git 
a/roles/kubernetes-apps/csi_driver/upcloud/defaults/main.yml b/roles/kubernetes-apps/csi_driver/upcloud/defaults/main.yml index 0f9eac471..ea828f333 100644 --- a/roles/kubernetes-apps/csi_driver/upcloud/defaults/main.yml +++ b/roles/kubernetes-apps/csi_driver/upcloud/defaults/main.yml @@ -5,12 +5,12 @@ upcloud_csi_attacher_image_tag: "v3.4.0" upcloud_csi_resizer_image_tag: "v1.4.0" upcloud_csi_plugin_image_tag: "v0.3.3" upcloud_csi_node_image_tag: "v2.5.0" -upcloud_username: "{{ lookup('env','UPCLOUD_USERNAME') }}" -upcloud_password: "{{ lookup('env','UPCLOUD_PASSWORD') }}" +upcloud_username: "{{ lookup('env', 'UPCLOUD_USERNAME') }}" +upcloud_password: "{{ lookup('env', 'UPCLOUD_PASSWORD') }}" upcloud_tolerations: [] upcloud_csi_enable_volume_snapshot: false upcloud_csi_snapshot_controller_replicas: 2 upcloud_csi_snapshotter_image_tag: "v4.2.1" upcloud_csi_snapshot_controller_image_tag: "v4.2.1" upcloud_csi_snapshot_validation_webhook_image_tag: "v4.2.1" -upcloud_cacert: "{{ lookup('env','OS_CACERT') }}" +upcloud_cacert: "{{ lookup('env', 'OS_CACERT') }}" diff --git a/roles/kubernetes-apps/csi_driver/upcloud/tasks/main.yml b/roles/kubernetes-apps/csi_driver/upcloud/tasks/main.yml index 63e37bcf5..8f0b69f8c 100644 --- a/roles/kubernetes-apps/csi_driver/upcloud/tasks/main.yml +++ b/roles/kubernetes-apps/csi_driver/upcloud/tasks/main.yml @@ -9,7 +9,7 @@ msg: "UpCloud password is missing. 
Env UPCLOUD_PASSWORD is mandatory" when: - upcloud_username is defined - - upcloud_username|length > 0 + - upcloud_username | length > 0 - upcloud_password is not defined or not upcloud_password - name: UpCloud CSI Driver | Generate Manifests diff --git a/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml b/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml index ede7cb0b7..e01b36b1d 100644 --- a/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml +++ b/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml @@ -36,8 +36,8 @@ unsafe_show_logs: false # according to the above link , we can controler the block-volume-snapshot parameter vsphere_csi_block_volume_snapshot: false -external_vsphere_user: "{{ lookup('env','VSPHERE_USER') }}" -external_vsphere_password: "{{ lookup('env','VSPHERE_PASSWORD') }}" +external_vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}" +external_vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}" # Controller resources vsphere_csi_snapshotter_resources: {} diff --git a/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml b/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml index bb0161429..0fe5c49e3 100644 --- a/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml +++ b/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml @@ -44,11 +44,11 @@ command: "{{ kubectl }} create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n {{ vsphere_csi_namespace }} --dry-run --save-config -o yaml" register: vsphere_csi_secret_manifest when: inventory_hostname == groups['kube_control_plane'][0] - no_log: "{{ not (unsafe_show_logs|bool) }}" + no_log: "{{ not (unsafe_show_logs | bool) }}" - name: vSphere CSI Driver | Apply a CSI secret manifest command: cmd: "{{ kubectl }} apply -f -" stdin: "{{ vsphere_csi_secret_manifest.stdout }}" when: inventory_hostname == groups['kube_control_plane'][0] - no_log: "{{ not (unsafe_show_logs|bool) }}" + no_log: 
"{{ not (unsafe_show_logs | bool) }}" diff --git a/roles/kubernetes-apps/external_cloud_controller/hcloud/tasks/main.yml b/roles/kubernetes-apps/external_cloud_controller/hcloud/tasks/main.yml index e09f99d1f..c626e78e9 100644 --- a/roles/kubernetes-apps/external_cloud_controller/hcloud/tasks/main.yml +++ b/roles/kubernetes-apps/external_cloud_controller/hcloud/tasks/main.yml @@ -9,7 +9,7 @@ - {name: external-hcloud-cloud-secret, file: external-hcloud-cloud-secret.yml} - {name: external-hcloud-cloud-service-account, file: external-hcloud-cloud-service-account.yml} - {name: external-hcloud-cloud-role-bindings, file: external-hcloud-cloud-role-bindings.yml} - - {name: "{{ 'external-hcloud-cloud-controller-manager-ds-with-networks' if external_hcloud_cloud.with_networks else 'external-hcloud-cloud-controller-manager-ds' }}", file: "{{ 'external-hcloud-cloud-controller-manager-ds-with-networks.yml' if external_hcloud_cloud.with_networks else 'external-hcloud-cloud-controller-manager-ds.yml' }}"} + - {name: "{{ 'external-hcloud-cloud-controller-manager-ds-with-networks' if external_hcloud_cloud.with_networks else 'external-hcloud-cloud-controller-manager-ds' }}", file: "{{ 'external-hcloud-cloud-controller-manager-ds-with-networks.yml' if external_hcloud_cloud.with_networks else 'external-hcloud-cloud-controller-manager-ds.yml' }}"} register: external_hcloud_manifests when: inventory_hostname == groups['kube_control_plane'][0] diff --git a/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-secret.yml.j2 b/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-secret.yml.j2 index c2ea894a9..a750c2fd9 100644 --- a/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-secret.yml.j2 +++ b/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-secret.yml.j2 @@ -7,5 +7,5 @@ metadata: data: token: "{{ external_hcloud_cloud.hcloud_api_token | 
b64encode }}" {% if external_hcloud_cloud.with_networks %} - network: "{{ network_id|b64encode }}" + network: "{{ network_id | b64encode }}" {% endif %} diff --git a/roles/kubernetes-apps/external_cloud_controller/openstack/defaults/main.yml b/roles/kubernetes-apps/external_cloud_controller/openstack/defaults/main.yml index f19ad7deb..4bcf135a3 100644 --- a/roles/kubernetes-apps/external_cloud_controller/openstack/defaults/main.yml +++ b/roles/kubernetes-apps/external_cloud_controller/openstack/defaults/main.yml @@ -2,18 +2,18 @@ # The external cloud controller will need credentials to access # openstack apis. Per default these values will be # read from the environment. -external_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" -external_openstack_username: "{{ lookup('env','OS_USERNAME') }}" -external_openstack_password: "{{ lookup('env','OS_PASSWORD') }}" -external_openstack_application_credential_id: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_ID') }}" -external_openstack_application_credential_name: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_NAME') }}" -external_openstack_application_credential_secret: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_SECRET') }}" -external_openstack_region: "{{ lookup('env','OS_REGION_NAME') }}" -external_openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID'),true) }}" -external_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME')| default(lookup('env','OS_PROJECT_NAME'),true) }}" -external_openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}" -external_openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}" -external_openstack_cacert: "{{ lookup('env','OS_CACERT') }}" +external_openstack_auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}" +external_openstack_username: "{{ lookup('env', 'OS_USERNAME') }}" +external_openstack_password: "{{ lookup('env', 'OS_PASSWORD') }}" +external_openstack_application_credential_id: "{{ lookup('env', 'OS_APPLICATION_CREDENTIAL_ID') 
}}" +external_openstack_application_credential_name: "{{ lookup('env', 'OS_APPLICATION_CREDENTIAL_NAME') }}" +external_openstack_application_credential_secret: "{{ lookup('env', 'OS_APPLICATION_CREDENTIAL_SECRET') }}" +external_openstack_region: "{{ lookup('env', 'OS_REGION_NAME') }}" +external_openstack_tenant_id: "{{ lookup('env', 'OS_TENANT_ID') | default(lookup('env', 'OS_PROJECT_ID'), true) }}" +external_openstack_tenant_name: "{{ lookup('env', 'OS_TENANT_NAME') | default(lookup('env', 'OS_PROJECT_NAME'), true) }}" +external_openstack_domain_name: "{{ lookup('env', 'OS_USER_DOMAIN_NAME') }}" +external_openstack_domain_id: "{{ lookup('env', 'OS_USER_DOMAIN_ID') }}" +external_openstack_cacert: "{{ lookup('env', 'OS_CACERT') }}" ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset ## Format: diff --git a/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/openstack-credential-check.yml b/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/openstack-credential-check.yml index 9abc927e2..6a146584f 100644 --- a/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/openstack-credential-check.yml +++ b/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/openstack-credential-check.yml @@ -18,7 +18,7 @@ msg: "external_openstack_application_credential_id is missing" when: - external_openstack_application_credential_name is defined - - external_openstack_application_credential_name|length > 0 + - external_openstack_application_credential_name | length > 0 - external_openstack_application_credential_id is not defined or not external_openstack_application_credential_id @@ -27,7 +27,7 @@ msg: "external_openstack_application_credential_secret is missing" when: - external_openstack_application_credential_name is defined - - external_openstack_application_credential_name|length > 0 + - external_openstack_application_credential_name | length > 0 - 
external_openstack_application_credential_secret is not defined or not external_openstack_application_credential_secret @@ -36,7 +36,7 @@ msg: "external_openstack_password is missing" when: - external_openstack_username is defined - - external_openstack_username|length > 0 + - external_openstack_username | length > 0 - external_openstack_application_credential_name is not defined or not external_openstack_application_credential_name - external_openstack_application_credential_secret is not defined or not external_openstack_application_credential_secret - external_openstack_password is not defined or not external_openstack_password diff --git a/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-ds.yml.j2 b/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-ds.yml.j2 index 6649a24ec..565875dff 100644 --- a/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-ds.yml.j2 +++ b/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-ds.yml.j2 @@ -57,7 +57,7 @@ spec: - mountPath: /etc/ssl/certs name: ca-certs readOnly: true -{% if ssl_ca_dirs|length %} +{% if ssl_ca_dirs | length %} {% for dir in ssl_ca_dirs %} - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }} mountPath: {{ dir }} @@ -98,7 +98,7 @@ spec: hostPath: path: /etc/ssl/certs type: DirectoryOrCreate -{% if ssl_ca_dirs|length %} +{% if ssl_ca_dirs | length %} {% for dir in ssl_ca_dirs %} - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }} hostPath: diff --git a/roles/kubernetes-apps/external_cloud_controller/vsphere/defaults/main.yml b/roles/kubernetes-apps/external_cloud_controller/vsphere/defaults/main.yml index 91b126ed9..b6fb797a8 100644 --- a/roles/kubernetes-apps/external_cloud_controller/vsphere/defaults/main.yml +++ 
b/roles/kubernetes-apps/external_cloud_controller/vsphere/defaults/main.yml @@ -10,5 +10,5 @@ external_vsphere_insecure: "true" external_vsphere_cloud_controller_extra_args: {} external_vsphere_cloud_controller_image_tag: "latest" -external_vsphere_user: "{{ lookup('env','VSPHERE_USER') }}" -external_vsphere_password: "{{ lookup('env','VSPHERE_PASSWORD') }}" +external_vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}" +external_vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}" diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2 index ac3bb33db..8d9eb08bb 100644 --- a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2 +++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2 @@ -19,7 +19,7 @@ spec: app: cephfs-provisioner version: {{ cephfs_provisioner_image_tag }} spec: - priorityClassName: {% if cephfs_provisioner_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + priorityClassName: {% if cephfs_provisioner_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }} serviceAccount: cephfs-provisioner containers: - name: cephfs-provisioner diff --git a/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-deployment.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-deployment.yml.j2 index 6922691cf..6ce426a00 100644 --- a/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-deployment.yml.j2 +++ b/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-deployment.yml.j2 @@ -24,7 +24,7 @@ spec: - start - --config - 
/etc/config/config.json -{% if local_path_provisioner_debug|default(false) %} +{% if local_path_provisioner_debug | default(false) %} - --debug {% endif %} volumeMounts: diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml index 16ed6ffab..38afefb2c 100644 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml @@ -12,7 +12,7 @@ local_volume_provisioner_use_node_name_only: false local_volume_provisioner_storage_classes: | { "{{ local_volume_provisioner_storage_class | default('local-storage') }}": { - "host_dir": "{{ local_volume_provisioner_base_dir | default ('/mnt/disks') }}", + "host_dir": "{{ local_volume_provisioner_base_dir | default('/mnt/disks') }}", "mount_dir": "{{ local_volume_provisioner_mount_dir | default('/mnt/disks') }}", "volume_mode": "Filesystem", "fs_type": "ext4" diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2 index 76625b6df..7e37283b1 100644 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2 +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2 @@ -1,8 +1,8 @@ # Macro to convert camelCase dictionary keys to snake_case keys {% macro convert_keys(mydict) -%} - {% for key in mydict.keys()|list -%} + {% for key in mydict.keys() | list -%} {% set key_split = key.split('_') -%} - {% set new_key = key_split[0] + key_split[1:]|map('capitalize')|join -%} + {% set new_key = key_split[0] + key_split[1:] | map('capitalize') | join -%} {% set value = mydict.pop(key) -%} {{ 
mydict.__setitem__(new_key, value) -}} {{ convert_keys(value) if value is mapping else None -}} diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 index a8747a230..90a473090 100644 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 @@ -18,7 +18,7 @@ spec: k8s-app: local-volume-provisioner version: {{ local_volume_provisioner_image_tag }} spec: - priorityClassName: {% if local_volume_provisioner_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + priorityClassName: {% if local_volume_provisioner_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }} serviceAccountName: local-volume-provisioner nodeSelector: kubernetes.io/os: linux diff --git a/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/deploy-rbd-provisioner.yml.j2 b/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/deploy-rbd-provisioner.yml.j2 index dccc16564..b8643db64 100644 --- a/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/deploy-rbd-provisioner.yml.j2 +++ b/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/deploy-rbd-provisioner.yml.j2 @@ -21,7 +21,7 @@ spec: app: rbd-provisioner version: {{ rbd_provisioner_image_tag }} spec: - priorityClassName: {% if rbd_provisioner_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + priorityClassName: {% if rbd_provisioner_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }} serviceAccount: rbd-provisioner containers: - name: 
rbd-provisioner diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml index 0ac7edca9..eae0e2171 100644 --- a/roles/kubernetes-apps/helm/tasks/main.yml +++ b/roles/kubernetes-apps/helm/tasks/main.yml @@ -3,11 +3,11 @@ include_vars: "{{ item }}" with_first_found: - files: - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}.yml" - - "{{ ansible_os_family|lower }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml" + - "{{ ansible_distribution | lower }}.yml" + - "{{ ansible_os_family | lower }}.yml" - defaults.yml paths: - ../vars diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml b/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml index b7751d512..0f58bd5bb 100644 --- a/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml @@ -10,9 +10,9 @@ cert_manager_controller_extra_args: [] ## Allow http_proxy, https_proxy and no_proxy environment variables ## Details https://github.com/kubernetes-sigs/kubespray/blob/master/docs/proxy.md -cert_manager_http_proxy: "{{ http_proxy|default('') }}" -cert_manager_https_proxy: "{{ https_proxy|default('') }}" -cert_manager_no_proxy: "{{ no_proxy|default('') }}" +cert_manager_http_proxy: "{{ http_proxy | default('') }}" +cert_manager_https_proxy: "{{ https_proxy | default('') }}" +cert_manager_no_proxy: "{{ no_proxy | 
default('') }}" ## Change leader election namespace when deploying on GKE Autopilot that forbid the changes on kube-system namespace. ## See https://github.com/jetstack/cert-manager/issues/3717 diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2 index 4afb75d3a..70e4ea0ea 100644 --- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2 +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2 @@ -35,7 +35,7 @@ spec: tolerations: {{ ingress_nginx_tolerations | to_nice_yaml(indent=2) | indent(width=8) }} {% endif %} - priorityClassName: {% if ingress_nginx_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + priorityClassName: {% if ingress_nginx_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }} containers: - name: ingress-nginx-controller image: {{ ingress_nginx_controller_image_repo }}:{{ ingress_nginx_controller_image_tag }} diff --git a/roles/kubernetes-apps/metallb/tasks/main.yml b/roles/kubernetes-apps/metallb/tasks/main.yml index 91d16e5c3..4b26d3a78 100644 --- a/roles/kubernetes-apps/metallb/tasks/main.yml +++ b/roles/kubernetes-apps/metallb/tasks/main.yml @@ -41,7 +41,7 @@ name: "MetalLB" kubectl: "{{ bin_dir }}/kubectl" filename: "{{ kube_config_dir }}/metallb.yaml" - state: "{{ metallb_rendering.changed | ternary('latest','present') }}" + state: "{{ metallb_rendering.changed | ternary('latest', 'present') }}" wait: true become: true when: @@ -67,7 +67,7 @@ name: "MetalLB" kubectl: "{{ bin_dir }}/kubectl" filename: "{{ kube_config_dir }}/pools.yaml" - state: "{{ pools_rendering.changed | ternary('latest','present') }}" + state: "{{ pools_rendering.changed | ternary('latest', 'present') }}" become: true when: - 
inventory_hostname == groups['kube_control_plane'][0] @@ -87,7 +87,7 @@ name: "MetalLB" kubectl: "{{ bin_dir }}/kubectl" filename: "{{ kube_config_dir }}/layer2.yaml" - state: "{{ layer2_rendering.changed | ternary('latest','present') }}" + state: "{{ layer2_rendering.changed | ternary('latest', 'present') }}" become: true when: - inventory_hostname == groups['kube_control_plane'][0] @@ -107,7 +107,7 @@ name: "MetalLB" kubectl: "{{ bin_dir }}/kubectl" filename: "{{ kube_config_dir }}/layer3.yaml" - state: "{{ layer3_rendering.changed | ternary('latest','present') }}" + state: "{{ layer3_rendering.changed | ternary('latest', 'present') }}" become: true when: - inventory_hostname == groups['kube_control_plane'][0] diff --git a/roles/kubernetes-apps/metallb/templates/layer3.yaml.j2 b/roles/kubernetes-apps/metallb/templates/layer3.yaml.j2 index 57d9465be..490bae24f 100644 --- a/roles/kubernetes-apps/metallb/templates/layer3.yaml.j2 +++ b/roles/kubernetes-apps/metallb/templates/layer3.yaml.j2 @@ -57,7 +57,7 @@ spec: aggregationLengthV6: 128 communities: - no-advertise - localpref: "{{ peer.localpref | default ("100") }}" + localpref: "{{ peer.localpref | default("100") }}" ipAddressPools: {% for address_pool in peer.address_pool %} - "{{ address_pool }}" diff --git a/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml b/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml index bfaa9b3a2..8e56d34ee 100644 --- a/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml @@ -9,10 +9,10 @@ state: "latest" delegate_to: "{{ groups['kube_control_plane'][0] }}" run_once: true - with_items: "{{ multus_manifest_1.results + (multus_nodes_list|map('extract', hostvars, 'multus_manifest_2')|list|json_query('[].results')) }}" + with_items: "{{ multus_manifest_1.results + (multus_nodes_list | map('extract', hostvars, 'multus_manifest_2') | list | json_query('[].results')) }}" loop_control: label: "{{ 
item.item.name }}" vars: - multus_nodes_list: "{{ groups['k8s_cluster'] if ansible_play_batch|length == ansible_play_hosts_all|length else ansible_play_batch }}" + multus_nodes_list: "{{ groups['k8s_cluster'] if ansible_play_batch | length == ansible_play_hosts_all | length else ansible_play_batch }}" when: - not item is skipped diff --git a/roles/kubernetes-apps/registry/tasks/main.yml b/roles/kubernetes-apps/registry/tasks/main.yml index 5090212e1..06f1f6a13 100644 --- a/roles/kubernetes-apps/registry/tasks/main.yml +++ b/roles/kubernetes-apps/registry/tasks/main.yml @@ -8,21 +8,21 @@ fail: msg: "registry_service_cluster_ip support only compatible with ClusterIP." when: - - registry_service_cluster_ip is defined and registry_service_cluster_ip|length > 0 + - registry_service_cluster_ip is defined and registry_service_cluster_ip | length > 0 - registry_service_type != "ClusterIP" - name: Registry | Stop if registry_service_loadbalancer_ip is defined when registry_service_type is not 'LoadBalancer' fail: msg: "registry_service_loadbalancer_ip support only compatible with LoadBalancer." when: - - registry_service_loadbalancer_ip is defined and registry_service_loadbalancer_ip|length > 0 + - registry_service_loadbalancer_ip is defined and registry_service_loadbalancer_ip | length > 0 - registry_service_type != "LoadBalancer" - name: Registry | Stop if registry_service_nodeport is defined when registry_service_type is not 'NodePort' fail: msg: "registry_service_nodeport support only compatible with NodePort." 
when: - - registry_service_nodeport is defined and registry_service_nodeport|length > 0 + - registry_service_nodeport is defined and registry_service_nodeport | length > 0 - registry_service_type != "NodePort" - name: Registry | Create addon dir diff --git a/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2 index 47519f9d8..3b516845c 100644 --- a/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2 +++ b/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2 @@ -24,7 +24,7 @@ spec: k8s-app: registry version: v{{ registry_image_tag }} spec: - priorityClassName: {% if registry_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + priorityClassName: {% if registry_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }} serviceAccountName: registry securityContext: fsGroup: 1000 diff --git a/roles/kubernetes/client/tasks/main.yml b/roles/kubernetes/client/tasks/main.yml index cb9e81e79..4483038f9 100644 --- a/roles/kubernetes/client/tasks/main.yml +++ b/roles/kubernetes/client/tasks/main.yml @@ -1,6 +1,7 @@ --- - name: Set external kube-apiserver endpoint set_fact: + # noqa: jinja[spacing] external_apiserver_address: >- {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined -%} {{ loadbalancer_apiserver.address }} @@ -9,9 +10,10 @@ {%- else -%} {{ kube_apiserver_access_address }} {%- endif -%} + # noqa: jinja[spacing] external_apiserver_port: >- {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined and loadbalancer_apiserver.port is defined -%} - {{ loadbalancer_apiserver.port|default(kube_apiserver_port) }} + {{ loadbalancer_apiserver.port | default(kube_apiserver_port) }} {%- else -%} {{ kube_apiserver_port }} {%- endif -%} @@ -69,9 +71,9 @@ user_certs: "{{ admin_kubeconfig['users'][0]['user'] }}" username: "kubernetes-admin-{{ 
cluster_name }}" context: "kubernetes-admin-{{ cluster_name }}@{{ cluster_name }}" - override_cluster_name: "{{ { 'clusters': [ { 'cluster': (cluster_infos|combine({'server': 'https://'+external_apiserver_address+':'+(external_apiserver_port|string)})), 'name': cluster_name } ] } }}" - override_context: "{{ { 'contexts': [ { 'context': { 'user': username, 'cluster': cluster_name }, 'name': context } ], 'current-context': context } }}" - override_user: "{{ { 'users': [ { 'name': username, 'user': user_certs } ] } }}" + override_cluster_name: "{{ {'clusters': [{'cluster': (cluster_infos | combine({'server': 'https://' + external_apiserver_address + ':' + (external_apiserver_port | string)})), 'name': cluster_name}]} }}" + override_context: "{{ {'contexts': [{'context': {'user': username, 'cluster': cluster_name}, 'name': context}], 'current-context': context} }}" + override_user: "{{ {'users': [{'name': username, 'user': user_certs}]} }}" when: kubeconfig_localhost - name: Write admin kubeconfig on ansible host diff --git a/roles/kubernetes/control-plane/defaults/main/kube-proxy.yml b/roles/kubernetes/control-plane/defaults/main/kube-proxy.yml index 5503212ab..24ebc6cc5 100644 --- a/roles/kubernetes/control-plane/defaults/main/kube-proxy.yml +++ b/roles/kubernetes/control-plane/defaults/main/kube-proxy.yml @@ -111,4 +111,4 @@ kube_proxy_oom_score_adj: -999 # portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed # in order to proxy service traffic. If unspecified, 0, or (0-0) then ports will be randomly chosen. 
-kube_proxy_port_range: '' \ No newline at end of file +kube_proxy_port_range: '' diff --git a/roles/kubernetes/control-plane/defaults/main/main.yml b/roles/kubernetes/control-plane/defaults/main/main.yml index c25fbc10d..2a9eda14a 100644 --- a/roles/kubernetes/control-plane/defaults/main/main.yml +++ b/roles/kubernetes/control-plane/defaults/main/main.yml @@ -5,7 +5,7 @@ upgrade_cluster_setup: false # By default the external API listens on all interfaces, this can be changed to # listen on a specific address/interface. # NOTE: If you specific address/interface and use loadbalancer_apiserver_localhost -# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }} too. +# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too. kube_apiserver_bind_address: 0.0.0.0 # A port range to reserve for services with NodePort visibility. @@ -181,12 +181,12 @@ kube_encryption_resources: [secrets] # If non-empty, will use this string as identification instead of the actual hostname kube_override_hostname: >- - {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} + {%- if cloud_provider is defined and cloud_provider in ['aws'] -%} {%- else -%} {{ inventory_hostname }} {%- endif -%} -secrets_encryption_query: "resources[*].providers[0].{{kube_encryption_algorithm}}.keys[0].secret" +secrets_encryption_query: "resources[*].providers[0].{{ kube_encryption_algorithm }}.keys[0].secret" ## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. 
# tls_min_version: "" diff --git a/roles/kubernetes/control-plane/tasks/define-first-kube-control.yml b/roles/kubernetes/control-plane/tasks/define-first-kube-control.yml index d01f511bd..64e2de785 100644 --- a/roles/kubernetes/control-plane/tasks/define-first-kube-control.yml +++ b/roles/kubernetes/control-plane/tasks/define-first-kube-control.yml @@ -8,7 +8,7 @@ - name: Set fact joined_control_panes set_fact: - joined_control_planes: "{{ ((kube_control_planes_raw.stdout| from_json)['items'])| default([]) | map (attribute='metadata') | map (attribute='name') | list }}" + joined_control_planes: "{{ ((kube_control_planes_raw.stdout | from_json)['items']) | default([]) | map(attribute='metadata') | map(attribute='name') | list }}" delegate_to: item loop: "{{ groups['kube_control_plane'] }}" when: kube_control_planes_raw is succeeded @@ -16,4 +16,4 @@ - name: Set fact first_kube_control_plane set_fact: - first_kube_control_plane: "{{ joined_control_planes|default([]) | first | default(groups['kube_control_plane']|first) }}" + first_kube_control_plane: "{{ joined_control_planes | default([]) | first | default(groups['kube_control_plane'] | first) }}" diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml b/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml index a4869fec8..f1c92aeee 100644 --- a/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml +++ b/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml @@ -1,6 +1,7 @@ --- - name: Set kubeadm_discovery_address set_fact: + # noqa: jinja[spacing] kubeadm_discovery_address: >- {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%} {{ first_kube_control_plane_address }}:{{ kube_apiserver_port }} diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml index 097fb0f44..4f1ea288d 100644 --- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml +++ 
b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml @@ -52,26 +52,26 @@ path: "{{ audit_policy_file | dirname }}" state: directory mode: 0640 - when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false) + when: kubernetes_audit | default(false) or kubernetes_audit_webhook | default(false) - name: Write api audit policy yaml template: src: apiserver-audit-policy.yaml.j2 dest: "{{ audit_policy_file }}" mode: 0640 - when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false) + when: kubernetes_audit | default(false) or kubernetes_audit_webhook | default(false) - name: Write api audit webhook config yaml template: src: apiserver-audit-webhook-config.yaml.j2 dest: "{{ audit_webhook_config_file }}" mode: 0640 - when: kubernetes_audit_webhook|default(false) + when: kubernetes_audit_webhook | default(false) # Nginx LB(default), If kubeadm_config_api_fqdn is defined, use other LB by kubeadm controlPlaneEndpoint. - name: set kubeadm_config_api_fqdn define set_fact: - kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}" + kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name | default('lb-apiserver.kubernetes.local') }}" when: loadbalancer_apiserver is defined - name: Set kubeadm api version to v1beta3 @@ -100,8 +100,8 @@ - name: kubeadm | Push admission control config files template: - src: "{{ item|lower }}.yaml.j2" - dest: "{{ kube_config_dir }}/admission-controls/{{ item|lower }}.yaml" + src: "{{ item | lower }}.yaml.j2" + dest: "{{ kube_config_dir }}/admission-controls/{{ item | lower }}.yaml" mode: 0640 when: - kube_apiserver_admission_control_config_file @@ -123,8 +123,8 @@ register: apiserver_sans_host_check changed_when: apiserver_sans_host_check.stdout is not search('does match certificate') vars: - apiserver_ips: "{{ apiserver_sans|map('ipaddr')|reject('equalto', False)|list }}" - apiserver_hosts: "{{ apiserver_sans|difference(apiserver_ips) }}" + 
apiserver_ips: "{{ apiserver_sans | map('ipaddr') | reject('equalto', False) | list }}" + apiserver_hosts: "{{ apiserver_sans | difference(apiserver_ips) }}" when: - kubeadm_already_run.stat.exists - not kube_external_ca_mode @@ -186,7 +186,7 @@ - name: set kubeadm certificate key set_fact: - kubeadm_certificate_key: "{{ item | regex_search('--certificate-key ([^ ]+)','\\1') | first }}" + kubeadm_certificate_key: "{{ item | regex_search('--certificate-key ([^ ]+)', '\\1') | first }}" with_items: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_init'].stdout_lines | default([]) }}" when: - kubeadm_certificate_key is not defined diff --git a/roles/kubernetes/control-plane/tasks/main.yml b/roles/kubernetes/control-plane/tasks/main.yml index 4df478343..2fab9d57c 100644 --- a/roles/kubernetes/control-plane/tasks/main.yml +++ b/roles/kubernetes/control-plane/tasks/main.yml @@ -8,14 +8,14 @@ src: webhook-token-auth-config.yaml.j2 dest: "{{ kube_config_dir }}/webhook-token-auth-config.yaml" mode: 0640 - when: kube_webhook_token_auth|default(false) + when: kube_webhook_token_auth | default(false) - name: Create webhook authorization config template: src: webhook-authorization-config.yaml.j2 dest: "{{ kube_config_dir }}/webhook-authorization-config.yaml" mode: 0640 - when: kube_webhook_authorization|default(false) + when: kube_webhook_authorization | default(false) - name: Create kube-scheduler config template: diff --git a/roles/kubernetes/control-plane/tasks/pre-upgrade.yml b/roles/kubernetes/control-plane/tasks/pre-upgrade.yml index 4c33624e4..2d7dce5bd 100644 --- a/roles/kubernetes/control-plane/tasks/pre-upgrade.yml +++ b/roles/kubernetes/control-plane/tasks/pre-upgrade.yml @@ -6,7 +6,7 @@ with_items: - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"] register: kube_apiserver_manifest_replaced - when: etcd_secret_changed|default(false) + when: etcd_secret_changed | default(false) - name: "Pre-upgrade | Delete master containers forcefully" # noqa 
no-handler shell: "set -o pipefail && docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f" diff --git a/roles/kubernetes/control-plane/templates/admission-controls.yaml.j2 b/roles/kubernetes/control-plane/templates/admission-controls.yaml.j2 index 34f5f188c..fc4d0efbf 100644 --- a/roles/kubernetes/control-plane/templates/admission-controls.yaml.j2 +++ b/roles/kubernetes/control-plane/templates/admission-controls.yaml.j2 @@ -4,6 +4,6 @@ plugins: {% for plugin in kube_apiserver_enable_admission_plugins %} {% if plugin in kube_apiserver_admission_plugins_needs_configuration %} - name: {{ plugin }} - path: {{ kube_config_dir }}/{{ plugin|lower }}.yaml + path: {{ kube_config_dir }}/{{ plugin | lower }}.yaml {% endif %} {% endfor %} diff --git a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2 b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2 index e2d41fbe0..d284c7543 100644 --- a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2 +++ b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2 @@ -13,7 +13,7 @@ localAPIEndpoint: certificateKey: {{ kubeadm_certificate_key }} {% endif %} nodeRegistration: -{% if kube_override_hostname|default('') %} +{% if kube_override_hostname | default('') %} name: {{ kube_override_hostname }} {% endif %} {% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube_node'] %} @@ -89,7 +89,7 @@ etcd: {% endfor %} {% endif %} dns: - imageRepository: {{ coredns_image_repo | regex_replace('/coredns(?!/coredns).*$','') }} + imageRepository: {{ coredns_image_repo | regex_replace('/coredns(?!/coredns).*$', '') }} imageTag: {{ coredns_image_tag }} networking: dnsDomain: {{ dns_domain }} @@ -100,7 +100,7 @@ networking: {% if kubeadm_feature_gates %} featureGates: {% for feature in kubeadm_feature_gates %} - {{ feature|replace("=", ": ") }} + {{ feature | replace("=", ": ") }} {% endfor %} {% endif %} 
kubernetesVersion: {{ kube_version }} @@ -124,13 +124,13 @@ apiServer: {% endif %} authorization-mode: {{ authorization_modes | join(',') }} bind-address: {{ kube_apiserver_bind_address }} -{% if kube_apiserver_enable_admission_plugins|length > 0 %} +{% if kube_apiserver_enable_admission_plugins | length > 0 %} enable-admission-plugins: {{ kube_apiserver_enable_admission_plugins | join(',') }} {% endif %} {% if kube_apiserver_admission_control_config_file %} admission-control-config-file: {{ kube_config_dir }}/admission-controls.yaml {% endif %} -{% if kube_apiserver_disable_admission_plugins|length > 0 %} +{% if kube_apiserver_disable_admission_plugins | length > 0 %} disable-admission-plugins: {{ kube_apiserver_disable_admission_plugins | join(',') }} {% endif %} apiserver-count: "{{ kube_apiserver_count }}" @@ -144,13 +144,13 @@ apiServer: profiling: "{{ kube_profiling }}" request-timeout: "{{ kube_apiserver_request_timeout }}" enable-aggregator-routing: "{{ kube_api_aggregator_routing }}" -{% if kube_token_auth|default(true) %} +{% if kube_token_auth | default(true) %} token-auth-file: {{ kube_token_dir }}/known_tokens.csv {% endif %} {% if kube_apiserver_service_account_lookup %} service-account-lookup: "{{ kube_apiserver_service_account_lookup }}" {% endif %} -{% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %} +{% if kube_oidc_auth | default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %} oidc-issuer-url: "{{ kube_oidc_url }}" oidc-client-id: "{{ kube_oidc_client_id }}" {% if kube_oidc_ca_file is defined %} @@ -169,17 +169,17 @@ apiServer: oidc-groups-prefix: "{{ kube_oidc_groups_prefix }}" {% endif %} {% endif %} -{% if kube_webhook_token_auth|default(false) %} +{% if kube_webhook_token_auth | default(false) %} authentication-token-webhook-config-file: {{ kube_config_dir }}/webhook-token-auth-config.yaml {% endif %} -{% if kube_webhook_authorization|default(false) %} +{% if 
kube_webhook_authorization | default(false) %} authorization-webhook-config-file: {{ kube_config_dir }}/webhook-authorization-config.yaml {% endif %} {% if kube_encrypt_secret_data %} encryption-provider-config: {{ kube_cert_dir }}/secrets_encryption.yaml {% endif %} storage-backend: {{ kube_apiserver_storage_backend }} -{% if kube_api_runtime_config|length > 0 %} +{% if kube_api_runtime_config | length > 0 %} runtime-config: {{ kube_api_runtime_config | join(',') }} {% endif %} allow-privileged: "true" @@ -223,24 +223,24 @@ apiServer: {% if kubelet_rotate_server_certificates %} kubelet-certificate-authority: {{ kube_cert_dir }}/ca.crt {% endif %} -{% if kubernetes_audit or kube_token_auth|default(true) or kube_webhook_token_auth|default(false) or ( cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] ) or apiserver_extra_volumes or ssl_ca_dirs|length %} +{% if kubernetes_audit or kube_token_auth | default(true) or kube_webhook_token_auth | default(false) or ( cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] ) or apiserver_extra_volumes or ssl_ca_dirs | length %} extraVolumes: {% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %} - name: cloud-config hostPath: {{ kube_config_dir }}/cloud_config mountPath: {{ kube_config_dir }}/cloud_config {% endif %} -{% if kube_token_auth|default(true) %} +{% if kube_token_auth | default(true) %} - name: token-auth-config hostPath: {{ kube_token_dir }} mountPath: {{ kube_token_dir }} {% endif %} -{% if kube_webhook_token_auth|default(false) %} +{% if kube_webhook_token_auth | default(false) %} - name: webhook-token-auth-config hostPath: {{ kube_config_dir }}/webhook-token-auth-config.yaml mountPath: {{ kube_config_dir }}/webhook-token-auth-config.yaml {% endif %} -{% if kube_webhook_authorization|default(false) %} +{% if kube_webhook_authorization | default(false) %} - name: 
webhook-authorization-config hostPath: {{ kube_config_dir }}/webhook-authorization-config.yaml mountPath: {{ kube_config_dir }}/webhook-authorization-config.yaml @@ -269,7 +269,7 @@ apiServer: mountPath: {{ volume.mountPath }} readOnly: {{ volume.readOnly | d(not (volume.writable | d(false))) }} {% endfor %} -{% if ssl_ca_dirs|length %} +{% if ssl_ca_dirs | length %} {% for dir in ssl_ca_dirs %} - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }} hostPath: {{ dir }} @@ -316,7 +316,7 @@ controllerManager: configure-cloud-routes: "false" {% endif %} {% if kubelet_flexvolumes_plugins_dir is defined %} - flex-volume-plugin-dir: {{kubelet_flexvolumes_plugins_dir}} + flex-volume-plugin-dir: {{ kubelet_flexvolumes_plugins_dir }} {% endif %} {% if tls_min_version is defined %} tls-min-version: {{ tls_min_version }} @@ -352,7 +352,7 @@ scheduler: feature-gates: "{{ kube_scheduler_feature_gates | default(kube_feature_gates, true) | join(',') }}" {% endif %} profiling: "{{ kube_profiling }}" -{% if kube_kubeadm_scheduler_extra_args|length > 0 %} +{% if kube_kubeadm_scheduler_extra_args | length > 0 %} {% for key in kube_kubeadm_scheduler_extra_args %} {{ key }}: "{{ kube_kubeadm_scheduler_extra_args[key] }}" {% endfor %} @@ -422,7 +422,7 @@ portRange: {{ kube_proxy_port_range }} {% set feature_gates = ( kube_proxy_feature_gates | default(kube_feature_gates, true) ) %} featureGates: {% for feature in feature_gates %} - {{ feature|replace("=", ": ") }} + {{ feature | replace("=", ": ") }} {% endfor %} {% endif %} {# DNS settings for kubelet #} @@ -448,6 +448,6 @@ clusterDNS: {% set feature_gates = ( kubelet_feature_gates | default(kube_feature_gates, true) ) %} featureGates: {% for feature in feature_gates %} - {{ feature|replace("=", ": ") }} + {{ feature | replace("=", ": ") }} {% endfor %} {% endif %} diff --git a/roles/kubernetes/control-plane/templates/kubeadm-controlplane.v1beta3.yaml.j2 
b/roles/kubernetes/control-plane/templates/kubeadm-controlplane.v1beta3.yaml.j2 index 78e399d5f..fc696ae3e 100644 --- a/roles/kubernetes/control-plane/templates/kubeadm-controlplane.v1beta3.yaml.j2 +++ b/roles/kubernetes/control-plane/templates/kubeadm-controlplane.v1beta3.yaml.j2 @@ -17,7 +17,7 @@ controlPlane: bindPort: {{ kube_apiserver_port }} certificateKey: {{ kubeadm_certificate_key }} nodeRegistration: - name: {{ kube_override_hostname|default(inventory_hostname) }} + name: {{ kube_override_hostname | default(inventory_hostname) }} criSocket: {{ cri_socket }} {% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube_node'] %} taints: diff --git a/roles/kubernetes/control-plane/templates/kubescheduler-config.yaml.j2 b/roles/kubernetes/control-plane/templates/kubescheduler-config.yaml.j2 index be41418d4..4be4b083d 100644 --- a/roles/kubernetes/control-plane/templates/kubescheduler-config.yaml.j2 +++ b/roles/kubernetes/control-plane/templates/kubescheduler-config.yaml.j2 @@ -1,5 +1,5 @@ {% set kubescheduler_config_api_version = "v1beta3" %} -apiVersion: kubescheduler.config.k8s.io/{{ kubescheduler_config_api_version|d('v1') }} +apiVersion: kubescheduler.config.k8s.io/{{ kubescheduler_config_api_version | d('v1') }} kind: KubeSchedulerConfiguration clientConnection: kubeconfig: "{{ kube_config_dir }}/scheduler.conf" diff --git a/roles/kubernetes/control-plane/templates/podsecurity.yaml.j2 b/roles/kubernetes/control-plane/templates/podsecurity.yaml.j2 index 0a650fa10..c97373306 100644 --- a/roles/kubernetes/control-plane/templates/podsecurity.yaml.j2 +++ b/roles/kubernetes/control-plane/templates/podsecurity.yaml.j2 @@ -9,9 +9,9 @@ defaults: warn: "{{ kube_pod_security_default_warn }}" warn-version: "{{ kube_pod_security_default_warn_version }}" exemptions: - usernames: {{ kube_pod_security_exemptions_usernames|to_json }} - runtimeClasses: {{ kube_pod_security_exemptions_runtime_class_names|to_json }} - namespaces: {{ 
kube_pod_security_exemptions_namespaces|to_json }} + usernames: {{ kube_pod_security_exemptions_usernames | to_json }} + runtimeClasses: {{ kube_pod_security_exemptions_runtime_class_names | to_json }} + namespaces: {{ kube_pod_security_exemptions_namespaces | to_json }} {% else %} # This file is intentinally left empty as kube_pod_security_use_default={{ kube_pod_security_use_default }} {% endif %} diff --git a/roles/kubernetes/control-plane/templates/secrets_encryption.yaml.j2 b/roles/kubernetes/control-plane/templates/secrets_encryption.yaml.j2 index 9105bb69b..3c521ff12 100644 --- a/roles/kubernetes/control-plane/templates/secrets_encryption.yaml.j2 +++ b/roles/kubernetes/control-plane/templates/secrets_encryption.yaml.j2 @@ -2,7 +2,7 @@ apiVersion: apiserver.config.k8s.io/v1 kind: EncryptionConfiguration resources: - resources: -{{ kube_encryption_resources|to_nice_yaml|indent(4, True) }} +{{ kube_encryption_resources | to_nice_yaml | indent(4, True) }} providers: - {{ kube_encryption_algorithm }}: keys: diff --git a/roles/kubernetes/kubeadm/defaults/main.yml b/roles/kubernetes/kubeadm/defaults/main.yml index 0449b8ae7..61b132e61 100644 --- a/roles/kubernetes/kubeadm/defaults/main.yml +++ b/roles/kubernetes/kubeadm/defaults/main.yml @@ -6,7 +6,7 @@ kubeadm_join_timeout: 120s # If non-empty, will use this string as identification instead of the actual hostname kube_override_hostname: >- - {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} + {%- if cloud_provider is defined and cloud_provider in ['aws'] -%} {%- else -%} {{ inventory_hostname }} {%- endif -%} diff --git a/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml b/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml index 651bcc39d..d39ea2b9f 100644 --- a/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml +++ b/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml @@ -51,7 +51,7 @@ register: "etcd_client_cert_serial_result" changed_when: false when: - - inventory_hostname in 
groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort + - inventory_hostname in groups['k8s_cluster'] | union(groups['calico_rr'] | default([])) | unique | sort tags: - network diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml index 6449e01e3..c8b76f019 100644 --- a/roles/kubernetes/kubeadm/tasks/main.yml +++ b/roles/kubernetes/kubeadm/tasks/main.yml @@ -1,6 +1,7 @@ --- - name: Set kubeadm_discovery_address set_fact: + # noqa: jinja[spacing] kubeadm_discovery_address: >- {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%} {{ first_kube_control_plane_address }}:{{ kube_apiserver_port }} @@ -138,7 +139,7 @@ args: executable: /bin/bash run_once: true - delegate_to: "{{ groups['kube_control_plane']|first }}" + delegate_to: "{{ groups['kube_control_plane'] | first }}" delegate_facts: false when: - kubeadm_config_api_fqdn is not defined @@ -158,7 +159,7 @@ - name: Restart all kube-proxy pods to ensure that they load the new configmap command: "{{ kubectl }} delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0" run_once: true - delegate_to: "{{ groups['kube_control_plane']|first }}" + delegate_to: "{{ groups['kube_control_plane'] | first }}" delegate_facts: false when: - kubeadm_config_api_fqdn is not defined diff --git a/roles/kubernetes/node-label/tasks/main.yml b/roles/kubernetes/node-label/tasks/main.yml index 0904ffca7..cda700ce0 100644 --- a/roles/kubernetes/node-label/tasks/main.yml +++ b/roles/kubernetes/node-label/tasks/main.yml @@ -17,10 +17,10 @@ - name: Node label for nvidia GPU nodes set_fact: - role_node_labels: "{{ role_node_labels + [ 'nvidia.com/gpu=true' ] }}" + role_node_labels: "{{ role_node_labels + ['nvidia.com/gpu=true'] }}" when: - nvidia_gpu_nodes is defined - - nvidia_accelerator_enabled|bool + - nvidia_accelerator_enabled | bool - inventory_hostname in nvidia_gpu_nodes - name: Set inventory node label to empty list @@ -29,8 
+29,8 @@ - name: Populate inventory node label set_fact: - inventory_node_labels: "{{ inventory_node_labels + [ '%s=%s'|format(item.key, item.value) ] }}" - loop: "{{ node_labels|d({})|dict2items }}" + inventory_node_labels: "{{ inventory_node_labels + ['%s=%s' | format(item.key, item.value)] }}" + loop: "{{ node_labels | d({}) | dict2items }}" when: - node_labels is defined - node_labels is mapping diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml index 1eb288c17..f5dbf38ab 100644 --- a/roles/kubernetes/node/defaults/main.yml +++ b/roles/kubernetes/node/defaults/main.yml @@ -141,7 +141,7 @@ kubelet_node_custom_flags: [] # If non-empty, will use this string as identification instead of the actual hostname kube_override_hostname: >- - {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} + {%- if cloud_provider is defined and cloud_provider in ['aws'] -%} {%- else -%} {{ inventory_hostname }} {%- endif -%} @@ -161,14 +161,14 @@ sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" # For the openstack integration kubelet will need credentials to access # openstack apis like nova and cinder. Per default this values will be # read from the environment. 
-openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" -openstack_username: "{{ lookup('env','OS_USERNAME') }}" -openstack_password: "{{ lookup('env','OS_PASSWORD') }}" -openstack_region: "{{ lookup('env','OS_REGION_NAME') }}" -openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID')|default(lookup('env','OS_PROJECT_NAME'),true),true) }}" -openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}" -openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}" -openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}" +openstack_auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}" +openstack_username: "{{ lookup('env', 'OS_USERNAME') }}" +openstack_password: "{{ lookup('env', 'OS_PASSWORD') }}" +openstack_region: "{{ lookup('env', 'OS_REGION_NAME') }}" +openstack_tenant_id: "{{ lookup('env', 'OS_TENANT_ID') | default(lookup('env', 'OS_PROJECT_ID') | default(lookup('env', 'OS_PROJECT_NAME'), true), true) }}" +openstack_tenant_name: "{{ lookup('env', 'OS_TENANT_NAME') }}" +openstack_domain_name: "{{ lookup('env', 'OS_USER_DOMAIN_NAME') }}" +openstack_domain_id: "{{ lookup('env', 'OS_USER_DOMAIN_ID') }}" # For the vsphere integration, kubelet will need credentials to access # vsphere apis @@ -186,7 +186,7 @@ vsphere_resource_pool: "{{ lookup('env', 'VSPHERE_RESOURCE_POOL') }}" vsphere_scsi_controller_type: pvscsi # vsphere_public_network is name of the network the VMs are joined to -vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}" +vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK') | default('') }}" ## When azure is used, you need to also set the following variables. 
## see docs/azure.md for details on how to get these values diff --git a/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml b/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml index 62337fc29..8ff55cf99 100644 --- a/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml +++ b/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml @@ -61,15 +61,15 @@ - name: "check azure_exclude_master_from_standard_lb is a bool" assert: - that: azure_exclude_master_from_standard_lb |type_debug == 'bool' + that: azure_exclude_master_from_standard_lb | type_debug == 'bool' - name: "check azure_disable_outbound_snat is a bool" assert: - that: azure_disable_outbound_snat |type_debug == 'bool' + that: azure_disable_outbound_snat | type_debug == 'bool' - name: "check azure_use_instance_metadata is a bool" assert: - that: azure_use_instance_metadata |type_debug == 'bool' + that: azure_use_instance_metadata | type_debug == 'bool' - name: check azure_vmtype value fail: diff --git a/roles/kubernetes/node/tasks/facts.yml b/roles/kubernetes/node/tasks/facts.yml index d68d5bdde..43af5cceb 100644 --- a/roles/kubernetes/node/tasks/facts.yml +++ b/roles/kubernetes/node/tasks/facts.yml @@ -52,9 +52,9 @@ include_vars: "{{ item }}" with_first_found: - files: - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}.yml" - - "{{ ansible_os_family|lower }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml" + 
- "{{ ansible_distribution | lower }}.yml" + - "{{ ansible_os_family | lower }}.yml" skip: true diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index 99babd64f..e79ca5c4d 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -151,7 +151,7 @@ - name: Test if openstack_cacert is a base64 string set_fact: - openstack_cacert_is_base64: "{% if openstack_cacert is search ('^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)?$') %}true{% else %}false{% endif %}" + openstack_cacert_is_base64: "{% if openstack_cacert is search('^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)?$') %}true{% else %}false{% endif %}" when: - cloud_provider is defined - cloud_provider == 'openstack' diff --git a/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 b/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 index d8cc557f4..995919fa0 100644 --- a/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 +++ b/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 @@ -34,13 +34,13 @@ healthzPort: {{ kubelet_healthz_port }} healthzBindAddress: {{ kubelet_healthz_bind_address }} kubeletCgroups: {{ kubelet_kubelet_cgroups }} clusterDomain: {{ dns_domain }} -{% if kubelet_protect_kernel_defaults|bool %} +{% if kubelet_protect_kernel_defaults | bool %} protectKernelDefaults: true {% endif %} -{% if kubelet_rotate_certificates|bool %} +{% if kubelet_rotate_certificates | bool %} rotateCertificates: true {% endif %} -{% if kubelet_rotate_server_certificates|bool %} +{% if kubelet_rotate_server_certificates | bool %} serverTLSBootstrap: true {% endif %} {# DNS settings for kubelet #} @@ -60,10 +60,10 @@ clusterDNS: - {{ dns_address }} {% endfor %} {# Node reserved CPU/memory #} -{% if kube_reserved|bool %} +{% if kube_reserved | bool %} kubeReservedCgroup: {{ kube_reserved_cgroups }} kubeReserved: -{% if is_kube_master|bool %} +{% if is_kube_master | bool %}
cpu: {{ kube_master_cpu_reserved }} memory: {{ kube_master_memory_reserved }} {% if kube_master_ephemeral_storage_reserved is defined %} @@ -83,10 +83,10 @@ kubeReserved: {% endif %} {% endif %} {% endif %} -{% if system_reserved|bool %} +{% if system_reserved | bool %} systemReservedCgroup: {{ system_reserved_cgroups }} systemReserved: -{% if is_kube_master|bool %} +{% if is_kube_master | bool %} cpu: {{ system_master_cpu_reserved }} memory: {{ system_master_memory_reserved }} {% if system_master_ephemeral_storage_reserved is defined %} @@ -106,10 +106,10 @@ systemReserved: {% endif %} {% endif %} {% endif %} -{% if is_kube_master|bool and eviction_hard_control_plane is defined and eviction_hard_control_plane %} +{% if is_kube_master | bool and eviction_hard_control_plane is defined and eviction_hard_control_plane %} evictionHard: {{ eviction_hard_control_plane | to_nice_yaml(indent=2) | indent(2) }} -{% elif not is_kube_master|bool and eviction_hard is defined and eviction_hard %} +{% elif not is_kube_master | bool and eviction_hard is defined and eviction_hard %} evictionHard: {{ eviction_hard | to_nice_yaml(indent=2) | indent(2) }} {% endif %} @@ -123,7 +123,7 @@ resolvConf: "{{ kube_resolv_conf }}" {% if kubelet_feature_gates or kube_feature_gates %} featureGates: {% for feature in (kubelet_feature_gates | default(kube_feature_gates, true)) %} - {{ feature|replace("=", ": ") }} + {{ feature | replace("=", ": ") }} {% endfor %} {% endif %} {% if tls_min_version is defined %} diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml index 147033f38..f7670318f 100644 --- a/roles/kubernetes/preinstall/defaults/main.yml +++ b/roles/kubernetes/preinstall/defaults/main.yml @@ -73,7 +73,7 @@ ping_access_ip: true ntp_enabled: false # The package to install which provides NTP functionality. # The default is ntp for most platforms, or chrony on RHEL/CentOS 7 and later. 
-# The ntp_package can be one of ['ntp','chrony'] +# The ntp_package can be one of ['ntp', 'chrony'] ntp_package: >- {% if ansible_os_family == "RedHat" -%} chrony diff --git a/roles/kubernetes/preinstall/handlers/main.yml b/roles/kubernetes/preinstall/handlers/main.yml index 7cb0c3185..8ae931f26 100644 --- a/roles/kubernetes/preinstall/handlers/main.yml +++ b/roles/kubernetes/preinstall/handlers/main.yml @@ -45,7 +45,7 @@ - Preinstall | restart kube-controller-manager crio/containerd - Preinstall | restart kube-apiserver docker - Preinstall | restart kube-apiserver crio/containerd - when: not dns_early|bool + when: not dns_early | bool # FIXME(mattymo): Also restart for kubeadm mode - name: Preinstall | kube-apiserver configured diff --git a/roles/kubernetes/preinstall/tasks/0020-set_facts.yml b/roles/kubernetes/preinstall/tasks/0020-set_facts.yml index d8638ff2b..8d4c9ac31 100644 --- a/roles/kubernetes/preinstall/tasks/0020-set_facts.yml +++ b/roles/kubernetes/preinstall/tasks/0020-set_facts.yml @@ -84,12 +84,12 @@ - name: Stop if /etc/resolv.conf not configured nameservers assert: - that: configured_nameservers|length>0 + that: configured_nameservers | length>0 fail_msg: "nameserver should not empty in /etc/resolv.conf" when: - not ignore_assert_errors - configured_nameservers is defined - - not (upstream_dns_servers is defined and upstream_dns_servers|length > 0) + - not (upstream_dns_servers is defined and upstream_dns_servers | length > 0) - not (disable_host_nameservers | default(false)) - name: NetworkManager | Check if host has NetworkManager @@ -111,14 +111,14 @@ - name: set default dns if remove_default_searchdomains is false set_fact: default_searchdomains: ["default.svc.{{ dns_domain }}", "svc.{{ dns_domain }}"] - when: not remove_default_searchdomains|default()|bool or (remove_default_searchdomains|default()|bool and searchdomains|default([])|length==0) + when: not remove_default_searchdomains | default() | bool or (remove_default_searchdomains | 
default() | bool and searchdomains | default([]) | length==0) - name: set dns facts set_fact: resolvconf: >- {%- if resolvconf.rc == 0 and resolvconfd_path.stat.isdir is defined and resolvconfd_path.stat.isdir -%}true{%- else -%}false{%- endif -%} bogus_domains: |- - {% for d in default_searchdomains|default([]) + searchdomains|default([]) -%} + {% for d in default_searchdomains | default([]) + searchdomains | default([]) -%} {{ dns_domain }}.{{ d }}./{{ d }}.{{ d }}./com.{{ d }}./ {%- endfor %} cloud_resolver: "{{ ['169.254.169.254'] if cloud_provider is defined and cloud_provider == 'gce' else @@ -142,9 +142,9 @@ set_fact: resolvconffile: /etc/resolv.conf base: >- - {%- if resolvconf|bool -%}/etc/resolvconf/resolv.conf.d/base{%- endif -%} + {%- if resolvconf | bool -%}/etc/resolvconf/resolv.conf.d/base{%- endif -%} head: >- - {%- if resolvconf|bool -%}/etc/resolvconf/resolv.conf.d/head{%- endif -%} + {%- if resolvconf | bool -%}/etc/resolvconf/resolv.conf.d/head{%- endif -%} when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] and not is_fedora_coreos - name: target temporary resolvconf cloud init file (Flatcar Container Linux by Kinvolk / Fedora CoreOS) @@ -191,36 +191,36 @@ - name: generate search domains to resolvconf set_fact: searchentries: - search {{ (default_searchdomains|default([]) + searchdomains|default([])) | join(' ') }} + search {{ (default_searchdomains | default([]) + searchdomains | default([])) | join(' ') }} domainentry: domain {{ dns_domain }} supersede_search: - supersede domain-search "{{ (default_searchdomains|default([]) + searchdomains|default([])) | join('", "') }}"; + supersede domain-search "{{ (default_searchdomains | default([]) + searchdomains | default([])) | join('", "') }}"; supersede_domain: supersede domain-name "{{ dns_domain }}"; - name: pick coredns cluster IP or default resolver set_fact: coredns_server: |- - {%- if dns_mode == 'coredns' and not dns_early|bool -%} - {{ [ skydns_server ] }} - {%- 
elif dns_mode == 'coredns_dual' and not dns_early|bool -%} - {{ [ skydns_server ] + [ skydns_server_secondary ] }} - {%- elif dns_mode == 'manual' and not dns_early|bool -%} - {{ ( manual_dns_server.split(',') | list) }} - {%- elif dns_mode == 'none' and not dns_early|bool -%} + {%- if dns_mode == 'coredns' and not dns_early | bool -%} + {{ [skydns_server] }} + {%- elif dns_mode == 'coredns_dual' and not dns_early | bool -%} + {{ [skydns_server] + [skydns_server_secondary] }} + {%- elif dns_mode == 'manual' and not dns_early | bool -%} + {{ (manual_dns_server.split(',') | list) }} + {%- elif dns_mode == 'none' and not dns_early | bool -%} [] - {%- elif dns_early|bool -%} - {{ upstream_dns_servers|default([]) }} + {%- elif dns_early | bool -%} + {{ upstream_dns_servers | default([]) }} {%- endif -%} # This task should only run after cluster/nodelocal DNS is up, otherwise all DNS lookups will timeout - name: generate nameservers for resolvconf, including cluster DNS set_fact: nameserverentries: |- - {{ (([nodelocaldns_ip] if enable_nodelocaldns else []) + (coredns_server|d([]) if not enable_nodelocaldns else []) + nameservers|d([]) + cloud_resolver|d([]) + (configured_nameservers|d([]) if not disable_host_nameservers|d()|bool else [])) | unique | join(',') }} + {{ (([nodelocaldns_ip] if enable_nodelocaldns else []) + (coredns_server | d([]) if not enable_nodelocaldns else []) + nameservers | d([]) + cloud_resolver | d([]) + (configured_nameservers | d([]) if not disable_host_nameservers | d() | bool else [])) | unique | join(',') }} supersede_nameserver: - supersede domain-name-servers {{ ( ( [nodelocaldns_ip] if enable_nodelocaldns else []) + (coredns_server|d([]) if not enable_nodelocaldns else []) + nameservers|d([]) + cloud_resolver|d([])) | unique | join(', ') }}; + supersede domain-name-servers {{ (([nodelocaldns_ip] if enable_nodelocaldns else []) + (coredns_server | d([]) if not enable_nodelocaldns else []) + nameservers | d([]) + cloud_resolver | d([])) | 
unique | join(', ') }}; when: not dns_early or dns_late # This task should run instead of the above task when cluster/nodelocal DNS hasn't @@ -228,20 +228,20 @@ - name: generate nameservers for resolvconf, not including cluster DNS set_fact: nameserverentries: |- - {{ ( nameservers|d([]) + cloud_resolver|d([]) + configured_nameservers|d([])) | unique | join(',') }} + {{ (nameservers | d([]) + cloud_resolver | d([]) + configured_nameservers | d([])) | unique | join(',') }} supersede_nameserver: - supersede domain-name-servers {{ ( nameservers|d([]) + cloud_resolver|d([])) | unique | join(', ') }}; + supersede domain-name-servers {{ (nameservers | d([]) + cloud_resolver | d([])) | unique | join(', ') }}; when: dns_early and not dns_late - name: gather os specific variables include_vars: "{{ item }}" with_first_found: - files: - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}.yml" - - "{{ ansible_os_family|lower }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml" + - "{{ ansible_distribution | lower }}.yml" + - "{{ ansible_os_family | lower }}.yml" - defaults.yml paths: - ../vars diff --git a/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml index b8f2b479b..3b4ec4bd7 100644 --- a/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml +++ b/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml @@ -45,7 +45,7 @@ # simplify this items-list when 
https://github.com/ansible/ansible/issues/15753 is resolved - name: "Stop if known booleans are set as strings (Use JSON format on CLI: -e \"{'key': true }\")" assert: - that: item.value|type_debug == 'bool' + that: item.value | type_debug == 'bool' msg: "{{ item.value }} isn't a bool" run_once: yes with_items: @@ -58,7 +58,7 @@ - name: Stop if even number of etcd hosts assert: - that: groups.etcd|length is not divisibleby 2 + that: groups.etcd | length is not divisibleby 2 when: - not ignore_assert_errors - inventory_hostname in groups.get('etcd',[]) @@ -102,6 +102,7 @@ - name: Ensure ping package package: + # noqa: jinja[spacing] name: >- {%- if ansible_os_family == 'Debian' -%} iputils-ping @@ -207,7 +208,7 @@ - name: Stop if unknown cert_management assert: - that: cert_management|d('script') in ['script', 'none'] + that: cert_management | d('script') in ['script', 'none'] msg: "cert_management can only be 'script' or 'none'" run_once: true diff --git a/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml b/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml index 4397cdd63..884ffbb49 100644 --- a/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml +++ b/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml @@ -13,7 +13,7 @@ {% for item in nameserverentries.split(',') %} nameserver {{ item }} {% endfor %} - options ndots:{{ ndots }} timeout:{{ dns_timeout|default('2') }} attempts:{{ dns_attempts|default('2') }} + options ndots:{{ ndots }} timeout:{{ dns_timeout | default('2') }} attempts:{{ dns_attempts | default('2') }} state: present insertbefore: BOF create: yes @@ -28,7 +28,7 @@ regexp: '^{{ item[1] }}[^#]*(?=# Ansible entries BEGIN)' backup: "{{ not resolvconf_stat.stat.islnk }}" with_nested: - - "{{ [resolvconffile, base|default(''), head|default('')] | difference(['']) }}" + - "{{ [resolvconffile, base | default(''), head | default('')] | difference(['']) }}" - [ 'search\s', 'nameserver\s', 'domain\s', 'options\s' ] notify: Preinstall | propagate 
resolvconf to k8s components @@ -39,7 +39,7 @@ replace: '\1' backup: "{{ not resolvconf_stat.stat.islnk }}" with_nested: - - "{{ [resolvconffile, base|default(''), head|default('')] | difference(['']) }}" + - "{{ [resolvconffile, base | default(''), head | default('')] | difference(['']) }}" - [ 'search\s', 'nameserver\s', 'domain\s', 'options\s' ] notify: Preinstall | propagate resolvconf to k8s components diff --git a/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml b/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml index 9ad5f7d10..ae5e68914 100644 --- a/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml +++ b/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml @@ -12,14 +12,14 @@ - name: set default dns if remove_default_searchdomains is false set_fact: default_searchdomains: ["default.svc.{{ dns_domain }}", "svc.{{ dns_domain }}"] - when: not remove_default_searchdomains|default()|bool or (remove_default_searchdomains|default()|bool and searchdomains|default([])|length==0) + when: not remove_default_searchdomains | default() | bool or (remove_default_searchdomains | default() | bool and searchdomains | default([]) | length==0) - name: NetworkManager | Add DNS search to NM configuration community.general.ini_file: path: /etc/NetworkManager/conf.d/dns.conf section: global-dns option: searches - value: "{{ (default_searchdomains|default([]) + searchdomains|default([])) | join(',') }}" + value: "{{ (default_searchdomains | default([]) + searchdomains | default([])) | join(',') }}" mode: '0600' backup: yes notify: Preinstall | update resolvconf for networkmanager @@ -29,7 +29,7 @@ path: /etc/NetworkManager/conf.d/dns.conf section: global-dns option: options - value: "ndots:{{ ndots }},timeout:{{ dns_timeout|default('2') }},attempts:{{ dns_attempts|default('2') }}" + value: "ndots:{{ ndots }},timeout:{{ dns_timeout | default('2') }},attempts:{{ dns_attempts | default('2') }}" mode: '0600' backup: yes notify: Preinstall 
| update resolvconf for networkmanager diff --git a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml index b4fccfb89..eb81d7d8b 100644 --- a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml +++ b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml @@ -55,7 +55,7 @@ delay: "{{ retry_stagger | random + 3 }}" when: - ansible_distribution == "Fedora" - - ansible_distribution_major_version|int >= 30 + - ansible_distribution_major_version | int >= 30 - not is_fedora_coreos changed_when: False tags: @@ -68,18 +68,18 @@ when: - ansible_os_family == "RedHat" - not is_fedora_coreos - - epel_enabled|bool + - epel_enabled | bool tags: - bootstrap-os - name: Update common_required_pkgs with ipvsadm when kube_proxy_mode is ipvs set_fact: - common_required_pkgs: "{{ common_required_pkgs|default([]) + ['ipvsadm', 'ipset'] }}" + common_required_pkgs: "{{ common_required_pkgs | default([]) + ['ipvsadm', 'ipset'] }}" when: kube_proxy_mode == 'ipvs' - name: Install packages requirements package: - name: "{{ required_pkgs | default([]) | union(common_required_pkgs|default([])) }}" + name: "{{ required_pkgs | default([]) | union(common_required_pkgs | default([])) }}" state: present register: pkgs_task_result until: pkgs_task_result is succeeded diff --git a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml index 2bf552351..87fb17667 100644 --- a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml +++ b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml @@ -119,7 +119,7 @@ - { name: kernel.panic_on_oops, value: 1 } - { name: vm.overcommit_memory, value: 1 } - { name: vm.panic_on_oom, value: 0 } - when: kubelet_protect_kernel_defaults|bool + when: kubelet_protect_kernel_defaults | bool - name: Check dummy module community.general.modprobe: diff --git 
a/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml b/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml index c2e42366d..4efe1e3a1 100644 --- a/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml +++ b/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml @@ -17,6 +17,7 @@ - name: Set fact NTP settings set_fact: + # noqa: jinja[spacing] ntp_config_file: >- {% if ntp_package == "ntp" -%} /etc/ntp.conf @@ -25,6 +26,7 @@ {%- else -%} /etc/chrony/chrony.conf {%- endif -%} + # noqa: jinja[spacing] ntp_service_name: >- {% if ntp_package == "chrony" -%} chronyd @@ -51,6 +53,7 @@ - ntp_force_sync_immediately - name: Force Sync NTP Immediately + # noqa: jinja[spacing] command: >- timeout -k 60s 60s {% if ntp_package == "ntp" -%} diff --git a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml index ed5ce291f..d38ef5857 100644 --- a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml +++ b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml @@ -4,7 +4,7 @@ - name: Hosts | create list from inventory set_fact: etc_hosts_inventory_block: |- - {% for item in (groups['k8s_cluster'] + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique -%} + {% for item in (groups['k8s_cluster'] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique -%} {% if 'access_ip' in hostvars[item] or 'ip' in hostvars[item] or 'ansible_default_ipv4' in hostvars[item] -%} {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }} {%- if ('ansible_hostname' in hostvars[item] and item != hostvars[item]['ansible_hostname']) %} {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }} {{ hostvars[item]['ansible_hostname'] }} {% else %} {{ item }}.{{ dns_domain }} {{ item }} {% endif %} @@ -51,8 +51,8 @@ - name: Hosts | Extract existing entries for localhost from hosts file set_fact: etc_hosts_localhosts_dict: >- 
- {%- set splitted = (item | regex_replace('[ \t]+', ' ')|regex_replace('#.*$')|trim).split( ' ') -%} - {{ etc_hosts_localhosts_dict|default({}) | combine({splitted[0]: splitted[1::] }) }} + {%- set splitted = (item | regex_replace('[ \t]+', ' ') | regex_replace('#.*$') | trim).split(' ') -%} + {{ etc_hosts_localhosts_dict | default({}) | combine({splitted[0]: splitted[1::]}) }} with_items: "{{ (etc_hosts_content['content'] | b64decode).splitlines() }}" when: - etc_hosts_content.content is defined @@ -61,19 +61,19 @@ - name: Hosts | Update target hosts file entries dict with required entries set_fact: etc_hosts_localhosts_dict_target: >- - {%- set target_entries = (etc_hosts_localhosts_dict|default({})).get(item.key, []) | difference(item.value.get('unexpected' ,[])) -%} - {{ etc_hosts_localhosts_dict_target|default({}) | combine({item.key: (target_entries + item.value.expected)|unique}) }} - loop: "{{ etc_hosts_localhost_entries|dict2items }}" + {%- set target_entries = (etc_hosts_localhosts_dict | default({})).get(item.key, []) | difference(item.value.get('unexpected', [])) -%} + {{ etc_hosts_localhosts_dict_target | default({}) | combine({item.key: (target_entries + item.value.expected) | unique}) }} + loop: "{{ etc_hosts_localhost_entries | dict2items }}" - name: Hosts | Update (if necessary) hosts file lineinfile: dest: /etc/hosts - line: "{{ item.key }} {{ item.value|join(' ') }}" + line: "{{ item.key }} {{ item.value | join(' ') }}" regexp: "^{{ item.key }}.*$" state: present backup: yes unsafe_writes: yes - loop: "{{ etc_hosts_localhosts_dict_target|default({})|dict2items }}" + loop: "{{ etc_hosts_localhosts_dict_target | default({}) | dict2items }}" when: populate_localhost_entries_to_hosts_file # gather facts to update ansible_fqdn diff --git a/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml b/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml index 50a62026d..da3814715 100644 --- 
a/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml +++ b/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml @@ -2,7 +2,7 @@ - name: Configure dhclient to supersede search/domain/nameservers blockinfile: block: |- - {% for item in [ supersede_domain, supersede_search, supersede_nameserver ] -%} + {% for item in [supersede_domain, supersede_search, supersede_nameserver] -%} {{ item }} {% endfor %} path: "{{ dhclientconffile }}" diff --git a/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml b/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml index 6a2203cca..d4b7957f9 100644 --- a/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml +++ b/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml @@ -14,7 +14,7 @@ - name: Search root filesystem device vars: query: "[?mount=='/'].device" - _root_device: "{{ ansible_mounts|json_query(query) }}" + _root_device: "{{ ansible_mounts | json_query(query) }}" set_fact: device: "{{ _root_device | first | regex_replace('([^0-9]+)[0-9]+', '\\1') }}" partition: "{{ _root_device | first | regex_replace('[^0-9]+([0-9]+)', '\\1') }}" diff --git a/roles/kubernetes/preinstall/vars/centos.yml b/roles/kubernetes/preinstall/vars/centos.yml index 2a5b6c75d..9b1a8749e 100644 --- a/roles/kubernetes/preinstall/vars/centos.yml +++ b/roles/kubernetes/preinstall/vars/centos.yml @@ -1,6 +1,6 @@ --- required_pkgs: - - "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}" + - "{{ ((ansible_distribution_major_version | int) < 8) | ternary('libselinux-python', 'python3-libselinux') }}" - device-mapper-libs - nss - conntrack diff --git a/roles/kubernetes/preinstall/vars/redhat.yml b/roles/kubernetes/preinstall/vars/redhat.yml index 2a5b6c75d..9b1a8749e 100644 --- a/roles/kubernetes/preinstall/vars/redhat.yml +++ b/roles/kubernetes/preinstall/vars/redhat.yml @@ -1,6 +1,6 @@ --- required_pkgs: - - "{{ ( 
(ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}" + - "{{ ((ansible_distribution_major_version | int) < 8) | ternary('libselinux-python', 'python3-libselinux') }}" - device-mapper-libs - nss - conntrack diff --git a/roles/kubernetes/tokens/tasks/check-tokens.yml b/roles/kubernetes/tokens/tasks/check-tokens.yml index ae75f0d04..a157a0597 100644 --- a/roles/kubernetes/tokens/tasks/check-tokens.yml +++ b/roles/kubernetes/tokens/tasks/check-tokens.yml @@ -17,7 +17,7 @@ - name: "Check_tokens | Set 'sync_tokens' and 'gen_tokens' to true" set_fact: gen_tokens: true - when: not known_tokens_master.stat.exists and kube_token_auth|default(true) + when: not known_tokens_master.stat.exists and kube_token_auth | default(true) run_once: true - name: "Check tokens | check if a cert already exists" @@ -34,7 +34,7 @@ {%- set tokens = {'sync': False} -%} {%- for server in groups['kube_control_plane'] | intersect(ansible_play_batch) if (not hostvars[server].known_tokens.stat.exists) or - (hostvars[server].known_tokens.stat.checksum|default('') != known_tokens_master.stat.checksum|default('')) -%} + (hostvars[server].known_tokens.stat.checksum | default('') != known_tokens_master.stat.checksum | default('')) -%} {%- set _ = tokens.update({'sync': True}) -%} {%- endfor -%} {{ tokens.sync }} diff --git a/roles/kubernetes/tokens/tasks/gen_tokens.yml b/roles/kubernetes/tokens/tasks/gen_tokens.yml index e80e56d6f..6ac6b4907 100644 --- a/roles/kubernetes/tokens/tasks/gen_tokens.yml +++ b/roles/kubernetes/tokens/tasks/gen_tokens.yml @@ -6,7 +6,7 @@ mode: 0700 run_once: yes delegate_to: "{{ groups['kube_control_plane'][0] }}" - when: gen_tokens|default(false) + when: gen_tokens | default(false) - name: Gen_tokens | generate tokens for master components command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}" @@ -19,7 +19,7 @@ changed_when: "'Added' in gentoken_master.stdout" run_once: yes delegate_to: "{{ 
groups['kube_control_plane'][0] }}" - when: gen_tokens|default(false) + when: gen_tokens | default(false) - name: Gen_tokens | generate tokens for node components command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}" @@ -32,7 +32,7 @@ changed_when: "'Added' in gentoken_node.stdout" run_once: yes delegate_to: "{{ groups['kube_control_plane'][0] }}" - when: gen_tokens|default(false) + when: gen_tokens | default(false) - name: Gen_tokens | Get list of tokens from first master command: "find {{ kube_token_dir }} -maxdepth 1 -type f" @@ -40,7 +40,7 @@ check_mode: no delegate_to: "{{ groups['kube_control_plane'][0] }}" run_once: true - when: sync_tokens|default(false) + when: sync_tokens | default(false) - name: Gen_tokens | Gather tokens shell: "set -o pipefail && tar cfz - {{ tokens_list.stdout_lines | join(' ') }} | base64 --wrap=0" @@ -50,14 +50,14 @@ check_mode: no delegate_to: "{{ groups['kube_control_plane'][0] }}" run_once: true - when: sync_tokens|default(false) + when: sync_tokens | default(false) - name: Gen_tokens | Copy tokens on masters - shell: "set -o pipefail && echo '{{ tokens_data.stdout|quote }}' | base64 -d | tar xz -C /" + shell: "set -o pipefail && echo '{{ tokens_data.stdout | quote }}' | base64 -d | tar xz -C /" args: executable: /bin/bash when: - inventory_hostname in groups['kube_control_plane'] - - sync_tokens|default(false) + - sync_tokens | default(false) - inventory_hostname != groups['kube_control_plane'][0] - tokens_data.stdout diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index d32dd3a5a..7055462f0 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -21,7 +21,7 @@ kube_version: v1.26.6 ## The minimum version working kube_version_min_required: v1.25.0 -## Kube Proxy mode One of ['iptables','ipvs'] +## Kube Proxy mode One of ['iptables', 'ipvs'] kube_proxy_mode: ipvs ## The timeout for init first 
control-plane @@ -33,13 +33,13 @@ kubeadm_init_timeout: 300s kubeadm_init_phases_skip_default: [ "addon/coredns" ] kubeadm_init_phases_skip: >- {%- if kube_network_plugin == 'kube-router' and (kube_router_run_service_proxy is defined and kube_router_run_service_proxy) -%} - {{ kubeadm_init_phases_skip_default + [ "addon/kube-proxy" ] }} + {{ kubeadm_init_phases_skip_default + ["addon/kube-proxy"] }} {%- elif kube_network_plugin == 'cilium' and (cilium_kube_proxy_replacement is defined and cilium_kube_proxy_replacement == 'strict') -%} - {{ kubeadm_init_phases_skip_default + [ "addon/kube-proxy" ] }} + {{ kubeadm_init_phases_skip_default + ["addon/kube-proxy"] }} {%- elif kube_network_plugin == 'calico' and (calico_bpf_enabled is defined and calico_bpf_enabled) -%} - {{ kubeadm_init_phases_skip_default + [ "addon/kube-proxy" ] }} + {{ kubeadm_init_phases_skip_default + ["addon/kube-proxy"] }} {%- elif kube_proxy_remove is defined and kube_proxy_remove -%} - {{ kubeadm_init_phases_skip_default + [ "addon/kube-proxy" ] }} + {{ kubeadm_init_phases_skip_default + ["addon/kube-proxy"] }} {%- else -%} {{ kubeadm_init_phases_skip_default }} {%- endif -%} @@ -116,19 +116,19 @@ resolvconf_mode: host_resolvconf # Deploy netchecker app to verify DNS resolve as an HTTP service deploy_netchecker: false # Ip address of the kubernetes DNS service (called skydns for historical reasons) -skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}" -skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}" +skydns_server: "{{ kube_service_addresses | ipaddr('net') | ipaddr(3) | ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses | ipaddr('net') | ipaddr(4) | ipaddr('address') }}" dns_domain: "{{ cluster_name }}" docker_dns_search_domains: - 'default.svc.{{ dns_domain }}' - 'svc.{{ dns_domain }}' kube_dns_servers: - coredns: ["{{skydns_server}}"] - coredns_dual: "{{[skydns_server] + [ 
skydns_server_secondary ]}}" - manual: ["{{manual_dns_server}}"] + coredns: ["{{ skydns_server }}"] + coredns_dual: "{{ [skydns_server] + [skydns_server_secondary] }}" + manual: ["{{ manual_dns_server }}"] -dns_servers: "{{kube_dns_servers[dns_mode]}}" +dns_servers: "{{ kube_dns_servers[dns_mode] }}" enable_coredns_k8s_external: false coredns_k8s_external_zone: k8s_external.local @@ -179,7 +179,7 @@ kube_network_plugin: calico kube_network_plugin_multus: false # Determines if calico_rr group exists -peer_with_calico_rr: "{{ 'calico_rr' in groups and groups['calico_rr']|length > 0 }}" +peer_with_calico_rr: "{{ 'calico_rr' in groups and groups['calico_rr'] | length > 0 }}" # Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore) calico_datastore: "kdd" @@ -232,10 +232,10 @@ kube_network_node_prefix_ipv6: 120 # listening on. # NOTE: loadbalancer_apiserver_localhost somewhat alters the final API enpdoint # access IP value (automatically evaluated below) -kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" +kube_apiserver_ip: "{{ kube_service_addresses | ipaddr('net') | ipaddr(1) | ipaddr('address') }}" # NOTE: If you specific address/interface and use loadbalancer_apiserver_localhost -# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }} too. +# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too. 
kube_apiserver_bind_address: 0.0.0.0 # https @@ -243,7 +243,7 @@ kube_apiserver_port: 6443 # If non-empty, will use this string as identification instead of the actual hostname kube_override_hostname: >- - {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} + {%- if cloud_provider is defined and cloud_provider in ['aws'] -%} {%- else -%} {{ inventory_hostname }} {%- endif -%} @@ -443,7 +443,7 @@ openstack_lbaas_create_monitor: "yes" openstack_lbaas_monitor_delay: "1m" openstack_lbaas_monitor_timeout: "30s" openstack_lbaas_monitor_max_retries: "3" -openstack_cacert: "{{ lookup('env','OS_CACERT') }}" +openstack_cacert: "{{ lookup('env', 'OS_CACERT') }}" # Default values for the external OpenStack Cloud Controller external_openstack_lbaas_enabled: true @@ -509,7 +509,7 @@ kubeadm_feature_gates: [] local_volume_provisioner_storage_classes: | { "{{ local_volume_provisioner_storage_class | default('local-storage') }}": { - "host_dir": "{{ local_volume_provisioner_base_dir | default ('/mnt/disks') }}", + "host_dir": "{{ local_volume_provisioner_base_dir | default('/mnt/disks') }}", "mount_dir": "{{ local_volume_provisioner_mount_dir | default('/mnt/disks') }}", "volume_mode": "Filesystem", "fs_type": "ext4" @@ -546,7 +546,7 @@ loadbalancer_apiserver_type: "nginx" apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local" kube_apiserver_global_endpoint: |- {% if loadbalancer_apiserver is defined -%} - https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }} + https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }} {%- elif loadbalancer_apiserver_localhost and (loadbalancer_apiserver_port is not defined or loadbalancer_apiserver_port == kube_apiserver_port) -%} https://localhost:{{ kube_apiserver_port }} {%- else -%} @@ -554,11 +554,11 @@ kube_apiserver_global_endpoint: |- {%- endif %} kube_apiserver_endpoint: |- {% if 
loadbalancer_apiserver is defined -%} - https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }} + https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }} {%- elif not is_kube_master and loadbalancer_apiserver_localhost -%} - https://localhost:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }} + https://localhost:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} {%- elif is_kube_master -%} - https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_port }} + https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0', '127.0.0.1') }}:{{ kube_apiserver_port }} {%- else -%} https://{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }} {%- endif %} @@ -594,20 +594,20 @@ etcd_metrics_addresses: |- {% for item in etcd_hosts -%} https://{{ hostvars[item]['etcd_access_address'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }}:{{ etcd_metrics_port | default(2381) }}{% if not loop.last %},{% endif %} {%- endfor %} -etcd_events_access_addresses: "{{etcd_events_access_addresses_list | join(',')}}" -etcd_events_access_addresses_semicolon: "{{etcd_events_access_addresses_list | join(';')}}" +etcd_events_access_addresses: "{{ etcd_events_access_addresses_list | join(',') }}" +etcd_events_access_addresses_semicolon: "{{ etcd_events_access_addresses_list | join(';') }}" # user should set etcd_member_name in inventory/mycluster/hosts.ini etcd_member_name: |- {% for host in groups['etcd'] %} - {% if inventory_hostname == host %}{{ hostvars[host].etcd_member_name | default("etcd" + loop.index|string) }}{% endif %} + {% if inventory_hostname == host %}{{ hostvars[host].etcd_member_name | default("etcd" + loop.index | string) }}{% endif %} {% endfor %} etcd_peer_addresses: |- {% for item in groups['etcd'] -%} - {{ hostvars[item].etcd_member_name | default("etcd" + 
loop.index|string) }}=https://{{ hostvars[item].etcd_access_address | default(hostvars[item].ip | default(fallback_ips[item])) }}:2380{% if not loop.last %},{% endif %} + {{ hostvars[item].etcd_member_name | default("etcd" + loop.index | string) }}=https://{{ hostvars[item].etcd_access_address | default(hostvars[item].ip | default(fallback_ips[item])) }}:2380{% if not loop.last %},{% endif %} {%- endfor %} etcd_events_peer_addresses: |- {% for item in groups['etcd'] -%} - {{ hostvars[item].etcd_member_name | default("etcd" + loop.index|string) }}-events=https://{{ hostvars[item].etcd_events_access_address | default(hostvars[item].ip | default(fallback_ips[item])) }}:2382{% if not loop.last %},{% endif %} + {{ hostvars[item].etcd_member_name | default("etcd" + loop.index | string) }}-events=https://{{ hostvars[item].etcd_events_access_address | default(hostvars[item].ip | default(fallback_ips[item])) }}:2382{% if not loop.last %},{% endif %} {%- endfor %} podsecuritypolicy_enabled: false @@ -653,16 +653,16 @@ host_os: >- kubelet_event_record_qps: 5 proxy_env_defaults: - http_proxy: "{{ http_proxy | default ('') }}" - HTTP_PROXY: "{{ http_proxy | default ('') }}" - https_proxy: "{{ https_proxy | default ('') }}" - HTTPS_PROXY: "{{ https_proxy | default ('') }}" - no_proxy: "{{ no_proxy | default ('') }}" - NO_PROXY: "{{ no_proxy | default ('') }}" + http_proxy: "{{ http_proxy | default('') }}" + HTTP_PROXY: "{{ http_proxy | default('') }}" + https_proxy: "{{ https_proxy | default('') }}" + HTTPS_PROXY: "{{ https_proxy | default('') }}" + no_proxy: "{{ no_proxy | default('') }}" + NO_PROXY: "{{ no_proxy | default('') }}" # If we use SSL_CERT_FILE: {{ omit }} it cause in value __omit_place_holder__ and break environments # Combine dict is avoiding the problem with omit placeholder. Maybe it can be better solution? 
-proxy_env: "{{ proxy_env_defaults | combine({ 'SSL_CERT_FILE': https_proxy_cert_file }) if https_proxy_cert_file is defined else proxy_env_defaults }}" +proxy_env: "{{ proxy_env_defaults | combine({'SSL_CERT_FILE': https_proxy_cert_file}) if https_proxy_cert_file is defined else proxy_env_defaults }}" proxy_disable_env: ALL_PROXY: '' diff --git a/roles/kubespray-defaults/tasks/fallback_ips.yml b/roles/kubespray-defaults/tasks/fallback_ips.yml index d42faee8f..86b0bd7f9 100644 --- a/roles/kubespray-defaults/tasks/fallback_ips.yml +++ b/roles/kubespray-defaults/tasks/fallback_ips.yml @@ -10,7 +10,7 @@ delegate_to: "{{ item }}" delegate_facts: yes when: hostvars[item].ansible_default_ipv4 is not defined - loop: "{{ (groups['k8s_cluster']|default([]) + groups['etcd']|default([]) + groups['calico_rr']|default([])) | unique }}" + loop: "{{ (groups['k8s_cluster'] | default([]) + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique }}" run_once: yes tags: always @@ -18,7 +18,7 @@ set_fact: fallback_ips_base: | --- - {% for item in (groups['k8s_cluster']|default([]) + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique %} + {% for item in (groups['k8s_cluster'] | default([]) + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique %} {% set found = hostvars[item].get('ansible_default_ipv4') %} {{ item }}: "{{ found.get('address', '127.0.0.1') }}" {% endfor %} diff --git a/roles/kubespray-defaults/tasks/no_proxy.yml b/roles/kubespray-defaults/tasks/no_proxy.yml index 6e6a5c9bb..d2d5cc6d1 100644 --- a/roles/kubespray-defaults/tasks/no_proxy.yml +++ b/roles/kubespray-defaults/tasks/no_proxy.yml @@ -1,9 +1,10 @@ --- - name: Set no_proxy to all assigned cluster IPs and hostnames set_fact: + # noqa: jinja[spacing] no_proxy_prepare: >- {%- if loadbalancer_apiserver is defined -%} - {{ apiserver_loadbalancer_domain_name| default('') }}, + {{ apiserver_loadbalancer_domain_name | default('') }}, {{ 
loadbalancer_apiserver.address | default('') }}, {%- endif -%} {%- if no_proxy_exclude_workers | default(false) -%} @@ -11,12 +12,12 @@ {%- else -%} {% set cluster_or_master = 'k8s_cluster' %} {%- endif -%} - {%- for item in (groups[cluster_or_master] + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique -%} + {%- for item in (groups[cluster_or_master] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique -%} {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }}, - {%- if item != hostvars[item].get('ansible_hostname', '') -%} + {%- if item != hostvars[item].get('ansible_hostname', '') -%} {{ hostvars[item]['ansible_hostname'] }}, {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }}, - {%- endif -%} + {%- endif -%} {{ item }},{{ item }}.{{ dns_domain }}, {%- endfor -%} {%- if additional_no_proxy is defined -%} @@ -32,7 +33,8 @@ - name: Populates no_proxy to all hosts set_fact: no_proxy: "{{ hostvars.localhost.no_proxy_prepare }}" + # noqa: jinja[spacing] proxy_env: "{{ proxy_env | combine({ - 'no_proxy': hostvars.localhost.no_proxy_prepare, - 'NO_PROXY': hostvars.localhost.no_proxy_prepare - }) }}" + 'no_proxy': hostvars.localhost.no_proxy_prepare, + 'NO_PROXY': hostvars.localhost.no_proxy_prepare + }) }}" diff --git a/roles/network_plugin/calico/rr/tasks/update-node.yml b/roles/network_plugin/calico/rr/tasks/update-node.yml index 930429139..59841148c 100644 --- a/roles/network_plugin/calico/rr/tasks/update-node.yml +++ b/roles/network_plugin/calico/rr/tasks/update-node.yml @@ -4,7 +4,7 @@ - block: - name: Set the retry count set_fact: - retry_count: "{{ 0 if retry_count is undefined else retry_count|int + 1 }}" + retry_count: "{{ 0 if retry_count is undefined else retry_count | int + 1 }}" - name: Calico | Set label for route reflector # noqa command-instead-of-shell shell: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-rr-id={{ calico_rr_id }} 
--overwrite" @@ -24,6 +24,7 @@ retries: 10 - name: Calico-rr | Set route reflector cluster ID + # noqa: jinja[spacing] set_fact: calico_rr_node_patched: >- {{ calico_rr_node.stdout | from_json | combine({ 'spec': { 'bgp': @@ -38,7 +39,7 @@ - name: Fail if retry limit is reached fail: msg: Ended after 10 retries - when: retry_count|int == 10 + when: retry_count | int == 10 - name: Retrying node configuration debug: diff --git a/roles/network_plugin/calico/tasks/check.yml b/roles/network_plugin/calico/tasks/check.yml index 0ea752418..8506e4499 100644 --- a/roles/network_plugin/calico/tasks/check.yml +++ b/roles/network_plugin/calico/tasks/check.yml @@ -168,7 +168,7 @@ - name: "Check if inventory match current cluster configuration" assert: that: - - calico_pool_conf.spec.blockSize|int == (calico_pool_blocksize | default(kube_network_node_prefix) | int) + - calico_pool_conf.spec.blockSize | int == (calico_pool_blocksize | default(kube_network_node_prefix) | int) - calico_pool_conf.spec.cidr == (calico_pool_cidr | default(kube_pods_subnet)) - not calico_pool_conf.spec.ipipMode is defined or calico_pool_conf.spec.ipipMode == calico_ipip_mode - not calico_pool_conf.spec.vxlanMode is defined or calico_pool_conf.spec.vxlanMode == calico_vxlan_mode diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml index 7d509f90e..6dbcc3170 100644 --- a/roles/network_plugin/calico/tasks/install.yml +++ b/roles/network_plugin/calico/tasks/install.yml @@ -122,7 +122,7 @@ - block: - name: Calico | Check if extra directory is needed stat: - path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds/{{ 'kdd' if (calico_version is version('v3.22.3','<')) else 'crd' }}" + path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds/{{ 'kdd' if (calico_version is version('v3.22.3', '<')) else 'crd' }}" register: kdd_path - name: Calico | Set kdd path when calico < v3.22.3 set_fact: @@ -196,7 +196,7 @@ - name: Calico | 
Configure calico FelixConfiguration command: cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" - stdin: "{{ _felix_config is string | ternary(_felix_config, _felix_config|to_json) }}" + stdin: "{{ _felix_config is string | ternary(_felix_config, _felix_config | to_json) }}" changed_when: False when: - inventory_hostname == groups['kube_control_plane'][0] @@ -222,7 +222,7 @@ "cidr": "{{ calico_pool_cidr | default(kube_pods_subnet) }}", "ipipMode": "{{ calico_ipip_mode }}", "vxlanMode": "{{ calico_vxlan_mode }}", - "natOutgoing": {{ nat_outgoing|default(false) }} + "natOutgoing": {{ nat_outgoing | default(false) }} } } @@ -235,7 +235,7 @@ - name: Calico | Configure calico network pool command: cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" - stdin: "{{ _calico_pool is string | ternary(_calico_pool, _calico_pool|to_json) }}" + stdin: "{{ _calico_pool is string | ternary(_calico_pool, _calico_pool | to_json) }}" changed_when: False when: - inventory_hostname == groups['kube_control_plane'][0] @@ -261,7 +261,7 @@ "cidr": "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}", "ipipMode": "{{ calico_ipip_mode_ipv6 }}", "vxlanMode": "{{ calico_vxlan_mode_ipv6 }}", - "natOutgoing": {{ nat_outgoing_ipv6|default(false) }} + "natOutgoing": {{ nat_outgoing_ipv6 | default(false) }} } } @@ -274,7 +274,7 @@ - name: Calico | Configure calico ipv6 network pool command: cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" - stdin: "{{ _calico_pool_ipv6 is string | ternary(_calico_pool_ipv6, _calico_pool_ipv6|to_json) }}" + stdin: "{{ _calico_pool_ipv6 is string | ternary(_calico_pool_ipv6, _calico_pool_ipv6 | to_json) }}" changed_when: False when: - inventory_hostname == groups['kube_control_plane'][0] @@ -282,13 +282,13 @@ - name: Populate Service External IPs set_fact: - _service_external_ips: "{{ _service_external_ips|default([]) + [ {'cidr': item} ] }}" + _service_external_ips: "{{ _service_external_ips | default([]) + [{'cidr': item}] }}" with_items: "{{ 
calico_advertise_service_external_ips }}" run_once: yes - name: Populate Service LoadBalancer IPs set_fact: - _service_loadbalancer_ips: "{{ _service_loadbalancer_ips|default([]) + [ {'cidr': item} ] }}" + _service_loadbalancer_ips: "{{ _service_loadbalancer_ips | default([]) + [{'cidr': item}] }}" with_items: "{{ calico_advertise_service_loadbalancer_ips }}" run_once: yes @@ -296,7 +296,7 @@ set_fact: nodeToNodeMeshEnabled: "false" when: - - peer_with_router|default(false) or peer_with_calico_rr|default(false) + - peer_with_router | default(false) or peer_with_calico_rr | default(false) - inventory_hostname in groups['k8s_cluster'] run_once: yes @@ -309,6 +309,7 @@ - name: Calico | Set kubespray BGP Configuration set_fact: + # noqa: jinja[spacing] _bgp_config: > { "kind": "BGPConfiguration", @@ -319,12 +320,12 @@ "spec": { "listenPort": {{ calico_bgp_listen_port }}, "logSeverityScreen": "Info", - {% if not calico_no_global_as_num|default(false) %}"asNumber": {{ global_as_num }},{% endif %} - "nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled|default('true') }} , - {% if calico_advertise_cluster_ips|default(false) %} + {% if not calico_no_global_as_num | default(false) %}"asNumber": {{ global_as_num }},{% endif %} + "nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled | default('true') }} , + {% if calico_advertise_cluster_ips | default(false) %} "serviceClusterIPs": [{"cidr": "{{ kube_service_addresses }}" } {{ ',{"cidr":"' + kube_service_addresses_ipv6 + '"}' if enable_dual_stack_networks else '' }}],{% endif %} - {% if calico_advertise_service_loadbalancer_ips|length > 0 %}"serviceLoadBalancerIPs": {{ _service_loadbalancer_ips }},{% endif %} - "serviceExternalIPs": {{ _service_external_ips|default([]) }} + {% if calico_advertise_service_loadbalancer_ips | length > 0 %}"serviceLoadBalancerIPs": {{ _service_loadbalancer_ips }},{% endif %} + "serviceExternalIPs": {{ _service_external_ips | default([]) }} } } @@ -337,7 +338,7 @@ - name: Calico | Set up BGP Configuration 
command: cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" - stdin: "{{ _bgp_config is string | ternary(_bgp_config, _bgp_config|to_json) }}" + stdin: "{{ _bgp_config is string | ternary(_bgp_config, _bgp_config | to_json) }}" changed_when: False when: - inventory_hostname == groups['kube_control_plane'][0] @@ -464,8 +465,8 @@ - include_tasks: peer_with_calico_rr.yml when: - - peer_with_calico_rr|default(false) + - peer_with_calico_rr | default(false) - include_tasks: peer_with_router.yml when: - - peer_with_router|default(false) + - peer_with_router | default(false) diff --git a/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml b/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml index 5e6010ced..9d216bd20 100644 --- a/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml +++ b/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml @@ -13,7 +13,7 @@ command: cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" # revert when it's already a string - stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}" + stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}" vars: stdin: > {"apiVersion": "projectcalico.org/v3", @@ -38,7 +38,7 @@ command: cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" # revert when it's already a string - stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}" + stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}" vars: stdin: > {"apiVersion": "projectcalico.org/v3", @@ -64,7 +64,7 @@ command: cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" # revert when it's already a string - stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}" + stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}" vars: stdin: > {"apiVersion": "projectcalico.org/v3", diff --git a/roles/network_plugin/calico/tasks/peer_with_router.yml b/roles/network_plugin/calico/tasks/peer_with_router.yml index a698ed1da..a29ca36dd 100644 --- a/roles/network_plugin/calico/tasks/peer_with_router.yml +++ 
b/roles/network_plugin/calico/tasks/peer_with_router.yml @@ -2,13 +2,13 @@ - name: Calico | Configure peering with router(s) at global scope command: cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" - stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}" + stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}" vars: stdin: > {"apiVersion": "projectcalico.org/v3", "kind": "BGPPeer", "metadata": { - "name": "global-{{ item.name | default(item.router_id|replace(':','-')) }}" + "name": "global-{{ item.name | default(item.router_id | replace(':', '-')) }}" }, "spec": { "asNumber": "{{ item.as }}", @@ -19,14 +19,14 @@ until: output.rc == 0 delay: "{{ retry_stagger | random + 3 }}" with_items: - - "{{ peers|selectattr('scope','defined')|selectattr('scope','equalto', 'global')|list|default([]) }}" + - "{{ peers | selectattr('scope', 'defined') | selectattr('scope', 'equalto', 'global') | list | default([]) }}" when: - inventory_hostname == groups['kube_control_plane'][0] - name: Calico | Configure node asNumber for per node peering command: cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" - stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}" + stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}" vars: stdin: > {"apiVersion": "projectcalico.org/v3", @@ -52,26 +52,26 @@ - name: Calico | Configure peering with router(s) at node scope command: cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" - stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}" + stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}" vars: stdin: > {"apiVersion": "projectcalico.org/v3", "kind": "BGPPeer", "metadata": { - "name": "{{ inventory_hostname }}-{{ item.name | default(item.router_id|replace(':','-')) }}" + "name": "{{ inventory_hostname }}-{{ item.name | default(item.router_id | replace(':', '-')) }}" }, "spec": { "asNumber": "{{ item.as }}", "node": "{{ inventory_hostname }}", "peerIP": "{{ item.router_id }}", - "sourceAddress": "{{ 
item.sourceaddress|default('UseNodeIP') }}" + "sourceAddress": "{{ item.sourceaddress | default('UseNodeIP') }}" }} register: output retries: 4 until: output.rc == 0 delay: "{{ retry_stagger | random + 3 }}" with_items: - - "{{ peers|selectattr('scope','undefined')|list|default([]) | union(peers|selectattr('scope','defined')|selectattr('scope','equalto', 'node')|list|default([])) }}" + - "{{ peers | selectattr('scope', 'undefined') | list | default([]) | union(peers | selectattr('scope', 'defined') | selectattr('scope', 'equalto', 'node') | list | default([])) }}" delegate_to: "{{ groups['kube_control_plane'][0] }}" when: - inventory_hostname in groups['k8s_cluster'] diff --git a/roles/network_plugin/calico/tasks/pre.yml b/roles/network_plugin/calico/tasks/pre.yml index 162aca150..fc87769f0 100644 --- a/roles/network_plugin/calico/tasks/pre.yml +++ b/roles/network_plugin/calico/tasks/pre.yml @@ -32,12 +32,12 @@ include_vars: "{{ item }}" with_first_found: - files: - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}.yml" - - "{{ ansible_os_family|lower }}-{{ ansible_architecture }}.yml" - - "{{ ansible_os_family|lower }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml" + - "{{ ansible_distribution | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_architecture }}.yml" + - "{{ ansible_os_family | lower }}.yml" - defaults.yml paths: - ../vars diff --git a/roles/network_plugin/calico/templates/calico-config.yml.j2 
b/roles/network_plugin/calico/templates/calico-config.yml.j2 index 0379b6271..4012ef784 100644 --- a/roles/network_plugin/calico/templates/calico-config.yml.j2 +++ b/roles/network_plugin/calico/templates/calico-config.yml.j2 @@ -22,8 +22,8 @@ data: cluster_type: "kubespray" calico_backend: "{{ calico_network_backend }}" {% endif %} -{% if inventory_hostname in groups['k8s_cluster'] and peer_with_router|default(false) %} - as: "{{ local_as|default(global_as_num) }}" +{% if inventory_hostname in groups['k8s_cluster'] and peer_with_router | default(false) %} + as: "{{ local_as | default(global_as_num) }}" {% endif -%} # The CNI network configuration to install on each node. The special # values in this config will be automatically populated. @@ -73,7 +73,7 @@ data: "allow_ip_forwarding": true }, {% endif %} - {% if (calico_feature_control is defined) and (calico_feature_control|length > 0) %} + {% if (calico_feature_control is defined) and (calico_feature_control | length > 0) %} "feature_control": { {% for fc in calico_feature_control -%} {% set fcval = calico_feature_control[fc] -%} diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2 index bd6c63c24..4e49f3bc4 100644 --- a/roles/network_plugin/calico/templates/calico-node.yml.j2 +++ b/roles/network_plugin/calico/templates/calico-node.yml.j2 @@ -211,7 +211,7 @@ spec: value: "true" # Set Felix endpoint to host default action to ACCEPT. 
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "{{ calico_endpoint_to_host_action|default('RETURN') }}" + value: "{{ calico_endpoint_to_host_action | default('RETURN') }}" - name: FELIX_HEALTHHOST value: "{{ calico_healthhost }}" {% if kube_proxy_mode == 'ipvs' and kube_apiserver_node_port_range is defined %} @@ -286,7 +286,7 @@ spec: - name: IP6 value: autodetect {% endif %} -{% if calico_use_default_route_src_ipaddr|default(false) %} +{% if calico_use_default_route_src_ipaddr | default(false) %} - name: FELIX_DEVICEROUTESOURCEADDRESS valueFrom: fieldRef: diff --git a/roles/network_plugin/cilium/defaults/main.yml b/roles/network_plugin/cilium/defaults/main.yml index b6f68c9c0..f4c70e479 100644 --- a/roles/network_plugin/cilium/defaults/main.yml +++ b/roles/network_plugin/cilium/defaults/main.yml @@ -8,7 +8,7 @@ cilium_enable_ipv4: true cilium_enable_ipv6: false # Cilium agent health port -cilium_agent_health_port: "{%- if cilium_version | regex_replace('v') is version('1.11.6', '>=') -%}9879 {%- else -%} 9876 {%- endif -%}" +cilium_agent_health_port: "{%- if cilium_version | regex_replace('v') is version('1.11.6', '>=') -%}9879{%- else -%}9876{%- endif -%}" # Identity allocation mode selects how identities are shared between cilium # nodes by setting how they are stored. The options are "crd" or "kvstore". 
diff --git a/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2 b/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2 index 200b9efe6..14189656f 100644 --- a/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2 +++ b/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2 @@ -124,7 +124,7 @@ spec: mountPath: /var/lib/etcd-config readOnly: true - name: etcd-secrets - mountPath: "{{cilium_cert_dir}}" + mountPath: "{{ cilium_cert_dir }}" readOnly: true {% endif %} {% for volume_mount in cilium_operator_extra_volume_mounts %} @@ -163,7 +163,7 @@ spec: # To read the k8s etcd secrets in case the user might want to use TLS - name: etcd-secrets hostPath: - path: "{{cilium_cert_dir}}" + path: "{{ cilium_cert_dir }}" {% endif %} {% for volume in cilium_operator_extra_volumes %} - {{ volume | to_nice_yaml(indent=2) | indent(10) }} diff --git a/roles/network_plugin/cilium/templates/cilium/config.yml.j2 b/roles/network_plugin/cilium/templates/cilium/config.yml.j2 index 700dd0841..399d8ced8 100644 --- a/roles/network_plugin/cilium/templates/cilium/config.yml.j2 +++ b/roles/network_plugin/cilium/templates/cilium/config.yml.j2 @@ -104,7 +104,7 @@ data: # # If this option is set to "false" during an upgrade from 1.3 or earlier to # 1.4 or later, then it may cause one-time disruptions during the upgrade. 
- preallocate-bpf-maps: "{{cilium_preallocate_bpf_maps}}" + preallocate-bpf-maps: "{{ cilium_preallocate_bpf_maps }}" # Regular expression matching compatible Istio sidecar istio-proxy # container image names @@ -251,6 +251,6 @@ data: {% for cidr in cilium_non_masquerade_cidrs %} - {{ cidr }} {% endfor %} - masqLinkLocal: {{ cilium_masq_link_local|bool }} + masqLinkLocal: {{ cilium_masq_link_local | bool }} resyncInterval: "{{ cilium_ip_masq_resync_interval }}" {% endif %} diff --git a/roles/network_plugin/cilium/templates/cilium/ds.yml.j2 b/roles/network_plugin/cilium/templates/cilium/ds.yml.j2 index 13c5d8465..38360342b 100644 --- a/roles/network_plugin/cilium/templates/cilium/ds.yml.j2 +++ b/roles/network_plugin/cilium/templates/cilium/ds.yml.j2 @@ -28,7 +28,7 @@ spec: spec: containers: - name: cilium-agent - image: "{{cilium_image_repo}}:{{cilium_image_tag}}" + image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}" imagePullPolicy: {{ k8s_image_pull_policy }} command: - cilium-agent @@ -160,7 +160,7 @@ spec: mountPath: /var/lib/etcd-config readOnly: true - name: etcd-secrets - mountPath: "{{cilium_cert_dir}}" + mountPath: "{{ cilium_cert_dir }}" readOnly: true {% endif %} - name: clustermesh-secrets @@ -201,7 +201,7 @@ spec: initContainers: {% if cilium_version | regex_replace('v') is version('1.11', '>=') and cilium_cgroup_auto_mount %} - name: mount-cgroup - image: "{{cilium_image_repo}}:{{cilium_image_tag}}" + image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}" imagePullPolicy: {{ k8s_image_pull_policy }} env: - name: CGROUP_ROOT @@ -230,7 +230,7 @@ spec: {% endif %} {% if cilium_version | regex_replace('v') is version('1.11.7', '>=') %} - name: apply-sysctl-overwrites - image: "{{cilium_image_repo}}:{{cilium_image_tag}}" + image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}" imagePullPolicy: {{ k8s_image_pull_policy }} env: - name: BIN_PATH @@ -256,7 +256,7 @@ spec: privileged: true {% endif %} - name: clean-cilium-state - image: 
"{{cilium_image_repo}}:{{cilium_image_tag}}" + image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}" imagePullPolicy: {{ k8s_image_pull_policy }} command: - /init-container.sh @@ -309,7 +309,7 @@ spec: {% if cilium_version | regex_replace('v') is version('1.13.1', '>=') %} # Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent - name: install-cni-binaries - image: "{{cilium_image_repo}}:{{cilium_image_tag}}" + image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}" imagePullPolicy: {{ k8s_image_pull_policy }} command: - "/install-plugin.sh" @@ -398,7 +398,7 @@ spec: # To read the k8s etcd secrets in case the user might want to use TLS - name: etcd-secrets hostPath: - path: "{{cilium_cert_dir}}" + path: "{{ cilium_cert_dir }}" {% endif %} # To read the clustermesh configuration - name: clustermesh-secrets diff --git a/roles/network_plugin/flannel/defaults/main.yml b/roles/network_plugin/flannel/defaults/main.yml index cd1dcf16d..8d7713bb9 100644 --- a/roles/network_plugin/flannel/defaults/main.yml +++ b/roles/network_plugin/flannel/defaults/main.yml @@ -2,7 +2,7 @@ # Flannel public IP # The address that flannel should advertise as how to access the system # Disabled until https://github.com/coreos/flannel/issues/712 is fixed -# flannel_public_ip: "{{ access_ip|default(ip|default(fallback_ips[inventory_hostname])) }}" +# flannel_public_ip: "{{ access_ip | default(ip | default(fallback_ips[inventory_hostname])) }}" ## interface that should be used for flannel operations ## This is actually an inventory cluster-level item diff --git a/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 b/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 index 472cea219..cee7ccbf4 100644 --- a/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 +++ b/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 @@ -42,23 +42,23 @@ spec: imagePullPolicy: {{ k8s_image_pull_policy }} args: - /kube-ovn/start-controller.sh 
- - --default-cidr={{ kube_pods_subnet }}{% if enable_dual_stack_networks %},{{ kube_ovn_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}{% endif %}{{''}} - - --default-gateway={% if kube_ovn_default_gateway is defined %}{{ kube_ovn_default_gateway }}{% endif %}{{''}} - - --default-gateway-check={{ kube_ovn_default_gateway_check|string }} - - --default-logical-gateway={{ kube_ovn_default_logical_gateway|string }} + - --default-cidr={{ kube_pods_subnet }}{% if enable_dual_stack_networks %},{{ kube_ovn_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}{% endif %}{{ '' }} + - --default-gateway={% if kube_ovn_default_gateway is defined %}{{ kube_ovn_default_gateway }}{% endif %}{{ '' }} + - --default-gateway-check={{ kube_ovn_default_gateway_check | string }} + - --default-logical-gateway={{ kube_ovn_default_logical_gateway | string }} - --default-u2o-interconnection={{ kube_ovn_u2o_interconnection }} - - --default-exclude-ips={% if kube_ovn_default_exclude_ips is defined %}{{ kube_ovn_default_exclude_ips }}{% endif %}{{''}} - - --node-switch-cidr={{ kube_ovn_node_switch_cidr }}{% if enable_dual_stack_networks %},{{ kube_ovn_node_switch_cidr_ipv6 }}{% endif %}{{''}} - - --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{''}} + - --default-exclude-ips={% if kube_ovn_default_exclude_ips is defined %}{{ kube_ovn_default_exclude_ips }}{% endif %}{{ '' }} + - --node-switch-cidr={{ kube_ovn_node_switch_cidr }}{% if enable_dual_stack_networks %},{{ kube_ovn_node_switch_cidr_ipv6 }}{% endif %}{{ '' }} + - --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{ '' }} - --network-type={{ kube_ovn_network_type }} - - --default-interface-name={{ kube_ovn_default_interface_name|default('') }} + - --default-interface-name={{ kube_ovn_default_interface_name | default('') }} - --default-vlan-id={{ 
kube_ovn_default_vlan_id }} - --ls-dnat-mod-dl-dst={{ kube_ovn_ls_dnat_mod_dl_dst }} - --pod-nic-type={{ kube_ovn_pod_nic_type }} - - --enable-lb={{ kube_ovn_enable_lb|string }} - - --enable-np={{ kube_ovn_enable_np|string }} + - --enable-lb={{ kube_ovn_enable_lb | string }} + - --enable-np={{ kube_ovn_enable_np | string }} - --enable-eip-snat={{ kube_ovn_eip_snat_enabled }} - - --enable-external-vpc={{ kube_ovn_enable_external_vpc|string }} + - --enable-external-vpc={{ kube_ovn_enable_external_vpc | string }} - --logtostderr=false - --alsologtostderr=true - --gc-interval=360 @@ -187,11 +187,11 @@ spec: args: - --enable-mirror={{ kube_ovn_traffic_mirror | lower }} - --encap-checksum={{ kube_ovn_encap_checksum | lower }} - - --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{''}} - - --iface={{ kube_ovn_iface|default('') }} + - --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{ '' }} + - --iface={{ kube_ovn_iface | default('') }} - --dpdk-tunnel-iface={{ kube_ovn_dpdk_tunnel_iface }} - --network-type={{ kube_ovn_network_type }} - - --default-interface-name={{ kube_ovn_default_interface_name|default('') }} + - --default-interface-name={{ kube_ovn_default_interface_name | default('') }} {% if kube_ovn_mtu is defined %} - --mtu={{ kube_ovn_mtu }} {% endif %} @@ -359,7 +359,7 @@ spec: command: - /kube-ovn/kube-ovn-pinger args: - - --external-address={{ kube_ovn_external_address }}{% if enable_dual_stack_networks %},{{ kube_ovn_external_address_ipv6 }}{% endif %}{{''}} + - --external-address={{ kube_ovn_external_address }}{% if enable_dual_stack_networks %},{{ kube_ovn_external_address_ipv6 }}{% endif %}{{ '' }} - --external-dns={{ kube_ovn_external_dns }} - --logtostderr=false - --alsologtostderr=true @@ -668,6 +668,6 @@ data: ic-db-host: "{{ kube_ovn_ic_dbhost }}" ic-nb-port: "6645" ic-sb-port: 
"6646" - gw-nodes: "{{ kube_ovn_central_hosts|join(',') }}" + gw-nodes: "{{ kube_ovn_central_hosts | join(',') }}" auto-route: "{{ kube_ovn_ic_autoroute | lower }}" {% endif %} diff --git a/roles/network_plugin/macvlan/handlers/main.yml b/roles/network_plugin/macvlan/handlers/main.yml index 88997c92d..aba4cbc00 100644 --- a/roles/network_plugin/macvlan/handlers/main.yml +++ b/roles/network_plugin/macvlan/handlers/main.yml @@ -7,6 +7,7 @@ - name: Macvlan | reload network service: + # noqa: jinja[spacing] name: >- {% if ansible_os_family == "RedHat" -%} network diff --git a/roles/network_plugin/multus/defaults/main.yml b/roles/network_plugin/multus/defaults/main.yml index cbeb4cb32..c6b7ecd97 100644 --- a/roles/network_plugin/multus/defaults/main.yml +++ b/roles/network_plugin/multus/defaults/main.yml @@ -3,7 +3,7 @@ multus_conf_file: "auto" multus_cni_conf_dir_host: "/etc/cni/net.d" multus_cni_bin_dir_host: "/opt/cni/bin" multus_cni_run_dir_host: "/run" -multus_cni_conf_dir: "{{ ('/host', multus_cni_conf_dir_host) | join }}" +multus_cni_conf_dir: "{{ ('/host', multus_cni_conf_dir_host) | join }}" multus_cni_bin_dir: "{{ ('/host', multus_cni_bin_dir_host) | join }}" multus_cni_run_dir: "{{ ('/host', multus_cni_run_dir_host) | join }}" multus_cni_version: "0.4.0" diff --git a/roles/network_plugin/multus/tasks/main.yml b/roles/network_plugin/multus/tasks/main.yml index ab76268a5..1428929cc 100644 --- a/roles/network_plugin/multus/tasks/main.yml +++ b/roles/network_plugin/multus/tasks/main.yml @@ -14,7 +14,7 @@ - name: Multus | Check container engine type set_fact: - container_manager_types: "{{ ansible_play_hosts_all|map('extract', hostvars, ['container_manager'])|list|unique }}" + container_manager_types: "{{ ansible_play_hosts_all | map('extract', hostvars, ['container_manager']) | list | unique }}" - name: Multus | Copy manifest templates template: @@ -28,7 +28,7 @@ register: multus_manifest_2 vars: query: "*|[?container_manager=='{{ container_manager 
}}']|[0].inventory_hostname" - vars_from_node: "{{ hostvars|json_query(query) }}" + vars_from_node: "{{ hostvars | json_query(query) }}" delegate_to: "{{ groups['kube_control_plane'][0] }}" when: - item.engine in container_manager_types diff --git a/roles/network_plugin/multus/templates/multus-daemonset.yml.j2 b/roles/network_plugin/multus/templates/multus-daemonset.yml.j2 index 19f91bae8..10c42c175 100644 --- a/roles/network_plugin/multus/templates/multus-daemonset.yml.j2 +++ b/roles/network_plugin/multus/templates/multus-daemonset.yml.j2 @@ -2,7 +2,7 @@ kind: DaemonSet apiVersion: apps/v1 metadata: -{% if container_manager_types|length >= 2 %} +{% if container_manager_types | length >= 2 %} name: kube-multus-{{ container_manager }}-{{ image_arch }} {% else %} name: kube-multus-ds-{{ image_arch }} @@ -26,7 +26,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet nodeSelector: kubernetes.io/arch: {{ image_arch }} -{% if container_manager_types|length >= 2 %} +{% if container_manager_types | length >= 2 %} kubespray.io/container_manager: {{ container_manager }} {% endif %} tolerations: @@ -62,7 +62,7 @@ spec: mountPropagation: HostToContainer {% endif %} - name: cni - mountPath: {{ multus_cni_conf_dir }} + mountPath: {{ multus_cni_conf_dir }} - name: cnibin mountPath: {{ multus_cni_bin_dir }} volumes: diff --git a/roles/recover_control_plane/etcd/tasks/main.yml b/roles/recover_control_plane/etcd/tasks/main.yml index 8c6deda5c..0ebd624c8 100644 --- a/roles/recover_control_plane/etcd/tasks/main.yml +++ b/roles/recover_control_plane/etcd/tasks/main.yml @@ -75,7 +75,7 @@ - has_quorum - name: Remove broken cluster members - command: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ','').split(',')[0] }}" + command: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ', '').split(',')[0] }}" environment: ETCDCTL_API: "3" ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" @@ -89,4 +89,4 @@ - inventory_hostname in groups['broken_etcd'] - not healthy - has_quorum - - 
hostvars[item[0]]['etcd_member_name'] == item[1].replace(' ','').split(',')[2] + - hostvars[item[0]]['etcd_member_name'] == item[1].replace(' ', '').split(',')[2] diff --git a/roles/recover_control_plane/post-recover/tasks/main.yml b/roles/recover_control_plane/post-recover/tasks/main.yml index b1cd5e5ef..a62f9127e 100644 --- a/roles/recover_control_plane/post-recover/tasks/main.yml +++ b/roles/recover_control_plane/post-recover/tasks/main.yml @@ -2,6 +2,7 @@ # TODO: Figure out why kubeadm does not fix this - name: Set etcd-servers fact set_fact: + # noqa: jinja[spacing] etcd_servers: >- {% for host in groups['etcd'] -%} {% if not loop.last -%} diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml index ff8a06d84..61694547f 100644 --- a/roles/remove-node/post-remove/tasks/main.yml +++ b/roles/remove-node/post-remove/tasks/main.yml @@ -1,11 +1,11 @@ --- - name: remove-node | Delete node - command: "{{ kubectl }} delete node {{ kube_override_hostname|default(inventory_hostname) }}" - delegate_to: "{{ groups['kube_control_plane']|first }}" + command: "{{ kubectl }} delete node {{ kube_override_hostname | default(inventory_hostname) }}" + delegate_to: "{{ groups['kube_control_plane'] | first }}" when: - groups['kube_control_plane'] | length > 0 # ignore servers that are not nodes - - inventory_hostname in groups['k8s_cluster'] and kube_override_hostname|default(inventory_hostname) in nodes.stdout_lines + - inventory_hostname in groups['k8s_cluster'] and kube_override_hostname | default(inventory_hostname) in nodes.stdout_lines retries: "{{ delete_node_retries }}" # Sometimes the api-server can have a short window of indisponibility when we delete a master node delay: "{{ delete_node_delay_seconds }}" diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml index 31d959c7c..d16df1a36 100644 --- a/roles/remove-node/pre-remove/tasks/main.yml +++ 
b/roles/remove-node/pre-remove/tasks/main.yml @@ -5,7 +5,7 @@ register: nodes when: - groups['kube_control_plane'] | length > 0 - delegate_to: "{{ groups['kube_control_plane']|first }}" + delegate_to: "{{ groups['kube_control_plane'] | first }}" changed_when: false run_once: true @@ -16,14 +16,14 @@ --ignore-daemonsets --grace-period {{ drain_grace_period }} --timeout {{ drain_timeout }} - --delete-emptydir-data {{ kube_override_hostname|default(inventory_hostname) }} + --delete-emptydir-data {{ kube_override_hostname | default(inventory_hostname) }} when: - groups['kube_control_plane'] | length > 0 # ignore servers that are not nodes - - kube_override_hostname|default(inventory_hostname) in nodes.stdout_lines + - kube_override_hostname | default(inventory_hostname) in nodes.stdout_lines register: result failed_when: result.rc != 0 and not allow_ungraceful_removal - delegate_to: "{{ groups['kube_control_plane']|first }}" + delegate_to: "{{ groups['kube_control_plane'] | first }}" until: result.rc == 0 or allow_ungraceful_removal retries: "{{ drain_retries }}" delay: "{{ drain_retry_delay_seconds }}" @@ -32,12 +32,12 @@ command: >- {{ kubectl }} get volumeattachments -o go-template={% raw %}'{{ range .items }}{{ .spec.nodeName }}{{ "\n" }}{{ end }}'{% endraw %} register: nodes_with_volumes - delegate_to: "{{ groups['kube_control_plane']|first }}" + delegate_to: "{{ groups['kube_control_plane'] | first }}" changed_when: false - until: not (kube_override_hostname|default(inventory_hostname) in nodes_with_volumes.stdout_lines) + until: not (kube_override_hostname | default(inventory_hostname) in nodes_with_volumes.stdout_lines) retries: 3 delay: "{{ drain_grace_period }}" when: - groups['kube_control_plane'] | length > 0 - not allow_ungraceful_removal - - kube_override_hostname|default(inventory_hostname) in nodes.stdout_lines + - kube_override_hostname | default(inventory_hostname) in nodes.stdout_lines diff --git a/roles/remove-node/remove-etcd-node/tasks/main.yml 
b/roles/remove-node/remove-etcd-node/tasks/main.yml index f7729ea79..0279018d4 100644 --- a/roles/remove-node/remove-etcd-node/tasks/main.yml +++ b/roles/remove-node/remove-etcd-node/tasks/main.yml @@ -9,7 +9,7 @@ - inventory_hostname in groups['etcd'] - ip is not defined - access_ip is not defined - delegate_to: "{{ groups['etcd']|first }}" + delegate_to: "{{ groups['etcd'] | first }}" failed_when: false - name: Set node IP @@ -37,22 +37,22 @@ - facts environment: ETCDCTL_API: "3" - ETCDCTL_CERT: "{{ kube_cert_dir + '/etcd/server.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '.pem' }}" - ETCDCTL_KEY: "{{ kube_cert_dir + '/etcd/server.key' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '-key.pem' }}" + ETCDCTL_CERT: "{{ kube_cert_dir + '/etcd/server.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd'] | first + '.pem' }}" + ETCDCTL_KEY: "{{ kube_cert_dir + '/etcd/server.key' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd'] | first + '-key.pem' }}" ETCDCTL_CACERT: "{{ kube_cert_dir + '/etcd/ca.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/ca.pem' }}" ETCDCTL_ENDPOINTS: "https://127.0.0.1:2379" - delegate_to: "{{ groups['etcd']|first }}" + delegate_to: "{{ groups['etcd'] | first }}" when: inventory_hostname in groups['etcd'] - name: Remove etcd member from cluster command: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}" environment: ETCDCTL_API: "3" - ETCDCTL_CERT: "{{ kube_cert_dir + '/etcd/server.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '.pem' }}" - ETCDCTL_KEY: "{{ kube_cert_dir + '/etcd/server.key' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '-key.pem' }}" + ETCDCTL_CERT: "{{ kube_cert_dir + '/etcd/server.crt' if etcd_deployment_type == 'kubeadm' else 
etcd_cert_dir + '/admin-' + groups['etcd'] | first + '.pem' }}" + ETCDCTL_KEY: "{{ kube_cert_dir + '/etcd/server.key' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd'] | first + '-key.pem' }}" ETCDCTL_CACERT: "{{ kube_cert_dir + '/etcd/ca.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/ca.pem' }}" ETCDCTL_ENDPOINTS: "https://127.0.0.1:2379" - delegate_to: "{{ groups['etcd']|first }}" + delegate_to: "{{ groups['etcd'] | first }}" when: - inventory_hostname in groups['etcd'] - etcd_member_id.stdout | length > 0 diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 2d06b5c43..534033d18 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -205,7 +205,7 @@ - nat - mangle - raw - when: flush_iptables|bool + when: flush_iptables | bool tags: - iptables @@ -219,7 +219,7 @@ - nat - mangle - raw - when: flush_iptables|bool and enable_dual_stack_networks + when: flush_iptables | bool and enable_dual_stack_networks tags: - ip6tables @@ -254,7 +254,7 @@ - name: reset | Remove nodelocaldns command: "ip link del nodelocaldns" when: - - enable_nodelocaldns|default(false)|bool + - enable_nodelocaldns | default(false) | bool - nodelocaldns_device.stat.exists - name: reset | Check whether /var/lib/kubelet directory exists @@ -279,7 +279,7 @@ state: touch attributes: "-i" mode: 0644 - loop: "{{ var_lib_kubelet_files_dirs_w_attrs.stdout_lines|select('search', 'Immutable')|list }}" + loop: "{{ var_lib_kubelet_files_dirs_w_attrs.stdout_lines | select('search', 'Immutable') | list }}" loop_control: loop_var: file_dir_line label: "{{ filedir_path }}" @@ -428,9 +428,10 @@ - name: reset | Restart network service: + # noqa: jinja[spacing] name: >- {% if ansible_os_family == "RedHat" -%} - {%- if ansible_distribution_major_version|int >= 8 or is_fedora_coreos or ansible_distribution == "Fedora" -%} + {%- if ansible_distribution_major_version | int >= 8 or is_fedora_coreos or ansible_distribution == 
"Fedora" -%} NetworkManager {%- else -%} network diff --git a/roles/upgrade/post-upgrade/tasks/main.yml b/roles/upgrade/post-upgrade/tasks/main.yml index d1b1af0be..fb33dcf93 100644 --- a/roles/upgrade/post-upgrade/tasks/main.yml +++ b/roles/upgrade/post-upgrade/tasks/main.yml @@ -1,12 +1,12 @@ --- - name: wait for cilium when: - - needs_cordoning|default(false) + - needs_cordoning | default(false) - kube_network_plugin == 'cilium' command: > {{ kubectl }} wait pod -n kube-system -l k8s-app=cilium - --field-selector 'spec.nodeName=={{ kube_override_hostname|default(inventory_hostname) }}' + --field-selector 'spec.nodeName=={{ kube_override_hostname | default(inventory_hostname) }}' --for=condition=Ready --timeout={{ upgrade_post_cilium_wait_timeout }} delegate_to: "{{ groups['kube_control_plane'][0] }}" @@ -26,7 +26,7 @@ - upgrade_node_post_upgrade_pause_seconds != 0 - name: Uncordon node - command: "{{ kubectl }} uncordon {{ kube_override_hostname|default(inventory_hostname) }}" + command: "{{ kubectl }} uncordon {{ kube_override_hostname | default(inventory_hostname) }}" delegate_to: "{{ groups['kube_control_plane'][0] }}" when: - - needs_cordoning|default(false) + - needs_cordoning | default(false) diff --git a/roles/upgrade/pre-upgrade/tasks/main.yml b/roles/upgrade/pre-upgrade/tasks/main.yml index 210818b3c..a4b89f822 100644 --- a/roles/upgrade/pre-upgrade/tasks/main.yml +++ b/roles/upgrade/pre-upgrade/tasks/main.yml @@ -18,7 +18,7 @@ # Node NotReady: type = ready, status = Unknown - name: See if node is in ready state command: > - {{ kubectl }} get node {{ kube_override_hostname|default(inventory_hostname) }} + {{ kubectl }} get node {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{ range .status.conditions[?(@.type == "Ready")].status }{ @ }{ end }' register: kubectl_node_ready delegate_to: "{{ groups['kube_control_plane'][0] }}" @@ -29,7 +29,7 @@ # else unschedulable key doesn't exist - name: See if node is schedulable command: > - 
{{ kubectl }} get node {{ kube_override_hostname|default(inventory_hostname) }} + {{ kubectl }} get node {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{ .spec.unschedulable }' register: kubectl_node_schedulable delegate_to: "{{ groups['kube_control_plane'][0] }}" @@ -38,6 +38,7 @@ - name: Set if node needs cordoning set_fact: + # noqa: jinja[spacing] needs_cordoning: >- {% if (kubectl_node_ready.stdout == "True" and not kubectl_node_schedulable.stdout) or upgrade_node_always_cordon -%} true @@ -48,7 +49,7 @@ - name: Node draining block: - name: Cordon node - command: "{{ kubectl }} cordon {{ kube_override_hostname|default(inventory_hostname) }}" + command: "{{ kubectl }} cordon {{ kube_override_hostname | default(inventory_hostname) }}" delegate_to: "{{ groups['kube_control_plane'][0] }}" changed_when: true @@ -76,7 +77,7 @@ --ignore-daemonsets --grace-period {{ hostvars['localhost']['drain_grace_period_after_failure'] | default(drain_grace_period) }} --timeout {{ hostvars['localhost']['drain_timeout_after_failure'] | default(drain_timeout) }} - --delete-emptydir-data {{ kube_override_hostname|default(inventory_hostname) }} + --delete-emptydir-data {{ kube_override_hostname | default(inventory_hostname) }} {% if drain_pod_selector %}--pod-selector '{{ drain_pod_selector }}'{% endif %} when: drain_nodes register: result @@ -104,7 +105,7 @@ --ignore-daemonsets --grace-period {{ drain_fallback_grace_period }} --timeout {{ drain_fallback_timeout }} - --delete-emptydir-data {{ kube_override_hostname|default(inventory_hostname) }} + --delete-emptydir-data {{ kube_override_hostname | default(inventory_hostname) }} {% if drain_pod_selector %}--pod-selector '{{ drain_pod_selector }}'{% endif %} --disable-eviction register: drain_fallback_result @@ -119,11 +120,11 @@ rescue: - name: Set node back to schedulable - command: "{{ kubectl }} uncordon {{ kube_override_hostname|default(inventory_hostname) }}" + command: "{{ kubectl }} uncordon {{ 
kube_override_hostname | default(inventory_hostname) }}" when: upgrade_node_uncordon_after_drain_failure - name: Fail after rescue fail: - msg: "Failed to drain node {{ kube_override_hostname|default(inventory_hostname) }}" + msg: "Failed to drain node {{ kube_override_hostname | default(inventory_hostname) }}" when: upgrade_node_fail_if_drain_fails delegate_to: "{{ groups['kube_control_plane'][0] }}" when: diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml index feb309d38..02fbe4694 100644 --- a/scripts/collect-info.yaml +++ b/scripts/collect-info.yaml @@ -84,7 +84,7 @@ when: '{{ kube_network_plugin in ["canal", "calico"] }}' - name: helm_show_releases_history cmd: "for i in `{{ bin_dir }}/helm list -q`; do {{ bin_dir }}/helm history ${i} --col-width=0; done" - when: "{{ helm_enabled|default(true) }}" + when: "{{ helm_enabled | default(true) }}" logs: - /var/log/syslog @@ -137,7 +137,7 @@ - name: Pack results and logs community.general.archive: path: "/tmp/{{ archive_dirname }}" - dest: "{{ dir|default('.') }}/logs.tar.gz" + dest: "{{ dir | default('.') }}/logs.tar.gz" remove: true mode: 0640 delegate_to: localhost diff --git a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml index 5ac47b00c..68eb6cf81 100644 --- a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml +++ b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml @@ -12,29 +12,29 @@ dest: "{{ images_dir }}/{{ item.value.filename }}" checksum: "{{ item.value.checksum }}" mode: 0644 - loop: "{{ images|dict2items }}" + loop: "{{ images | dict2items }}" - name: Unxz compressed images command: unxz --force {{ images_dir }}/{{ item.value.filename }} - loop: "{{ images|dict2items }}" + loop: "{{ images | dict2items }}" when: - item.value.filename.endswith('.xz') - name: Convert images which is not in qcow2 format command: qemu-img convert -O qcow2 {{ images_dir }}/{{ item.value.filename.rstrip('.xz') 
}} {{ images_dir }}/{{ item.key }}.qcow2 - loop: "{{ images|dict2items }}" + loop: "{{ images | dict2items }}" when: - - not (item.value.converted|bool) + - not (item.value.converted | bool) - name: Make sure all images are ending with qcow2 command: cp {{ images_dir }}/{{ item.value.filename.rstrip('.xz') }} {{ images_dir }}/{{ item.key }}.qcow2 - loop: "{{ images|dict2items }}" + loop: "{{ images | dict2items }}" when: - - item.value.converted|bool + - item.value.converted | bool - name: Resize images command: qemu-img resize {{ images_dir }}/{{ item.key }}.qcow2 +8G - loop: "{{ images|dict2items }}" + loop: "{{ images | dict2items }}" # STEP 2: Include the images inside a container - name: Template default Dockerfile @@ -45,14 +45,14 @@ - name: Create docker images for each OS command: docker build -t {{ registry }}/vm-{{ item.key }}:{{ item.value.tag }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }} - loop: "{{ images|dict2items }}" + loop: "{{ images | dict2items }}" - name: docker login command: docker login -u="{{ docker_user }}" -p="{{ docker_password }}" "{{ docker_host }}" - name: docker push image command: docker push {{ registry }}/vm-{{ item.key }}:{{ item.value.tag }} - loop: "{{ images|dict2items }}" + loop: "{{ images | dict2items }}" - name: docker logout command: docker logout -u="{{ docker_user }}" "{{ docker_host }}" diff --git a/tests/cloud_playbooks/create-do.yml b/tests/cloud_playbooks/create-do.yml index b4d2125ac..d675527f6 100644 --- a/tests/cloud_playbooks/create-do.yml +++ b/tests/cloud_playbooks/create-do.yml @@ -49,7 +49,7 @@ tasks: - name: replace_test_id set_fact: - test_name: "{{ test_id |regex_replace('\\.', '-') }}" + test_name: "{{ test_id | regex_replace('\\.', '-') }}" - name: show vars debug: @@ -57,6 +57,7 @@ - name: set instance names set_fact: + # noqa: jinja[spacing] instance_names: >- {%- if mode in ['separate', 'ha'] -%} ["k8s-{{ test_name }}-1", "k8s-{{ test_name }}-2", "k8s-{{ test_name }}-3"] @@ -67,7 
+68,7 @@ - name: Manage DO instances | {{ state }} community.digitalocean.digital_ocean: unique_name: yes - api_token: "{{ lookup('env','DO_API_TOKEN') }}" + api_token: "{{ lookup('env', 'DO_API_TOKEN') }}" command: "droplet" image_id: "{{ cloud_image }}" name: "{{ item }}" diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml index ccb4bce1d..c3f17f450 100644 --- a/tests/cloud_playbooks/create-gce.yml +++ b/tests/cloud_playbooks/create-gce.yml @@ -14,10 +14,11 @@ - name: replace_test_id set_fact: - test_name: "{{ test_id |regex_replace('\\.', '-') }}" + test_name: "{{ test_id | regex_replace('\\.', '-') }}" - name: set instance names set_fact: + # noqa: jinja[spacing] instance_names: >- {%- if mode in ['separate', 'separate-scale', 'ha', 'ha-scale'] -%} k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3 @@ -39,7 +40,7 @@ credentials_file: "{{ gce_credentials_file | default(omit) }}" project_id: "{{ gce_project_id }}" zone: "{{ cloud_region }}" - metadata: '{"test_id": "{{ test_id }}", "network": "{{ kube_network_plugin }}", "startup-script": "{{ startup_script|default("") }}"}' + metadata: '{"test_id": "{{ test_id }}", "network": "{{ kube_network_plugin }}", "startup-script": "{{ startup_script | default("") }}"}' tags: "build-{{ test_name }},{{ kube_network_plugin }}" ip_forward: yes service_account_permissions: ['compute-rw'] @@ -59,7 +60,7 @@ - name: Make group_vars directory file: - path: "{{ inventory_path|dirname }}/group_vars" + path: "{{ inventory_path | dirname }}/group_vars" state: directory mode: 0755 when: mode in ['scale', 'separate-scale', 'ha-scale'] @@ -67,13 +68,13 @@ - name: Template fake hosts group vars # noqa no-relative-paths - CI templates are not in role_path template: src: ../templates/fake_hosts.yml.j2 - dest: "{{ inventory_path|dirname }}/group_vars/fake_hosts.yml" + dest: "{{ inventory_path | dirname }}/group_vars/fake_hosts.yml" mode: 0644 when: mode in ['scale', 'separate-scale', 
'ha-scale'] - name: Delete group_vars directory file: - path: "{{ inventory_path|dirname }}/group_vars" + path: "{{ inventory_path | dirname }}/group_vars" state: absent recurse: yes when: delete_group_vars diff --git a/tests/cloud_playbooks/delete-gce.yml b/tests/cloud_playbooks/delete-gce.yml index 4d118711b..f8c5d6e94 100644 --- a/tests/cloud_playbooks/delete-gce.yml +++ b/tests/cloud_playbooks/delete-gce.yml @@ -8,10 +8,11 @@ tasks: - name: replace_test_id set_fact: - test_name: "{{ test_id |regex_replace('\\.', '-') }}" + test_name: "{{ test_id | regex_replace('\\.', '-') }}" - name: set instance names set_fact: + # noqa: jinja[spacing] instance_names: >- {%- if mode in ['separate', 'ha'] -%} k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3 diff --git a/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml b/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml index 8ccf5adc5..688b580cd 100644 --- a/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml +++ b/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml @@ -17,7 +17,7 @@ vms_files: "{{ vms_files + [lookup('ansible.builtin.template', 'vm.yml.j2') | from_yaml] }}" vars: vms_files: [] - loop: "{{ range(1, vm_count|int + 1, 1) | list }}" + loop: "{{ range(1, vm_count | int + 1, 1) | list }}" loop_control: index_var: vm_id @@ -33,7 +33,7 @@ executable: /bin/bash changed_when: false register: vm_ips - loop: "{{ range(1, vm_count|int + 1, 1) | list }}" + loop: "{{ range(1, vm_count | int + 1, 1) | list }}" loop_control: index_var: vm_id retries: 20 diff --git a/tests/cloud_playbooks/roles/packet-ci/tasks/main.yml b/tests/cloud_playbooks/roles/packet-ci/tasks/main.yml index 9d8e105db..37e61cd62 100644 --- a/tests/cloud_playbooks/roles/packet-ci/tasks/main.yml +++ b/tests/cloud_playbooks/roles/packet-ci/tasks/main.yml @@ -5,7 +5,7 @@ - name: Set VM count needed for CI test_id set_fact: - vm_count: "{%- if mode in ['separate', 'separate-scale', 'ha', 'ha-scale', 
'ha-recover', 'ha-recover-noquorum'] -%}{{ 3|int }}{%- elif mode == 'aio' -%}{{ 1|int }}{%- else -%}{{ 2|int }}{%- endif -%}" + vm_count: "{%- if mode in ['separate', 'separate-scale', 'ha', 'ha-scale', 'ha-recover', 'ha-recover-noquorum'] -%}{{ 3 | int }}{%- elif mode == 'aio' -%}{{ 1 | int }}{%- else -%}{{ 2 | int }}{%- endif -%}" - import_tasks: cleanup-old-vms.yml diff --git a/tests/cloud_playbooks/upload-logs-gcs.yml b/tests/cloud_playbooks/upload-logs-gcs.yml index bf0ffd9e9..73ae6c5f5 100644 --- a/tests/cloud_playbooks/upload-logs-gcs.yml +++ b/tests/cloud_playbooks/upload-logs-gcs.yml @@ -73,7 +73,7 @@ headers: '{"Content-Encoding": "x-gzip"}' gs_access_key: "{{ gs_key }}" gs_secret_key: "{{ gs_skey }}" - expiration: "{{ expire_days * 36000|int }}" + expiration: "{{ expire_days * 36000 | int }}" failed_when: false no_log: True diff --git a/tests/common/_docker_hub_registry_mirror.yml b/tests/common/_docker_hub_registry_mirror.yml index 87570f71c..e6298b70e 100644 --- a/tests/common/_docker_hub_registry_mirror.yml +++ b/tests/common/_docker_hub_registry_mirror.yml @@ -27,8 +27,8 @@ netcheck_server_image_repo: "{{ quay_image_repo }}/kubespray/k8s-netchecker-serv nginx_image_repo: "{{ quay_image_repo }}/kubespray/nginx" -flannel_image_repo: "{{ quay_image_repo}}/kubespray/flannel" -flannel_init_image_repo: "{{ quay_image_repo}}/kubespray/flannel-cni-plugin" +flannel_image_repo: "{{ quay_image_repo }}/kubespray/flannel" +flannel_init_image_repo: "{{ quay_image_repo }}/kubespray/flannel-cni-plugin" # Kubespray settings for tests deploy_netchecker: true diff --git a/tests/templates/fake_hosts.yml.j2 b/tests/templates/fake_hosts.yml.j2 index 673109213..c172b78b0 100644 --- a/tests/templates/fake_hosts.yml.j2 +++ b/tests/templates/fake_hosts.yml.j2 @@ -1,3 +1,3 @@ ansible_default_ipv4: address: 255.255.255.255 -ansible_hostname: "{{ '{{' }}inventory_hostname}}" +ansible_hostname: "{{ '{{' }}inventory_hostname }}" diff --git a/tests/testcases/030_check-network.yml 
b/tests/testcases/030_check-network.yml index 78a1021d6..b5f1c2b6e 100644 --- a/tests/testcases/030_check-network.yml +++ b/tests/testcases/030_check-network.yml @@ -168,8 +168,8 @@ - name: Set networking facts set_fact: kube_pods_subnet: 10.233.64.0/18 - pod_names: "{{ (pods.stdout | from_json)['items'] | map(attribute = 'metadata.name') | list }}" - pod_ips: "{{ (pods.stdout | from_json)['items'] | selectattr('status.podIP', 'defined') | map(attribute = 'status.podIP') | list }}" + pod_names: "{{ (pods.stdout | from_json)['items'] | map(attribute='metadata.name') | list }}" + pod_ips: "{{ (pods.stdout | from_json)['items'] | selectattr('status.podIP', 'defined') | map(attribute='status.podIP') | list }}" pods_hostnet: | {% set list = hostnet_pods.stdout.split(" ") %} {{ list }} diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml index 50a8136d2..0542e1245 100644 --- a/tests/testcases/040_check-network-adv.yml +++ b/tests/testcases/040_check-network-adv.yml @@ -7,7 +7,7 @@ executable: /bin/bash when: - (calico_ipip_mode is defined and calico_ipip_mode != 'Never' or cloud_provider is defined) - - kube_network_plugin|default('calico') == 'calico' + - kube_network_plugin | default('calico') == 'calico' - hosts: k8s_cluster vars: @@ -44,7 +44,7 @@ args: executable: /bin/bash register: nca_pod - until: nca_pod.stdout_lines|length >= groups['k8s_cluster']|intersect(ansible_play_hosts)|length * 2 + until: nca_pod.stdout_lines | length >= groups['k8s_cluster'] | intersect(ansible_play_hosts) | length * 2 retries: 3 delay: 10 failed_when: false @@ -73,9 +73,9 @@ register: agents retries: 18 delay: "{{ agent_report_interval }}" - until: agents.content|length > 0 and + until: agents.content | length > 0 and agents.content[0] == '{' and - agents.content|from_json|length >= groups['k8s_cluster']|intersect(ansible_play_hosts)|length * 2 + agents.content | from_json | length >= groups['k8s_cluster'] | intersect(ansible_play_hosts) | 
length * 2 failed_when: false no_log: false @@ -89,7 +89,7 @@ register: connectivity_check retries: 3 delay: "{{ agent_report_interval }}" - until: connectivity_check.content|length > 0 and + until: connectivity_check.content | length > 0 and connectivity_check.content[0] == '{' no_log: false failed_when: false @@ -200,7 +200,7 @@ executable: /bin/bash when: - inventory_hostname == groups['kube_control_plane'][0] - - kube_network_plugin_multus|default(false)|bool + - kube_network_plugin_multus | default(false) | bool - name: Annotate pod with macvlan network # We cannot use only shell: below because Ansible will render the text @@ -226,7 +226,7 @@ executable: /bin/bash when: - inventory_hostname == groups['kube_control_plane'][0] - - kube_network_plugin_multus|default(false)|bool + - kube_network_plugin_multus | default(false) | bool - name: Check secondary macvlan interface command: "{{ bin_dir }}/kubectl exec samplepod -- ip addr show dev net1" @@ -236,4 +236,4 @@ changed_when: false when: - inventory_hostname == groups['kube_control_plane'][0] - - kube_network_plugin_multus|default(false)|bool + - kube_network_plugin_multus | default(false) | bool