diff --git a/.ansible-lint b/.ansible-lint
index c44f782b6..ede661355 100644
--- a/.ansible-lint
+++ b/.ansible-lint
@@ -9,6 +9,5 @@ skip_list:
   - '305'
   - '306'
   - '404'
-  - '502'
   - '503'
   - '701'
diff --git a/contrib/azurerm/roles/generate-inventory/tasks/main.yml b/contrib/azurerm/roles/generate-inventory/tasks/main.yml
index 409555fd0..20a06e10c 100644
--- a/contrib/azurerm/roles/generate-inventory/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-inventory/tasks/main.yml
@@ -4,7 +4,8 @@
   command: azure vm list-ip-address --json {{ azure_resource_group }}
   register: vm_list_cmd
 
-- set_fact:
+- name: Set vm_list
+  set_fact:
     vm_list: "{{ vm_list_cmd.stdout }}"
 
 - name: Generate inventory
diff --git a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
index 1772b1c29..f639e64c7 100644
--- a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
@@ -8,7 +8,8 @@
   command: az vm list -o json --resource-group {{ azure_resource_group }}
   register: vm_list_cmd
 
-- set_fact:
+- name: Set VM IP and roles lists
+  set_fact:
     vm_ip_list: "{{ vm_ip_list_cmd.stdout }}"
     vm_roles_list: "{{ vm_list_cmd.stdout }}"
diff --git a/contrib/azurerm/roles/generate-templates/tasks/main.yml b/contrib/azurerm/roles/generate-templates/tasks/main.yml
index 92a0e87c9..489250a98 100644
--- a/contrib/azurerm/roles/generate-templates/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-templates/tasks/main.yml
@@ -1,13 +1,16 @@
 ---
-- set_fact:
+- name: Set base_dir
+  set_fact:
     base_dir: "{{ playbook_dir }}/.generated/"
 
-- file:
+- name: Create base_dir
+  file:
     path: "{{ base_dir }}"
     state: directory
     recurse: true
 
-- template:
+- name: Store json files in base_dir
+  template:
     src: "{{ item }}"
     dest: "{{ base_dir }}/{{ item }}"
   with_items:
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml
index 0368f4e7b..f0111cec0 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml
@@ -4,6 +4,7 @@
   register: "initial_heketi_state"
   changed_when: false
   command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
+
 - name: "Bootstrap heketi."
   when:
     - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Service']\"))|length == 0"
@@ -16,15 +17,20 @@
   register: "initial_heketi_pod"
   command: "{{ bin_dir }}/kubectl get pods --selector=deploy-heketi=pod,glusterfs=heketi-pod,name=deploy-heketi --output=json"
   changed_when: false
+
 - name: "Ensure heketi bootstrap pod is up."
   assert:
     that: "(initial_heketi_pod.stdout|from_json|json_query('items[*]'))|length == 1"
-- set_fact:
+
+- name: Store the initial heketi pod name
+  set_fact:
     initial_heketi_pod_name: "{{ initial_heketi_pod.stdout|from_json|json_query(\"items[*].metadata.name|[0]\") }}"
+
 - name: "Test heketi topology."
   changed_when: false
   register: "heketi_topology"
   command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
+
 - name: "Load heketi topology."
when: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*]\")|flatten|length == 0" include_tasks: "bootstrap/topology.yml" @@ -42,6 +48,7 @@ command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json" changed_when: false register: "heketi_storage_state" + # ensure endpoints actually exist before trying to move database data to it - name: "Create heketi storage." include_tasks: "bootstrap/storage.yml" diff --git a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml index 61729a5e2..ae598c3df 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml @@ -1,11 +1,19 @@ --- -- register: "label_present" +- name: Get storage nodes + register: "label_present" command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true" changed_when: false + - name: "Assign storage label" when: "label_present.stdout_lines|length == 0" command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs" -- register: "label_present" + +- name: Get storage nodes again + register: "label_present" command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true" changed_when: false -- assert: { that: "label_present|length > 0", msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs." } + +- name: Ensure the label has been set + assert: + that: "label_present|length > 0" + msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs." diff --git a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml index d322f6ff8..7b6d37d24 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml @@ -1,19 +1,24 @@ --- - name: "Kubernetes Apps | Lay Down Heketi" become: true - template: { src: "heketi-deployment.json.j2", dest: "{{ kube_config_dir }}/heketi-deployment.json" } + template: + src: "heketi-deployment.json.j2" + dest: "{{ kube_config_dir }}/heketi-deployment.json" register: "rendering" + - name: "Kubernetes Apps | Install and configure Heketi" kube: name: "GlusterFS" kubectl: "{{ bin_dir }}/kubectl" filename: "{{ kube_config_dir }}/heketi-deployment.json" state: "{{ rendering.changed | ternary('latest', 'present') }}" + - name: "Ensure heketi is up and running." 
   changed_when: false
   register: "heketi_state"
   vars:
-    heketi_state: { stdout: "{}" }
+    heketi_state:
+      stdout: "{}"
     pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
     deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
   command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
@@ -22,5 +27,7 @@
     - "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
   retries: 60
   delay: 5
-- set_fact:
+
+- name: Set the Heketi pod name
+  set_fact:
     heketi_pod_name: "{{ heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/secret.yml b/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
index 96f243048..3615f7c6d 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
@@ -1,31 +1,44 @@
 ---
-- register: "clusterrolebinding_state"
+- name: Get clusterrolebindings
+  register: "clusterrolebinding_state"
   command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
   changed_when: false
+
 - name: "Kubernetes Apps | Deploy cluster role binding."
   when: "clusterrolebinding_state.stdout == \"\""
   command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
-- register: "clusterrolebinding_state"
+
+- name: Get clusterrolebindings again
+  register: "clusterrolebinding_state"
   command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
   changed_when: false
-- assert:
+
+- name: Make sure that clusterrolebindings are present now
+  assert:
     that: "clusterrolebinding_state.stdout != \"\""
     msg: "Cluster role binding is not present."
 
-- register: "secret_state"
+- name: Get the heketi-config-secret secret
+  register: "secret_state"
   command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
   changed_when: false
+
 - name: "Render Heketi secret configuration."
   become: true
   template:
     src: "heketi.json.j2"
     dest: "{{ kube_config_dir }}/heketi.json"
+
 - name: "Deploy Heketi config secret"
   when: "secret_state.stdout == \"\""
   command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
-- register: "secret_state"
+
+- name: Get the heketi-config-secret secret again
+  register: "secret_state"
   command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
   changed_when: false
-- assert:
+
+- name: Make sure the heketi-config-secret secret exists now
+  assert:
     that: "secret_state.stdout != \"\""
     msg: "Heketi config secret is not present."
diff --git a/roles/bastion-ssh-config/tasks/main.yml b/roles/bastion-ssh-config/tasks/main.yml
index 71c96db21..7ea39bbd8 100644
--- a/roles/bastion-ssh-config/tasks/main.yml
+++ b/roles/bastion-ssh-config/tasks/main.yml
@@ -1,11 +1,13 @@
 ---
-- set_fact:
+- name: set bastion host IP
+  set_fact:
     bastion_ip: "{{ hostvars[groups['bastion'][0]]['ansible_host'] | d(hostvars[groups['bastion'][0]]['ansible_ssh_host']) }}"
   delegate_to: localhost
 
 # As we are actually running on localhost, the ansible_ssh_user is your local user when you try to use it directly
 # To figure out the real ssh user, we delegate this task to the bastion and store the ansible_user in real_user
-- set_fact:
+- name: Store the current ansible_user in the real_user fact
+  set_fact:
     real_user: "{{ ansible_user }}"
 
 - name: create ssh bastion conf
diff --git a/roles/container-engine/docker/tasks/main.yml b/roles/container-engine/docker/tasks/main.yml
index 0b979cc7d..2530a29ef 100644
--- a/roles/container-engine/docker/tasks/main.yml
+++ b/roles/container-engine/docker/tasks/main.yml
@@ -4,7 +4,8 @@
     path: /run/ostree-booted
   register: ostree
 
-- set_fact:
+- name: set is_atomic
+  set_fact:
     is_atomic: "{{ ostree.stat.exists }}"
 
 - name: gather os specific variables
diff --git a/roles/download/tasks/kubeadm_images.yml b/roles/download/tasks/kubeadm_images.yml
index 8257bccc1..079dd7509 100644
--- a/roles/download/tasks/kubeadm_images.yml
+++ b/roles/download/tasks/kubeadm_images.yml
@@ -49,7 +49,8 @@
   when: download_run_once
   changed_when: false
 
-- vars:
+- name: container_download | extract container names from list of kubeadm config images
+  vars:
     kubeadm_images_list: "{{ result.stdout_lines }}"
   set_fact:
     kubeadm_image:
@@ -66,7 +67,8 @@
   when: download_run_once
   register: result_images
 
-- set_fact:
+- name: container_download | set kubeadm_images
+  set_fact:
     kubeadm_images: "{{ result_images.results | map(attribute='ansible_facts.kubeadm_image') | list | items2dict }}"
   run_once: true
   when: download_run_once
diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml
index f9764ae6a..174085f2d 100644
--- a/roles/download/tasks/main.yml
+++ b/roles/download/tasks/main.yml
@@ -9,7 +9,8 @@
     - not skip_downloads|default(false)
     - inventory_hostname in groups['kube-master']
 
-- set_fact:
+- name: Set kubeadm_images
+  set_fact:
     kubeadm_images: {}
   when:
     - kubeadm_images is not defined
diff --git a/roles/download/tasks/set_docker_image_facts.yml b/roles/download/tasks/set_docker_image_facts.yml
index 72d898df1..3695a3868 100644
--- a/roles/download/tasks/set_docker_image_facts.yml
+++ b/roles/download/tasks/set_docker_image_facts.yml
@@ -1,9 +1,11 @@
 ---
-- set_fact:
+- name: Set if containers should be pulled by digest
+  set_fact:
     pull_by_digest: >-
       {%- if download.sha256 is defined and download.sha256 -%}true{%- else -%}false{%- endif -%}
 
-- set_fact:
+- name: Set pull_args
+  set_fact:
     pull_args: >-
       {%- if pull_by_digest %}{{ download.repo }}@sha256:{{ download.sha256 }}{%- else -%}{{ download.repo }}:{{ download.tag }}{%- endif -%}
 
@@ -19,7 +21,8 @@
     - not download_always_pull
     - group_names | intersect(download.groups) | length
 
-- set_fact:
+- name: Set if pull is required per container
+  set_fact:
     pull_required: >-
       {%- if pull_args in docker_images.stdout.split(',') %}false{%- else -%}true{%- endif -%}
   when:
diff --git a/roles/download/tasks/sync_container.yml b/roles/download/tasks/sync_container.yml
index fd46766ee..ac0cf9dd0 100644
--- a/roles/download/tasks/sync_container.yml
+++ b/roles/download/tasks/sync_container.yml
@@ -7,14 +7,14 @@
   tags:
     - facts
 
-- set_fact:
+- name: container_download | Set file name of container tarballs
+  set_fact:
     fname: "{{ local_release_dir }}/containers/{{ download.repo|regex_replace('/|\0|:', '_') }}:{{ download.tag|default(download.sha256)|regex_replace('/|\0|:', '_') }}.tar"
   run_once: true
   when:
     - download.enabled
     - download.container
     - download_run_once
-  tags:
-    - facts
diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml
index 66b2030a5..d882b0f94 100644
--- a/roles/etcd/tasks/gen_certs_script.yml
+++ b/roles/etcd/tasks/gen_certs_script.yml
@@ -109,7 +109,8 @@
   loop_control:
     label: "{{ item.item }}"
 
-- set_fact:
+- name: Gen_certs | Set cert names per node
+  set_fact:
     my_etcd_node_certs: ['ca.pem', 'node-{{ inventory_hostname }}.pem', 'node-{{ inventory_hostname }}-key.pem']
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index c729b880d..30112176e 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -1,5 +1,6 @@
 ---
-- set_fact:
+- name: set architecture_groups
+  set_fact:
     architecture_groups:
       x86_64: amd64
       aarch64: arm64
diff --git a/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml b/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml
index 053fbc0db..f6500f7c1 100644
--- a/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml
+++ b/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml
@@ -37,7 +37,8 @@
   delegate_to: "{{ groups['kube-master'][0] }}"
   command: "{{ helm_script_dir }}/helm-make-ssl.sh -e {{ helm_home_dir }} -d {{ helm_tiller_cert_dir }}"
 
-- set_fact:
+- name: Check_helm_client_certs | Set helm_client_certs
+  set_fact:
     helm_client_certs: ['ca.pem', 'cert.pem', 'key.pem']
 
 - name: "Check_helm_client_certs | check if a cert already exists on master node"
diff --git a/roles/kubernetes-apps/network_plugin/contiv/tasks/configure.yml b/roles/kubernetes-apps/network_plugin/contiv/tasks/configure.yml
index a080aa4f0..9611d1a47 100644
--- a/roles/kubernetes-apps/network_plugin/contiv/tasks/configure.yml
+++ b/roles/kubernetes-apps/network_plugin/contiv/tasks/configure.yml
@@ -16,7 +16,8 @@
   run_once: true
   changed_when: false
 
-- set_fact:
+- name: Contiv | Set contiv_global_config
+  set_fact:
     contiv_global_config: "{{ (global_config.stdout|from_json)[0] }}"
 
 - name: Contiv | Set global forwarding mode
diff --git a/roles/kubernetes/node/tasks/facts.yml b/roles/kubernetes/node/tasks/facts.yml
index 98a6ba73f..6f8539c0e 100644
--- a/roles/kubernetes/node/tasks/facts.yml
+++ b/roles/kubernetes/node/tasks/facts.yml
@@ -4,7 +4,8 @@
   register: docker_cgroup_driver_result
   changed_when: false
 
-- set_fact:
+- name: set facts
+  set_fact:
     standalone_kubelet: >-
       {%- if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] -%}true{%- else -%}false{%- endif -%}
     kubelet_cgroup_driver_detected: "{{ docker_cgroup_driver_result.stdout }}"
diff --git a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
index c06207fd0..7fa7507ae 100644
--- a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
@@ -1,5 +1,6 @@
 ---
-- set_fact:
+- name: set architecture_groups
+  set_fact:
     architecture_groups:
       x86_64: amd64
       aarch64: arm64
@@ -25,10 +26,12 @@
     path: /run/ostree-booted
   register: ostree
 
-- set_fact:
+- name: set is_atomic
+  set_fact:
     is_atomic: "{{ ostree.stat.exists }}"
 
-- set_fact:
+- name: set kube_cert_group on atomic hosts
+  set_fact:
     kube_cert_group: "kube"
   when: is_atomic
@@ -39,11 +42,10 @@
   changed_when: false
   check_mode: no
 
-- set_fact:
+- name: set dns facts
+  set_fact:
     resolvconf: >-
       {%- if resolvconf.rc == 0 -%}true{%- else -%}false{%- endif -%}
-
-- set_fact:
     bogus_domains: |-
       {% for d in [ 'default.svc.' + dns_domain, 'svc.' + dns_domain ] + searchdomains|default([]) -%}
       {{ dns_domain }}.{{ d }}./{{ d }}.{{ d }}./com.{{ d }}./
diff --git a/roles/network_plugin/contiv/tasks/main.yml b/roles/network_plugin/contiv/tasks/main.yml
index d626cbd68..dafe2d7ae 100644
--- a/roles/network_plugin/contiv/tasks/main.yml
+++ b/roles/network_plugin/contiv/tasks/main.yml
@@ -56,7 +56,8 @@
     - {name: contiv-netplugin, file: contiv-netplugin.yml, type: daemonset}
   when: inventory_hostname in groups['kube-master']
 
-- set_fact:
+- name: Contiv | Add another manifest if contiv_enable_api_proxy is true
+  set_fact:
     contiv_manifests: |-
       {% set _ = contiv_manifests.append({"name": "contiv-api-proxy", "file": "contiv-api-proxy.yml", "type": "daemonset"}) %}
       {{ contiv_manifests }}
diff --git a/roles/upgrade/pre-upgrade/tasks/main.yml b/roles/upgrade/pre-upgrade/tasks/main.yml
index 242834088..a8b149394 100644
--- a/roles/upgrade/pre-upgrade/tasks/main.yml
+++ b/roles/upgrade/pre-upgrade/tasks/main.yml
@@ -21,7 +21,8 @@
   failed_when: false
   changed_when: false
 
-- set_fact:
+- name: Set if node needs cordoning
+  set_fact:
     needs_cordoning: >-
       {% if kubectl_node_ready.stdout == "True" and not kubectl_node_schedulable.stdout -%}
       true
diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml
index 9ba68c0e1..15f1c627f 100644
--- a/scripts/collect-info.yaml
+++ b/scripts/collect-info.yaml
@@ -101,7 +101,8 @@
     ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
 
   tasks:
-    - set_fact:
+    - name: set etcd_access_addresses
+      set_fact:
         etcd_access_addresses: |-
           {% for item in groups['etcd'] -%}
             https://{{ item }}:2379{% if not loop.last %},{% endif %}
diff --git a/tests/cloud_playbooks/create-do.yml b/tests/cloud_playbooks/create-do.yml
index 55932a408..37fbafbd6 100644
--- a/tests/cloud_playbooks/create-do.yml
+++ b/tests/cloud_playbooks/create-do.yml
@@ -57,7 +57,8 @@
     - name: show vars
       debug: msg="{{ cloud_region }}, {{ cloud_image }}"
 
-    - set_fact:
+    - name: set instance names
+      set_fact:
         instance_names: >-
           {%- if mode in ['separate', 'ha'] -%}
           ["k8s-{{ test_name }}-1", "k8s-{{ test_name }}-2", "k8s-{{ test_name }}-3"]
diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml
index 61b3d852f..266481079 100644
--- a/tests/cloud_playbooks/create-gce.yml
+++ b/tests/cloud_playbooks/create-gce.yml
@@ -16,7 +16,8 @@
       set_fact:
         test_name: "{{ test_id |regex_replace('\\.', '-') }}"
 
-    - set_fact:
+    - name: set instance names
+      set_fact:
         instance_names: >-
           {%- if mode in ['separate', 'separate-scale', 'ha', 'ha-scale'] -%}
           k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3
diff --git a/tests/cloud_playbooks/delete-gce.yml b/tests/cloud_playbooks/delete-gce.yml
index a5b4a6e4d..ba50f92e6 100644
--- a/tests/cloud_playbooks/delete-gce.yml
+++ b/tests/cloud_playbooks/delete-gce.yml
@@ -10,7 +10,8 @@
       set_fact:
         test_name: "{{ test_id |regex_replace('\\.', '-') }}"
 
-    - set_fact:
+    - name: set instance names
+      set_fact:
         instance_names: >-
           {%- if mode in ['separate', 'ha'] -%}
           k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3
diff --git a/tests/cloud_playbooks/upload-logs-gcs.yml b/tests/cloud_playbooks/upload-logs-gcs.yml
index 39cec6f6a..f1e3cbaca 100644
--- a/tests/cloud_playbooks/upload-logs-gcs.yml
+++ b/tests/cloud_playbooks/upload-logs-gcs.yml
@@ -15,7 +15,8 @@
       set_fact:
         test_name: "kargo-ci-{{ out.stdout_lines[0] }}"
 
-    - set_fact:
+    - name: Set file_name for logs
+      set_fact:
         file_name: "{{ ostype }}-{{ kube_network_plugin }}-{{ commit }}-logs.tar.gz"
 
     - name: Create a bucket
diff --git a/tests/testcases/015_check-pods-running.yml b/tests/testcases/015_check-pods-running.yml
index 28c5d8016..c24e00aca 100644
--- a/tests/testcases/015_check-pods-running.yml
+++ b/tests/testcases/015_check-pods-running.yml
@@ -7,7 +7,8 @@
         bin_dir: "/opt/bin"
       when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
-    - set_fact:
+    - name: Force binaries directory for other hosts
+      set_fact:
         bin_dir: "/usr/local/bin"
       when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
@@ -16,7 +17,8 @@
       register: get_pods
      no_log: true
 
-    - debug: msg="{{ get_pods.stdout.split('\n') }}"
+    - debug:
+        msg: "{{ get_pods.stdout.split('\n') }}"
 
     - name: Check that all pods are running and ready
       shell: "{{ bin_dir }}/kubectl get pods --all-namespaces --no-headers -o yaml"
@@ -36,5 +38,6 @@
       register: get_pods
       no_log: true
 
-    - debug: msg="{{ get_pods.stdout.split('\n') }}"
+    - debug:
+        msg: "{{ get_pods.stdout.split('\n') }}"
       failed_when: not run_pods_log is success
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index c9d0f8c43..6a1fa5c52 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -10,7 +10,8 @@
         bin_dir: "/opt/bin"
       when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
-    - set_fact:
+    - name: Force binaries directory for other hosts
+      set_fact:
         bin_dir: "/usr/local/bin"
       when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
@@ -38,7 +39,8 @@
       register: pods
       no_log: true
 
-    - debug: msg="{{ pods.stdout.split('\n') }}"
+    - debug:
+        msg: "{{ pods.stdout.split('\n') }}"
       failed_when: not run_pods_log is success
 
     - name: Get hostnet pods
@@ -58,9 +60,11 @@
       register: get_pods
       no_log: true
 
-    - debug: msg="{{ get_pods.stdout.split('\n') }}"
+    - debug:
+        msg: "{{ get_pods.stdout.split('\n') }}"
 
-    - set_fact:
+    - name: Set networking facts
+      set_fact:
         kube_pods_subnet: 10.233.64.0/18
         pod_names: "{{ (pods.stdout | from_json)['items'] | map(attribute = 'metadata.name') | list }}"
         pod_ips: "{{ (pods.stdout | from_json)['items'] | selectattr('status.podIP', 'defined') | map(attribute = 'status.podIP') | list }}"
@@ -74,19 +78,25 @@
     - name: Check pods IP are in correct network
       assert:
         that: item | ipaddr(kube_pods_subnet)
-      when: not item in pods_hostnet and item in pods_running
+      when:
+        - not item in pods_hostnet
+        - item in pods_running
       with_items: "{{ pod_ips }}"
 
     - name: Ping between pods is working
       shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
-      when: not item[0] in pods_hostnet and not item[1] in pods_hostnet
+      when:
+        - not item[0] in pods_hostnet
+        - not item[1] in pods_hostnet
       with_nested:
         - "{{ pod_names }}"
         - "{{ pod_ips }}"
 
     - name: Ping between hostnet pods is working
       shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
-      when: item[0] in pods_hostnet and item[1] in pods_hostnet
+      when:
+        - item[0] in pods_hostnet
+        - item[1] in pods_hostnet
       with_nested:
         - "{{ pod_names }}"
         - "{{ pod_ips }}"
diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml
index c1264f842..fe4e552c7 100644
--- a/tests/testcases/040_check-network-adv.yml
+++ b/tests/testcases/040_check-network-adv.yml
@@ -19,7 +19,8 @@
         bin_dir: "/opt/bin"
       when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
-    - set_fact:
+    - name: Force binaries directory on other hosts
+      set_fact:
         bin_dir: "/usr/local/bin"
       when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
@@ -42,7 +43,8 @@
       delay: 10
       failed_when: false
 
-    - command: "{{ bin_dir }}/kubectl -n {{ netcheck_namespace }} describe pod -l app={{ item }}"
+    - name: Get netchecker pods
+      command: "{{ bin_dir }}/kubectl -n {{ netcheck_namespace }} describe pod -l app={{ item }}"
       run_once: true
       delegate_to: "{{ groups['kube-master'][0] }}"
       no_log: false
@@ -51,12 +53,15 @@
        - netchecker-agent-hostnet
       when: not nca_pod is success
 
-    - debug: var=nca_pod.stdout_lines
+    - debug:
+        var: nca_pod.stdout_lines
      failed_when: not nca_pod is success
       run_once: true
 
     - name: Get netchecker agents
-      uri: url=http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/agents/ return_content=yes
+      uri:
+        url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/agents/"
+        return_content: yes
       run_once: true
       delegate_to: "{{ groups['kube-master'][0] }}"
       register: agents
@@ -68,7 +73,8 @@
       failed_when: false
       no_log: true
 
-    - debug: var=agents.content|from_json
+    - debug:
+        var: agents.content | from_json
       failed_when: not agents is success and not agents.content=='{}'
       run_once: true
       when:
@@ -77,7 +83,10 @@
         - agents.content[0] == '{'
 
     - name: Check netchecker status
-      uri: url=http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/connectivity_check status_code=200 return_content=yes
+      uri:
+        url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/connectivity_check"
+        status_code: 200
+        return_content: yes
       delegate_to: "{{ groups['kube-master'][0] }}"
       run_once: true
       register: result
@@ -90,17 +99,20 @@
       when:
         - agents.content != '{}'
 
-    - debug: var=ncs_pod
+    - debug:
+        var: ncs_pod
       run_once: true
       when: not result is success
 
-    - command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app=kube-proxy"
+    - name: Get kube-proxy logs
+      command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app=kube-proxy"
       run_once: true
       when: not result is success
       delegate_to: "{{ groups['kube-master'][0] }}"
       no_log: false
 
-    - command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app={{ item }} --all-containers"
+    - name: Get logs from other apps
+      command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app={{ item }} --all-containers"
       run_once: true
       when: not result is success
       delegate_to: "{{ groups['kube-master'][0] }}"
@@ -115,7 +127,8 @@
         - calico-node
         - cilium
 
-    - debug: var=result.content|from_json
+    - debug:
+        var: result.content | from_json
       failed_when: not result is success
       run_once: true
       when:
@@ -123,13 +136,15 @@
         - result.content
         - result.content[0] == '{'
 
-    - debug: var=result
+    - debug:
+        var: result
       failed_when: not result is success
       run_once: true
       when:
         - not agents.content == '{}'
 
-    - debug: msg="Cannot get reports from agents, consider as PASSING"
+    - debug:
+        msg: "Cannot get reports from agents, consider as PASSING"
       run_once: true
       when:
         - agents.content == '{}'
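
Every hunk above applies the same convention: give each bare set_fact/command/debug/uri task an explicit name and expand inline key=value module arguments into YAML dictionaries, which is why rule '502' (tasks should be named) can be dropped from the ansible-lint skip_list. A minimal before/after sketch of that convention, using illustrative task and variable names (my_fact, my_cmd) rather than ones taken from the repository:

# before: unnamed task with inline module arguments
- set_fact:
    my_fact: "{{ my_cmd.stdout }}"
- debug: msg="{{ my_fact }}"

# after: named tasks with expanded YAML arguments (my_fact and my_cmd are illustrative)
- name: Set my_fact from command output
  set_fact:
    my_fact: "{{ my_cmd.stdout }}"

- name: Show my_fact
  debug:
    msg: "{{ my_fact }}"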