From 8d7b25d4f0e2d0fe9d3bf51b86367ad881e39767 Mon Sep 17 00:00:00 2001
From: Bogdan Dobrelya
Date: Mon, 28 Nov 2016 13:18:06 +0100
Subject: [PATCH] Enable netchecker for CI

* Enable the netchecker app for CI post-install tests
* Rework outputs and improve coverage of the ping-between-pods
  post-install test case. With netchecker deployed, the test now
  covers hostnet-to-hostnet and standard-to-standard pod ping
  checks as well.

Signed-off-by: Bogdan Dobrelya
---
 .travis.yml                               |  9 +--
 tests/scripts/ansibl8s_test.sh            | 12 ++--
 tests/testcases/030_check-network.yml     | 34 ++++++++++-
 tests/testcases/040_check-network-adv.yml | 73 +++++++++++++++++++++++
 4 files changed, 115 insertions(+), 13 deletions(-)
 create mode 100644 tests/testcases/040_check-network-adv.yml

diff --git a/.travis.yml b/.travis.yml
index 058018ab5..daec83cb2 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -127,7 +127,7 @@ script:
     -e inventory_path=${PWD}/inventory/inventory.ini
     -e cloud_region=${CLOUD_REGION}

-  # Create cluster
+  # Create cluster with netchecker app deployed
   - >
     $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e cloud_provider=gce $LOG_LEVEL
     -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
@@ -136,15 +136,16 @@ script:
     -e download_run_once=true -e download_localhost=true
     -e local_release_dir=/var/tmp/releases
+    -e deploy_netchecker=true
     cluster.yml

   # Tests Cases
   ## Test Master API
-  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} tests/testcases/010_check-apiserver.yml $LOG_LEVEL
-  ## Create a POD
-  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/020_check-create-pod.yml $LOG_LEVEL
+  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/010_check-apiserver.yml $LOG_LEVEL
   ## Ping the between 2 pod
   - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
+  ## Advanced DNS checks
+  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL

 after_script:
   - >
diff --git a/tests/scripts/ansibl8s_test.sh b/tests/scripts/ansibl8s_test.sh
index 1f0c889df..1f61f456f 100644
--- a/tests/scripts/ansibl8s_test.sh
+++ b/tests/scripts/ansibl8s_test.sh
@@ -39,14 +39,14 @@ should_api_server_respond() {
     assertion__status_code_is_success $?
 }

-should_create_pod() {
-    ansible-playbook -i inventory.ini -s ${private_key} testcases/020_check-create-pod.yml -vv
-
-    assertion__status_code_is_success $?
-}
-
 should_pod_be_in_expected_subnet() {
     ansible-playbook -i inventory.ini -s ${private_key} testcases/030_check-network.yml -vv

     assertion__status_code_is_success $?
 }
+
+should_resolve_cluster_dns() {
+    ansible-playbook -i inventory.ini -s ${private_key} testcases/040_check-network-adv.yml -vv
+
+    assertion__status_code_is_success $?
+}
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index b47f30942..abe2f7a81 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -15,16 +15,44 @@
     - name: Get pod names
       shell: "{{bin_dir}}/kubectl get pods -o json"
       register: pods
+      no_log: true
+
+    - name: Get hostnet pods
+      command: "{{bin_dir}}/kubectl get pods -o
+               jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {end}'"
+      register: hostnet_pods
+
+    - name: Get running pods
+      command: "{{bin_dir}}/kubectl get pods -o
+               jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {.status.podIP} {end}'"
+      register: running_pods

     - set_fact:
         pod_names: "{{ (pods.stdout | from_json)['items'] | map(attribute = 'metadata.name') | list }}"
-        pod_ips: "{{ (pods.stdout | from_json)['items'] | map(attribute = 'status.podIP') | list }}"
+        pod_ips: "{{ (pods.stdout | from_json)['items'] | selectattr('status.podIP', 'defined') | map(attribute = 'status.podIP') | list }}"
+        pods_hostnet: |
+          {% set list = hostnet_pods.stdout.split(" ") %}
+          {{list}}
+        pods_running: |
+          {% set list = running_pods.stdout.split(" ") %}
+          {{list}}

     - name: Check pods IP are in correct network
       assert:
         that: item | ipaddr(kube_pods_subnet)
+      when: not item in pods_hostnet and item in pods_running
       with_items: "{{pod_ips}}"
-
     - name: Ping between pods is working
-      shell: "{{bin_dir}}/kubectl exec {{pod_names[0]}} -- ping -c 4 {{ pod_ips[1] }}"
+      shell: "{{bin_dir}}/kubectl exec {{item[0]}} -- ping -c 4 {{ item[1] }}"
+      when: not item[0] in pods_hostnet and not item[1] in pods_hostnet
+      with_nested:
+        - "{{pod_names}}"
+        - "{{pod_ips}}"
+
+    - name: Ping between hostnet pods is working
+      shell: "{{bin_dir}}/kubectl exec {{item[0]}} -- ping -c 4 {{ item[1] }}"
+      when: item[0] in pods_hostnet and item[1] in pods_hostnet
+      with_nested:
+        - "{{pod_names}}"
+        - "{{pod_ips}}"
diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml
new file mode 100644
index 000000000..2de604f7f
--- /dev/null
+++ b/tests/testcases/040_check-network-adv.yml
@@ -0,0 +1,73 @@
+---
+- hosts: kube-node
+  tasks:
+    - name: Test tunl0 routes
+      shell: "! /sbin/ip ro | grep '/26 via' | grep -v tunl0"
+      when: (ipip|default(false) or cloud_provider is defined) and (kube_network_plugin == 'calico')
+
+- hosts: k8s-cluster
+  vars:
+    agent_report_interval: 10
+    netcheck_namespace: default
+    netchecker_port: 31081
+
+  tasks:
+    - name: Force binaries directory for CoreOS
+      set_fact:
+        bin_dir: "/opt/bin"
+      when: ansible_os_family == "CoreOS"
+
+    - set_fact:
+        bin_dir: "/usr/local/bin"
+      when: ansible_os_family != "CoreOS"
+
+    - name: Wait for netchecker server
+      shell: "{{ bin_dir }}/kubectl get pods --namespace {{netcheck_namespace}} | grep ^netchecker-server"
+      delegate_to: "{{groups['kube-master'][0]}}"
+      run_once: true
+      register: ncs_pod
+      until: ncs_pod.stdout.find('Running') != -1
+      retries: 3
+      delay: 10
+
+    - name: Wait for netchecker agents
+      shell: "{{ bin_dir }}/kubectl get pods --namespace {{netcheck_namespace}} | grep '^netchecker-agent-.*Running'"
+      run_once: true
+      delegate_to: "{{groups['kube-master'][0]}}"
+      register: nca_pod
+      until: "{{ nca_pod.stdout_lines|length }} >= {{ groups['kube-node']|length * 2 }}"
+      retries: 3
+      delay: 10
+
+    - name: Get netchecker agents
+      uri: url=http://localhost:{{netchecker_port}}/api/v1/agents/ return_content=yes
+      run_once: true
+      delegate_to: "{{groups['kube-node'][0]}}"
+      register: agents
+      retries: 3
+      delay: "{{ agent_report_interval }}"
+      until: "{{ agents.content|length > 0 and
+                 agents.content[0] == '{' and
+                 agents.content|from_json|length >= groups['kube-node']|length * 2 }}"
+      ignore_errors: true
+      no_log: true
+
+    - debug: var=agents.content|from_json
+      failed_when: not agents|success
+      delegate_to: "{{groups['kube-node'][0]}}"
+      run_once: true
+
+    - name: Check netchecker status
+      uri: url=http://localhost:{{netchecker_port}}/api/v1/connectivity_check status_code=200 return_content=yes
+      delegate_to: "{{groups['kube-node'][0]}}"
+      run_once: true
+      register: result
+      retries: 3
+      delay: "{{ agent_report_interval }}"
+      no_log: true
+      ignore_errors: true
+
+    - debug: var=result.content|from_json
+      failed_when: not result|success
+      delegate_to: "{{groups['kube-node'][0]}}"
+      run_once: true
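
Note on the jsonpath filters introduced in 030_check-network.yml: the two kubectl queries can be run standalone on a master node to inspect the "name IP" pairs that the playbook splits into pods_hostnet and pods_running. A minimal shell sketch, with bin_dir assumed to be /usr/local/bin (the non-CoreOS case above):

# Host-network pods: excluded from the kube_pods_subnet assertion and pinged separately
/usr/local/bin/kubectl get pods -o \
  jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {end}'

# Running pods: only these are asserted to carry an IP from kube_pods_subnet
/usr/local/bin/kubectl get pods -o \
  jsonpath='{range .items[?(.status.phase=="Running")]}{.metadata.name} {.status.podIP} {end}'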
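
Note on the netchecker endpoints polled by 040_check-network-adv.yml: if the check fails in CI, the same URLs can be queried by hand from any kube-node over the NodePort configured above (netchecker_port: 31081). A minimal curl sketch; curl itself is assumed to be available and is not installed by this patch:

# Agents that have reported back; the playbook waits for at least
# two entries per kube-node (the hostnet and standard agents).
curl -s http://localhost:31081/api/v1/agents/

# Cluster-wide connectivity check; the playbook treats HTTP 200 as success.
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:31081/api/v1/connectivity_check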