2019-04-24 04:36:03 +08:00
|
|
|
#!/bin/bash

# CI test-case runner.
# Strict mode: abort on first error or unset variable, trace every command,
# and let a pipeline fail if any stage of it fails.
set -euxo pipefail

# Log which CI job this run belongs to.
printf 'CI_JOB_NAME is %s\n' "$CI_JOB_NAME"
|
2020-04-18 21:05:36 +08:00
|
|
|
|
|
|
|
# Sanity check: the UPGRADE_TEST variable must agree with the job name.
# A job whose name contains "upgrade" must enable UPGRADE_TEST, and vice
# versa, so a mis-configured job fails fast instead of testing the wrong thing.
if [[ "$CI_JOB_NAME" =~ "upgrade" ]]; then
  if [[ "${UPGRADE_TEST}" == "false" ]]; then
    echo "Job name contains 'upgrade', but UPGRADE_TEST='false'"
    exit 1
  fi
else
  if [[ "${UPGRADE_TEST}" != "false" ]]; then
    # (fixed message typo: "job names does" -> "job name does")
    echo "UPGRADE_TEST!='false', but job name does not contain 'upgrade'"
    exit 1
  fi
fi
|
|
|
|
|
2022-02-23 23:36:19 +08:00
|
|
|
# --- Ansible environment ---------------------------------------------------

# ara refuses to start without a valid timezone set.
export TZ=UTC

# Connect to the test hosts as the CI SSH user, escalating to root for tasks.
export ANSIBLE_REMOTE_USER="$SSH_USER"
export ANSIBLE_BECOME=true
export ANSIBLE_BECOME_USER=root

# Record every playbook run with the ara callback plugin.
export ANSIBLE_CALLBACK_PLUGINS="$(python -m ara.setup.callback_plugins)"
|
2019-04-29 13:56:19 +08:00
|
|
|
|
2019-05-10 16:25:59 +08:00
|
|
|
# Provision the CI platform (VMs/containers) for this run, then wait until
# every host is reachable over SSH before running any playbook.
# `make -C` avoids the fragile `cd … ; cd -` dance (which could leave the
# shell in the wrong directory), and quoting protects odd platform names.
make -C tests -s "create-${CI_PLATFORM}"

ansible-playbook tests/cloud_playbooks/wait-for-ssh.yml
|
|
|
|
|
2020-08-28 17:28:53 +08:00
|
|
|
# Flatcar Container Linux needs auto update disabled
if [[ "$CI_JOB_NAME" =~ "coreos" ]]; then
  # Stop locksmithd (the update/reboot manager) so automatic updates
  # cannot reboot nodes in the middle of a test run.
  ansible all -m raw -a 'systemctl disable locksmithd'
  ansible all -m raw -a 'systemctl stop locksmithd'
  # Expose a python interpreter at /opt/bin/python.
  # NOTE(review): unlike the two commands above, this runs on the CI runner
  # itself, not via ansible on the remote hosts — confirm that is intentional.
  mkdir -p /opt/bin && ln -s /usr/bin/python /opt/bin/python
fi
|
|
|
|
|
|
|
|
if [[ "$CI_JOB_NAME" =~ "opensuse" ]]; then
  # OpenSUSE needs netconfig update to get correct resolv.conf
  # See https://goinggnu.wordpress.com/2013/10/14/how-to-fix-the-dns-in-opensuse-13-1/
  ansible all -m raw -a 'netconfig update -f'
  # Auto import repo keys so zypper operations do not prompt interactively
  ansible all -m raw -a 'zypper --gpg-auto-import-keys refresh'
fi
|
2019-05-10 16:25:59 +08:00
|
|
|
|
2019-04-24 04:36:03 +08:00
|
|
|
# Check out latest tag if testing upgrade: the upgrade test first deploys the
# previous release (KUBESPRAY_VERSION), then restores only the CI
# configuration files from the commit under test so the job definition,
# registry mirror, and settings match the current pipeline.
if [[ "${UPGRADE_TEST}" != "false" ]]; then
  git fetch --all
  git checkout "$KUBESPRAY_VERSION"
  # Checkout the CI vars file so it is available at the old tag
  git checkout "${CI_COMMIT_SHA}" "tests/files/${CI_JOB_NAME}.yml"
  git checkout "${CI_COMMIT_SHA}" "${CI_TEST_REGISTRY_MIRROR}"
  git checkout "${CI_COMMIT_SHA}" "${CI_TEST_SETTING}"
fi
|
2019-04-24 04:36:03 +08:00
|
|
|
|
2024-05-13 18:20:48 +08:00
|
|
|
|
|
|
|
# Run an ansible-playbook against the test cluster with the standard CI
# settings layered in.
#   $1          - path of the playbook to run
#   $2..        - extra ansible-playbook arguments, appended after the
#                 defaults (so e.g. a caller-supplied --limit wins)
run_playbook () {
  # 'local' keeps the name from leaking into the script's global scope.
  local playbook=$1
  shift
  # We can set --limit here and still pass it as supplemental args because
  # `--limit` is a 'last one wins' option.
  # $ANSIBLE_LOG_LEVEL is intentionally left unquoted: it may be empty or
  # carry several flags (e.g. "-vv -b") that must word-split.
  # shellcheck disable=SC2086
  ansible-playbook --limit "all:!fake_hosts" \
    $ANSIBLE_LOG_LEVEL \
    -e @"${CI_TEST_SETTING}" \
    -e @"${CI_TEST_REGISTRY_MIRROR}" \
    -e @"${CI_TEST_VARS}" \
    -e "local_release_dir=${PWD}/downloads" \
    "$@" \
    "${playbook}"
}
|
|
|
|
|
2019-04-24 04:36:03 +08:00
|
|
|
# Create cluster (first full deployment; for upgrade jobs this deploys the
# previous release checked out above)
run_playbook cluster.yml
|
2019-04-24 04:36:03 +08:00
|
|
|
|
|
|
|
# Repeat deployment if testing upgrade.
# "basic"    -> simply re-run cluster.yml on the new version
# "graceful" -> use the dedicated rolling-upgrade playbook
# anything else (including "false") deploys nothing a second time.
if [[ "${UPGRADE_TEST}" == "basic" ]]; then
  run_playbook cluster.yml
elif [[ "${UPGRADE_TEST}" == "graceful" ]]; then
  run_playbook upgrade-cluster.yml
fi
|
2019-04-24 04:36:03 +08:00
|
|
|
|
2020-02-11 17:38:01 +08:00
|
|
|
# Test control plane recovery
|
|
|
|
if [ "${RECOVER_CONTROL_PLANE_TEST}" != "false" ]; then
|
2024-05-13 18:20:48 +08:00
|
|
|
run_playbook reset.yml --limit "${RECOVER_CONTROL_PLANE_TEST_GROUPS}:!fake_hosts" -e reset_confirmation=yes
|
|
|
|
run_playbook recover-control-plane.yml -e etcd_retries=10 --limit "etcd:kube_control_plane:!fake_hosts"
|
2020-02-11 17:38:01 +08:00
|
|
|
fi
|
|
|
|
|
2023-03-27 17:25:55 +08:00
|
|
|
# Test collection build and install by installing our collection, emptying our
# repository, adding cluster.yml, reset.yml, and remove-node.yml files that
# simply point to our collection's playbooks, and then running the same tests
# as before
if [[ "${CI_JOB_NAME}" =~ "collection" ]]; then
  # Build and install collection
  ansible-galaxy collection build
  ansible-galaxy collection install "kubernetes_sigs-kubespray-$(grep "^version:" galaxy.yml | awk '{print $2}').tar.gz"

  # Simply remove all of our files and directories except for our tests directory
  # to be absolutely certain that none of our playbooks or roles
  # are interfering with our collection.
  # -mindepth 1 excludes '.' itself, which rm refuses to remove and which
  # previously produced a spurious "refusing to remove" error on every run.
  find -mindepth 1 -maxdepth 1 ! -name tests -exec rm -rfv {} \;

  # Write cluster.yml
  cat > cluster.yml <<EOF
- name: Install Kubernetes
  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.cluster
EOF

  # Write reset.yml
  cat > reset.yml <<EOF
- name: Remove Kubernetes
  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.reset
EOF

  # Write remove-node.yml
  cat > remove-node.yml <<EOF
- name: Remove node from Kubernetes
  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.remove_node
EOF
fi
|
2019-04-24 04:36:03 +08:00
|
|
|
# Tests Cases
## Test Master API
run_playbook tests/testcases/010_check-apiserver.yml
## Test node readiness (015_check-nodes-ready)
run_playbook tests/testcases/015_check-nodes-ready.yml
|
2019-04-24 04:36:03 +08:00
|
|
|
|
2020-03-19 19:09:14 +08:00
|
|
|
## Test that all nodes are Ready
|
|
|
|
|
2024-05-13 18:20:48 +08:00
|
|
|
if [[ ! ( "$CI_JOB_NAME" =~ "macvlan" ) ]]; then
|
|
|
|
run_playbook tests/testcases/020_check-pods-running.yml
|
|
|
|
run_playbook tests/testcases/030_check-network.yml
|
|
|
|
if [[ ! ( "$CI_JOB_NAME" =~ "hardening" ) ]]; then
|
|
|
|
# TODO: We need to remove this condition by finding alternative container
|
|
|
|
# image instead of netchecker which doesn't work at hardening environments.
|
|
|
|
run_playbook tests/testcases/040_check-network-adv.yml
|
|
|
|
fi
|
2022-10-19 20:35:04 +08:00
|
|
|
fi
|
2019-04-24 04:36:03 +08:00
|
|
|
|
2019-05-27 20:31:52 +08:00
|
|
|
## Kubernetes conformance tests
|
2024-05-13 18:20:48 +08:00
|
|
|
run_playbook tests/testcases/100_check-k8s-conformance.yml
|
2019-05-27 20:31:52 +08:00
|
|
|
|
2019-04-24 04:36:03 +08:00
|
|
|
if [ "${IDEMPOT_CHECK}" = "true" ]; then
|
2021-11-11 08:11:50 +08:00
|
|
|
## Idempotency checks 1/5 (repeat deployment)
|
2024-05-13 18:20:48 +08:00
|
|
|
run_playbook cluster.yml
|
2019-04-24 04:36:03 +08:00
|
|
|
|
2021-11-11 08:11:50 +08:00
|
|
|
## Idempotency checks 2/5 (Advanced DNS checks)
|
2024-05-13 18:20:48 +08:00
|
|
|
if [[ ! ( "$CI_JOB_NAME" =~ "hardening" ) ]]; then
|
|
|
|
run_playbook tests/testcases/040_check-network-adv.yml
|
|
|
|
fi
|
2019-04-24 04:36:03 +08:00
|
|
|
|
2021-11-11 08:11:50 +08:00
|
|
|
if [ "${RESET_CHECK}" = "true" ]; then
|
|
|
|
## Idempotency checks 3/5 (reset deployment)
|
2024-05-13 18:20:48 +08:00
|
|
|
run_playbook reset.yml -e reset_confirmation=yes
|
2021-11-11 08:11:50 +08:00
|
|
|
|
|
|
|
## Idempotency checks 4/5 (redeploy after reset)
|
2024-05-13 18:20:48 +08:00
|
|
|
run_playbook cluster.yml
|
2019-04-24 04:36:03 +08:00
|
|
|
|
2021-11-11 08:11:50 +08:00
|
|
|
## Idempotency checks 5/5 (Advanced DNS checks)
|
2024-05-13 18:20:48 +08:00
|
|
|
if [[ ! ( "$CI_JOB_NAME" =~ "hardening" ) ]]; then
|
|
|
|
run_playbook tests/testcases/040_check-network-adv.yml
|
|
|
|
fi
|
2021-11-11 08:11:50 +08:00
|
|
|
fi
|
2019-04-24 04:36:03 +08:00
|
|
|
fi
|
|
|
|
|
2022-04-19 15:49:39 +08:00
|
|
|
# Test node removal procedure
|
|
|
|
if [ "${REMOVE_NODE_CHECK}" = "true" ]; then
|
2024-05-13 18:20:48 +08:00
|
|
|
run_playbook remove-node.yml -e skip_confirmation=yes -e node=${REMOVE_NODE_NAME}
|
2022-04-19 15:49:39 +08:00
|
|
|
fi
|
|
|
|
|
2021-11-11 08:11:50 +08:00
|
|
|
# Clean up at the end, this is to allow stage1 tests to include cleanup test
|
|
|
|
if [ "${RESET_CHECK}" = "true" ]; then
|
2024-05-13 18:20:48 +08:00
|
|
|
run_playbook reset.yml -e reset_confirmation=yes
|
2019-04-24 04:36:03 +08:00
|
|
|
fi
|