CI: Factorize ansible-playbook flags (#11173)
We have inconsistent sets of options passed to the playbooks during our CI runs. Don't run ansible-playbook directly; instead, factorize the execution into a bash function that applies all the common flags. Also remove the various ENABLE_* variables and directly test for the relevant conditions at execution time: this makes the logic more obvious and does not force the reader to jump back and forth in the script.
parent a512b861e0
commit d321e42d9e
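
To see why the second change helps, compare the two styles. A condensed sketch of the ENABLE_* removal (not verbatim from the script; run_test_040 is a hypothetical stand-in for the real ansible-playbook invocation):

    # Before: the decision is recorded in a flag near the top of the script...
    ENABLE_040_TEST="true"
    if [[ "$CI_JOB_NAME" =~ "hardening" ]]; then
        ENABLE_040_TEST="false"
    fi
    # ...and consumed much later, far from the context that set it.
    if [ "${ENABLE_040_TEST}" = "true" ]; then
        run_test_040
    fi

    # After: the condition is tested right where the playbook runs.
    if [[ ! ( "$CI_JOB_NAME" =~ "hardening" ) ]]; then
        run_test_040
    fi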
@@ -47,21 +47,6 @@ if [[ "$CI_JOB_NAME" =~ "ubuntu" ]]; then
 CI_TEST_ADDITIONAL_VARS="-e ansible_python_interpreter=/usr/bin/python3"
 fi
 
-ENABLE_020_TEST="true"
-ENABLE_030_TEST="true"
-ENABLE_040_TEST="true"
-if [[ "$CI_JOB_NAME" =~ "macvlan" ]]; then
-ENABLE_020_TEST="false"
-ENABLE_030_TEST="false"
-ENABLE_040_TEST="false"
-fi
-
-if [[ "$CI_JOB_NAME" =~ "hardening" ]]; then
-# TODO: We need to remove this condition by finding alternative container
-# image instead of netchecker which doesn't work at hardening environments.
-ENABLE_040_TEST="false"
-fi
-
 # Check out latest tag if testing upgrade
 test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout "$KUBESPRAY_VERSION"
 # Checkout the CI vars file so it is available
@@ -69,21 +54,41 @@ test "${UPGRADE_TEST}" != "false" && git checkout "${CI_COMMIT_SHA}" tests/files
 test "${UPGRADE_TEST}" != "false" && git checkout "${CI_COMMIT_SHA}" ${CI_TEST_REGISTRY_MIRROR}
 test "${UPGRADE_TEST}" != "false" && git checkout "${CI_COMMIT_SHA}" ${CI_TEST_SETTING}
+
+run_playbook () {
+playbook=$1
+shift
+# We can set --limit here and still pass it as supplemental args because `--limit` is a 'last one wins' option
+ansible-playbook --limit "all:!fake_hosts" \
+    $ANSIBLE_LOG_LEVEL \
+    -e @${CI_TEST_SETTING} \
+    -e @${CI_TEST_REGISTRY_MIRROR} \
+    -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} \
+    -e local_release_dir=${PWD}/downloads \
+    "$@" \
+    ${playbook}
+}
+
 
 # Create cluster
-ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" cluster.yml
+run_playbook cluster.yml
 
 # Repeat deployment if testing upgrade
-if [ "${UPGRADE_TEST}" != "false" ]; then
-test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml"
-test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml"
-git checkout "${CI_COMMIT_SHA}"
-ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" $PLAYBOOK
-fi
+case "${UPGRADE_TEST}" in
+
+"basic")
+    run_playbook cluster.yml
+    ;;
+"graceful")
+    run_playbook upgrade-cluster.yml
+    ;;
+*)
+    ;;
+esac
 
 # Test control plane recovery
 if [ "${RECOVER_CONTROL_PLANE_TEST}" != "false" ]; then
-ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "${RECOVER_CONTROL_PLANE_TEST_GROUPS}:!fake_hosts" -e reset_confirmation=yes reset.yml
-ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads -e etcd_retries=10 --limit "etcd:kube_control_plane:!fake_hosts" recover-control-plane.yml
+run_playbook reset.yml --limit "${RECOVER_CONTROL_PLANE_TEST_GROUPS}:!fake_hosts" -e reset_confirmation=yes
+run_playbook recover-control-plane.yml -e etcd_retries=10 --limit "etcd:kube_control_plane:!fake_hosts"
 fi
 
 # Test collection build and install by installing our collection, emptying our repository, adding
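
The comment inside run_playbook relies on ansible-playbook keeping only the last --limit when the flag is repeated. Expanding the control-plane recovery call above by hand makes the mechanism visible (an illustration, not a line from the script; the common -e flags are abridged):

    # run_playbook reset.yml --limit "${RECOVER_CONTROL_PLANE_TEST_GROUPS}:!fake_hosts" -e reset_confirmation=yes
    # expands roughly to:
    ansible-playbook --limit "all:!fake_hosts" \
        $ANSIBLE_LOG_LEVEL \
        -e @${CI_TEST_SETTING} ...other common -e flags... \
        --limit "${RECOVER_CONTROL_PLANE_TEST_GROUPS}:!fake_hosts" -e reset_confirmation=yes \
        reset.yml
    # The caller's --limit comes last, so it overrides the default "all:!fake_hosts".

The case statement keeps the upgrade handling equally local: "basic" redeploys with cluster.yml, "graceful" runs upgrade-cluster.yml, and the empty *) arm makes it explicit that any other value (including the non-upgrade default "false") does nothing.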
@@ -114,61 +119,59 @@ EOF
 # Write remove-node.yml
 cat > remove-node.yml <<EOF
 - name: Remove node from Kubernetes
-  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.remote-node
+  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.remove_node
 EOF
 
 fi
 
 # Tests Cases
 ## Test Master API
-ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/010_check-apiserver.yml $ANSIBLE_LOG_LEVEL
+run_playbook tests/testcases/010_check-apiserver.yml
+run_playbook tests/testcases/015_check-nodes-ready.yml
 
 ## Test that all nodes are Ready
-ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/015_check-nodes-ready.yml $ANSIBLE_LOG_LEVEL
-
-## Test that all pods are Running
-if [ "${ENABLE_020_TEST}" = "true" ]; then
-ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/020_check-pods-running.yml $ANSIBLE_LOG_LEVEL
-fi
-
-## Test pod creation and ping between them
-if [ "${ENABLE_030_TEST}" = "true" ]; then
-ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/030_check-network.yml $ANSIBLE_LOG_LEVEL
-fi
-
-## Advanced DNS checks
-if [ "${ENABLE_040_TEST}" = "true" ]; then
-ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/040_check-network-adv.yml $ANSIBLE_LOG_LEVEL
-fi
+if [[ ! ( "$CI_JOB_NAME" =~ "macvlan" ) ]]; then
+run_playbook tests/testcases/020_check-pods-running.yml
+run_playbook tests/testcases/030_check-network.yml
+if [[ ! ( "$CI_JOB_NAME" =~ "hardening" ) ]]; then
+# TODO: We need to remove this condition by finding alternative container
+# image instead of netchecker which doesn't work at hardening environments.
+run_playbook tests/testcases/040_check-network-adv.yml
+fi
+fi
 
 ## Kubernetes conformance tests
-ansible-playbook -i ${ANSIBLE_INVENTORY} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} --limit "all:!fake_hosts" tests/testcases/100_check-k8s-conformance.yml $ANSIBLE_LOG_LEVEL
+run_playbook tests/testcases/100_check-k8s-conformance.yml
 
 if [ "${IDEMPOT_CHECK}" = "true" ]; then
 ## Idempotency checks 1/5 (repeat deployment)
-ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} ${CI_TEST_ADDITIONAL_VARS} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" cluster.yml
+run_playbook cluster.yml
 
 ## Idempotency checks 2/5 (Advanced DNS checks)
-ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml
+if [[ ! ( "$CI_JOB_NAME" =~ "hardening" ) ]]; then
+run_playbook tests/testcases/040_check-network-adv.yml
+fi
 
 if [ "${RESET_CHECK}" = "true" ]; then
 ## Idempotency checks 3/5 (reset deployment)
-ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} -e reset_confirmation=yes --limit "all:!fake_hosts" reset.yml
+run_playbook reset.yml -e reset_confirmation=yes
 
 ## Idempotency checks 4/5 (redeploy after reset)
-ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" cluster.yml
+run_playbook cluster.yml
 
 ## Idempotency checks 5/5 (Advanced DNS checks)
-ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml
+if [[ ! ( "$CI_JOB_NAME" =~ "hardening" ) ]]; then
+run_playbook tests/testcases/040_check-network-adv.yml
+fi
 fi
 fi
 
 # Test node removal procedure
 if [ "${REMOVE_NODE_CHECK}" = "true" ]; then
-ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} -e skip_confirmation=yes -e node=${REMOVE_NODE_NAME} --limit "all:!fake_hosts" remove-node.yml
+run_playbook remove-node.yml -e skip_confirmation=yes -e node=${REMOVE_NODE_NAME}
 fi
 
 # Clean up at the end, this is to allow stage1 tests to include cleanup test
 if [ "${RESET_CHECK}" = "true" ]; then
-ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} -e reset_confirmation=yes --limit "all:!fake_hosts" reset.yml
+run_playbook reset.yml -e reset_confirmation=yes
 fi
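
A quick way to sanity-check the factorized flags without touching any hosts (a local sketch, not part of the commit; it assumes the CI_TEST_* files exist and ANSIBLE_LOG_LEVEL is set):

    # --list-tasks parses the playbook and the -e @file extra-vars without executing tasks,
    # so a typo in any of the common flags surfaces immediately.
    run_playbook cluster.yml --list-tasks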