#!/bin/bash
set -euxo pipefail

echo "CI_JOB_NAME is $CI_JOB_NAME"

if [[ "$CI_JOB_NAME" =~ "upgrade" ]]; then
  if [ "${UPGRADE_TEST}" == "false" ]; then
    echo "Job name contains 'upgrade', but UPGRADE_TEST='false'"
    exit 1
  fi
else
  if [ "${UPGRADE_TEST}" != "false" ]; then
    echo "UPGRADE_TEST!='false', but job name does not contain 'upgrade'"
    exit 1
  fi
fi

# needed for ara not to complain
export TZ=UTC

export ANSIBLE_REMOTE_USER=$SSH_USER
export ANSIBLE_BECOME=true
export ANSIBLE_BECOME_USER=root
export ANSIBLE_CALLBACK_PLUGINS="$(python -m ara.setup.callback_plugins)"

cd tests && make create-${CI_PLATFORM} -s ; cd -
ansible-playbook tests/cloud_playbooks/wait-for-ssh.yml

# Flatcar Container Linux needs auto update disabled
if [[ "$CI_JOB_NAME" =~ "coreos" ]]; then
  ansible all -m raw -a 'systemctl disable locksmithd'
  ansible all -m raw -a 'systemctl stop locksmithd'
  ansible all -m raw -a 'mkdir -p /opt/bin && ln -s /usr/bin/python /opt/bin/python'
fi

if [[ "$CI_JOB_NAME" =~ "opensuse" ]]; then
  # openSUSE needs netconfig update to get correct resolv.conf
  # See https://goinggnu.wordpress.com/2013/10/14/how-to-fix-the-dns-in-opensuse-13-1/
  ansible all -m raw -a 'netconfig update -f'
  # Auto import repo keys
  ansible all -m raw -a 'zypper --gpg-auto-import-keys refresh'
fi

# Check out latest tag if testing upgrade
test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout "$KUBESPRAY_VERSION"
# Checkout the CI vars file so it is available
test "${UPGRADE_TEST}" != "false" && git checkout "${CI_COMMIT_SHA}" tests/files/${CI_JOB_NAME}.yml
test "${UPGRADE_TEST}" != "false" && git checkout "${CI_COMMIT_SHA}" ${CI_TEST_REGISTRY_MIRROR}
test "${UPGRADE_TEST}" != "false" && git checkout "${CI_COMMIT_SHA}" ${CI_TEST_SETTING}

run_playbook () {
  playbook=$1
  shift
  # We can set --limit here and still pass it as supplemental args because `--limit` is a 'last one wins' option
  ansible-playbook --limit "all:!fake_hosts" \
    $ANSIBLE_LOG_LEVEL \
    -e @${CI_TEST_SETTING} \
    -e @${CI_TEST_REGISTRY_MIRROR} \
    -e @${CI_TEST_VARS} \
    -e local_release_dir=${PWD}/downloads \
    "$@" \
    ${playbook}
}

# Create cluster
run_playbook cluster.yml

# Repeat deployment if testing upgrade
case "${UPGRADE_TEST}" in
  "basic")
    run_playbook cluster.yml
    ;;
  "graceful")
    run_playbook upgrade-cluster.yml
    ;;
  *)
    ;;
esac

# Test control plane recovery
if [ "${RECOVER_CONTROL_PLANE_TEST}" != "false" ]; then
  run_playbook reset.yml --limit "${RECOVER_CONTROL_PLANE_TEST_GROUPS}:!fake_hosts" -e reset_confirmation=yes
  run_playbook recover-control-plane.yml -e etcd_retries=10 --limit "etcd:kube_control_plane:!fake_hosts"
fi

# Test collection build and install by installing our collection, emptying our repository, adding
# cluster.yml, reset.yml, and remove-node.yml files that simply point to our collection's playbooks, and then
# running the same tests as before
if [[ "${CI_JOB_NAME}" =~ "collection" ]]; then
  # Build and install collection
  ansible-galaxy collection build
  ansible-galaxy collection install kubernetes_sigs-kubespray-$(grep "^version:" galaxy.yml | awk '{print $2}').tar.gz

  # Simply remove all of our files and directories except for our tests directory
  # to be absolutely certain that none of our playbooks or roles
  # are interfering with our collection
  find -maxdepth 1 ! -name tests -exec rm -rfv {} \;

  # Write cluster.yml
  cat > cluster.yml <<EOF
- name: Install Kubernetes
  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.cluster
EOF

  # Write reset.yml
  cat > reset.yml <<EOF
- name: Remove Kubernetes
  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.reset
EOF

  # Write remove-node.yml
  cat > remove-node.yml <<EOF
- name: Remove node from Kubernetes
  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.remove_node
EOF
fi
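
# Sketch (hypothetical, for illustration only -- not part of the original script):
# with the wrapper playbooks above in place, "running the same tests as before"
# means the existing run_playbook helper now drives the installed collection, e.g.
#   run_playbook cluster.yml   # cluster.yml imports kubernetes_sigs.kubespray.cluster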