From bf17099964ddfb4c7cf9bd9bc14f9d5e21e9ed9c Mon Sep 17 00:00:00 2001
From: Guillaume Abrioux
Date: Mon, 20 May 2019 13:43:11 +0200
Subject: [PATCH] tests: split update in a dedicated tox.ini file

This commit splits the update scenario into a dedicated tox.ini file.

Signed-off-by: Guillaume Abrioux
---
 tox-update.ini | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++
 tox.ini        | 30 ++-----------------
 2 files changed, 81 insertions(+), 28 deletions(-)
 create mode 100644 tox-update.ini

diff --git a/tox-update.ini b/tox-update.ini
new file mode 100644
index 000000000..9c5fb5409
--- /dev/null
+++ b/tox-update.ini
@@ -0,0 +1,79 @@
+[tox]
+envlist = luminous-{centos,ubuntu}-{container,non_container}-update
+
+skipsdist = True
+
+[testenv]
+whitelist_externals =
+    vagrant
+    bash
+    cp
+    git
+    pip
+passenv=*
+setenv=
+  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
+  ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
+  ANSIBLE_ACTION_PLUGINS = {toxinidir}/plugins/actions
+  ANSIBLE_CALLBACK_PLUGINS = {toxinidir}/plugins/callback
+  ANSIBLE_CALLBACK_WHITELIST = profile_tasks
+  ANSIBLE_CACHE_PLUGIN = memory
+  ANSIBLE_GATHERING = implicit
+  ANSIBLE_STDOUT_CALLBACK = debug
+  centos: CEPH_ANSIBLE_VAGRANT_BOX = centos/7
+  fedora: CEPH_ANSIBLE_VAGRANT_BOX = fedora/29-atomic-host
+  # Set the vagrant box image to use
+  centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/7
+  centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/atomic-host
+  ubuntu: CEPH_ANSIBLE_VAGRANT_BOX = ceph/ubuntu-xenial
+
+  # Set the ansible inventory host file to be used according to which distrib we are running on
+  ubuntu: _INVENTORY = hosts-ubuntu
+  INVENTORY = {env:_INVENTORY:hosts}
+  container: CONTAINER_DIR = /container
+  container: PLAYBOOK = site-docker.yml.sample
+  non_container: PLAYBOOK = site.yml.sample
+
+  CEPH_DOCKER_IMAGE_TAG = latest-jewel
+  CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-jewel
+  UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-luminous
+  CEPH_STABLE_RELEASE = jewel
+  UPDATE_CEPH_STABLE_RELEASE = luminous
+  ROLLING_UPDATE = True
+
+deps= -r{toxinidir}/tests/requirements.txt
+changedir={toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:}
+commands=
+
+  vagrant up --no-provision {posargs:--provider=virtualbox}
+  bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
+
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
+
+  # deploy the cluster
+  ansible-playbook -vv -i {changedir}/{env:CONTAINER_DIR:}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit 'all:!iscsigws' --extra-vars "\
+      delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
+      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
+      ceph_stable_release={env:CEPH_STABLE_RELEASE:jewel} \
+      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
+      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
+      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-jewel} \
+      copy_admin_key={env:COPY_ADMIN_KEY:False} \
+  "
+
+  cp {toxinidir}/infrastructure-playbooks/rolling_update.yml {toxinidir}/rolling_update.yml
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/rolling_update.yml --extra-vars \
+      "\{'ireallymeanit': 'yes', \
+      'cluster': 'test', \
+      'fetch_directory': '{env:FETCH_DIRECTORY:{changedir}/fetch}', \
+      'ceph_stable_release': '{env:UPDATE_CEPH_STABLE_RELEASE:luminous}', \
+      'ceph_docker_registry': '{env:CEPH_DOCKER_REGISTRY:docker.io}', \
+      'ceph_docker_image': '{env:UPDATE_CEPH_DOCKER_IMAGE:ceph/daemon}', \
+      'ceph_docker_image_tag': '{env:UPDATE_CEPH_DOCKER_IMAGE_TAG:latest-luminous}', \
+      'devices': [/dev/sda,/dev/sdb], \
+      'dedicated_devices': [/dev/sdc,/dev/sdc], \
+      'osd_scenario': 'non-collocated' \}"
+
+  bash -c "CEPH_STABLE_RELEASE={env:UPDATE_CEPH_STABLE_RELEASE:luminous} ROLLING_UPDATE=TRUE py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests"
+
+  vagrant destroy --force
\ No newline at end of file
diff --git a/tox.ini b/tox.ini
index 02abea459..a5ab8a45e 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist = {jewel,luminous,rhcs}-{centos,ubuntu}-{container,non_container}-{all_daemons,cluster,filestore_osds,update,purge_filestore,purge,ooo_collocation,shrink_mon,shrink_osd,collocation}
+envlist = {jewel,luminous,rhcs}-{centos,ubuntu}-{container,non_container}-{all_daemons,cluster,filestore_osds,purge_filestore,purge,ooo_collocation,shrink_mon,shrink_osd,collocation}
   {luminous,rhcs}-{centos,ubuntu}-{container,non_container}-{bluestore_osds,bluestore_lvm_osds,lvm_osds,purge_lvm_osds,purge_bluestore}
   {jewel,luminous}-{centos,ubuntu}-non_container-switch_to_containers
   infra_lv_create
@@ -97,25 +97,6 @@ commands=
   # test that the cluster can be redeployed in a healthy state
   testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
 
-# extra commands for performing a rolling update
-# currently this hardcodes the release to kraken
-# as we're still installing jewel by default
-[update]
-commands=
-  cp {toxinidir}/infrastructure-playbooks/rolling_update.yml {toxinidir}/rolling_update.yml
-  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/rolling_update.yml --extra-vars "\
-      ireallymeanit=yes \
-      ceph_stable_release={env:UPDATE_CEPH_STABLE_RELEASE:kraken} \
-      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
-      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-      ceph_docker_image={env:UPDATE_CEPH_DOCKER_IMAGE:ceph/daemon} \
-      ceph_docker_image_tag={env:UPDATE_CEPH_DOCKER_IMAGE_TAG:latest} \
-      ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
-      ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
-  "
-
-  bash -c "CEPH_STABLE_RELEASE={env:UPDATE_CEPH_STABLE_RELEASE:luminous} testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests"
-
 [shrink-mon]
 commands=
   cp {toxinidir}/infrastructure-playbooks/shrink-mon.yml {toxinidir}/shrink-mon.yml
@@ -129,7 +110,7 @@ commands=
   ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/shrink-osd.yml --extra-vars "\
       ireallymeanit=yes \
       osd_to_kill=0 \
-      ceph_stable_release={env:UPDATE_CEPH_STABLE_RELEASE:luminous} \
+      ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} \
       fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
       ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
       ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
@@ -187,21 +168,15 @@ setenv=
   rhcs: CEPH_STABLE_RELEASE = luminous
   jewel: CEPH_STABLE_RELEASE = jewel
   jewel: CEPH_DOCKER_IMAGE_TAG = latest-jewel
-  jewel: UPDATE_CEPH_STABLE_RELEASE = luminous
-  jewel: UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-luminous
   jewel: CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-jewel
   luminous: CEPH_STABLE_RELEASE = luminous
   luminous: CEPH_DOCKER_IMAGE_TAG = latest-luminous
   luminous: CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-luminous
-  luminous: UPDATE_CEPH_STABLE_RELEASE = luminous
-  luminous: UPDATE_CEPH_DOCKER_IMAGE_TAG = latest
   lvm_osds: CEPH_STABLE_RELEASE = luminous
   bluestore_lvm_osds: CEPH_STABLE_RELEASE = luminous
-  update: ROLLING_UPDATE = True
 deps= -r{toxinidir}/tests/requirements.txt
 changedir=
   all_daemons: {toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:}
-  update: {toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:}
   cluster: {toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:}
   filestore_osds: {toxinidir}/tests/functional/fs-osds{env:CONTAINER_DIR:}
   bluestore_osds: {toxinidir}/tests/functional/bs-osds{env:CONTAINER_DIR:}
@@ -264,7 +239,6 @@ commands=
   purge_bluestore: {[purge]commands}
   purge_lvm_osds: {[purge-lvm]commands}
   switch_to_containers: {[switch-to-containers]commands}
-  update: {[update]commands}
   shrink_mon: {[shrink-mon]commands}
   shrink_osd: {[shrink-osd]commands}
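
Usage sketch (illustrative only): with the scenario split out, the rolling-update job would be driven through the dedicated config file via tox's -c option, while all other scenarios keep resolving against the default tox.ini. The environment names below are one possible expansion of the envlist values shown in the diff; the luminous/centos/container factors are an example, and the commands assume tox is invoked from the repository root.

    # run the rolling-update scenario from its dedicated config file
    tox -c tox-update.ini -e luminous-centos-container-update

    # remaining scenarios are unchanged and still come from tox.ini
    tox -e luminous-centos-container-all_daemons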