[tox]
envlist = {dev,jewel,luminous,rhcs}-{ansible2.2,ansible2.3}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster,purge_dmcrypt,docker_dedicated_journal,docker_dmcrypt_journal_collocation,update_dmcrypt,update_cluster,cluster,purge_docker_cluster,update_docker_cluster,switch_to_containers}
  {dev,luminous}-{ansible2.2,ansible2.3}-{bluestore_journal_collocation,bluestore_cluster,bluestore_dmcrypt_journal,bluestore_dmcrypt_journal_collocation,bluestore_docker_dedicated_journal,bluestore_docker_dmcrypt_journal_collocation,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,journal_collocation_auto,journal_collocation_auto_dmcrypt,shrink_mon_container,shrink_osd_container,docker_cluster_collocation}

skipsdist = True

# extra commands for purging clusters: purge the cluster, then set it up
# again to ensure that a purge clears nodes well enough that they can be
# redeployed to
[purge]
commands=
  cp {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} {toxinidir}/{env:PURGE_PLAYBOOK:purge-cluster.yml}
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
      ireallymeanit=yes \
      remove_packages=yes \
      ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest} \
  "
  # set up the cluster again
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
      ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest} \
      ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
      ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
  "
  # test that the cluster can be redeployed in a healthy state
  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests

[purge-lvm]
commands=
  cp {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} {toxinidir}/{env:PURGE_PLAYBOOK:purge-cluster.yml}
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
      ireallymeanit=yes \
      remove_packages=yes \
      ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest} \
  "
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  # set up the cluster again
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
      ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest} \
      ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
      ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
  "
  # test that the cluster can be redeployed in a healthy state
  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
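# Usage note (illustrative invocation; not consumed by tox): the purge
# sections above are never run on their own. [testenv] maps the purge_*
# factors to them at the end of its commands list, so e.g.
#
#   tox -e luminous-ansible2.3-purge_cluster
#
# first deploys and tests a cluster, then runs {[purge]commands} to purge,
# redeploy, and retest it.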
# extra commands for performing a rolling update
# currently this hardcodes the release to kraken
# as we're still installing jewel by default
[update]
commands=
  cp {toxinidir}/infrastructure-playbooks/rolling_update.yml {toxinidir}/rolling_update.yml
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/rolling_update.yml --extra-vars "\
      ireallymeanit=yes \
      ceph_stable_release={env:UPDATE_CEPH_STABLE_RELEASE:kraken} \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:UPDATE_CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:UPDATE_CEPH_DOCKER_IMAGE_TAG:latest} \
      ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
      ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
  "
  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests

[shrink-mon]
commands=
  cp {toxinidir}/infrastructure-playbooks/shrink-mon.yml {toxinidir}/shrink-mon.yml
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/shrink-mon.yml --extra-vars "\
      ireallymeanit=yes \
      mon_to_kill={env:MON_TO_KILL:ceph-mon2} \
  "

[shrink-osd]
commands=
  cp {toxinidir}/infrastructure-playbooks/shrink-osd.yml {toxinidir}/shrink-osd.yml
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/shrink-osd.yml --extra-vars "\
      ireallymeanit=yes \
      osd_to_kill=0 \
  "

[switch-to-containers]
commands=
  cp {toxinidir}/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml {toxinidir}/switch-from-non-containerized-to-containerized-ceph-daemons.yml
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/switch-from-non-containerized-to-containerized-ceph-daemons.yml --extra-vars "\
      ireallymeanit=yes \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:UPDATE_CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:UPDATE_CEPH_DOCKER_IMAGE_TAG:latest} \
      ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
      ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
  "
  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-switch-to-containers {toxinidir}/tests/functional/tests

[testenv]
whitelist_externals =
    vagrant
    bash
    pip
    cp
    sleep
passenv=*
sitepackages=True
setenv=
  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
  ANSIBLE_ACTION_PLUGINS = {toxinidir}/plugins/actions
  ANSIBLE_CALLBACK_WHITELIST = profile_tasks
  # only available for ansible >= 2.2
  ANSIBLE_STDOUT_CALLBACK = debug
  docker_cluster: PLAYBOOK = site-docker.yml.sample
  docker_cluster_collocation: PLAYBOOK = site-docker.yml.sample
  update_docker_cluster: PLAYBOOK = site-docker.yml.sample
  purge_docker_cluster: PLAYBOOK = site-docker.yml.sample
  purge_docker_cluster: PURGE_PLAYBOOK = purge-docker-cluster.yml
  docker_dedicated_journal: PLAYBOOK = site-docker.yml.sample
  docker_dmcrypt_journal_collocation: PLAYBOOK = site-docker.yml.sample
  bluestore_docker_dedicated_journal: PLAYBOOK = site-docker.yml.sample
  bluestore_docker_dmcrypt_journal_collocation: PLAYBOOK = site-docker.yml.sample
  shrink_mon_container: PLAYBOOK = site-docker.yml.sample
  shrink_mon_container: MON_TO_KILL = mon2
  shrink_osd_container: PLAYBOOK = site-docker.yml.sample
  rhcs: CEPH_STABLE_RELEASE = luminous
  jewel: CEPH_STABLE_RELEASE = jewel
  jewel: CEPH_DOCKER_IMAGE_TAG = tag-build-master-jewel-ubuntu-16.04
  jewel: UPDATE_CEPH_STABLE_RELEASE = kraken
  luminous: CEPH_STABLE_RELEASE = luminous
  luminous: CEPH_DOCKER_IMAGE_TAG = tag-build-master-luminous-ubuntu-16.04
  luminous: UPDATE_CEPH_STABLE_RELEASE = luminous
  luminous: UPDATE_CEPH_DOCKER_IMAGE_TAG = tag-build-master-luminous-ubuntu-16.04
  lvm_osds: CEPH_STABLE_RELEASE = luminous
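# To illustrate how the factor-conditional settings above combine (a sketch;
# any env from the envlist behaves the same way): for an env such as
# jewel-ansible2.3-docker_cluster, tox applies every line whose prefix
# matches one of the env's factors, so PLAYBOOK=site-docker.yml.sample,
# CEPH_STABLE_RELEASE=jewel and
# CEPH_DOCKER_IMAGE_TAG=tag-build-master-jewel-ubuntu-16.04 are all set.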
deps=
  ansible2.2: ansible==2.2.3
  ansible2.3: ansible==2.3.1
  -r{toxinidir}/tests/requirements.txt
changedir=
  # tests a 1 mon, 1 osd, 1 mds and 1 rgw xenial cluster using the non-collocated OSD scenario
  xenial_cluster: {toxinidir}/tests/functional/ubuntu/16.04/cluster
  # tests a 1 mon, 1 osd centos7 cluster using the collocated OSD scenario
  journal_collocation: {toxinidir}/tests/functional/centos/7/jrn-col
  # tests a 1 mon, 1 osd centos7 cluster using the collocated OSD scenario with disk autodiscovery
  journal_collocation_auto: {toxinidir}/tests/functional/centos/7/jrn-col-auto
  # tests a 1 mon, 1 osd centos7 cluster using the collocated OSD scenario with encrypted disk autodiscovery
  journal_collocation_auto_dmcrypt: {toxinidir}/tests/functional/centos/7/jrn-col-auto-dm
  # tests a 1 mon, 1 osd centos7 cluster using the dmcrypt non-collocated OSD scenario
  dmcrypt_journal: {toxinidir}/tests/functional/centos/7/crypt-ded-jrn
  # tests a 1 mon, 1 osd centos7 cluster using the dmcrypt collocated OSD scenario
  dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/crypt-jrn-col
  # tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using the non-collocated OSD scenario
  centos7_cluster: {toxinidir}/tests/functional/centos/7/cluster
  shrink_mon: {toxinidir}/tests/functional/centos/7/cluster
  shrink_mon_container: {toxinidir}/tests/functional/centos/7/docker
  shrink_osd: {toxinidir}/tests/functional/centos/7/cluster
  shrink_osd_container: {toxinidir}/tests/functional/centos/7/docker
  # an alias for centos7_cluster; this makes the name better suited for rhcs testing
  cluster: {toxinidir}/tests/functional/centos/7/cluster
  # tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using docker
  docker_cluster: {toxinidir}/tests/functional/centos/7/docker
  docker_cluster_collocation: {toxinidir}/tests/functional/centos/7/docker-collocation
  update_docker_cluster: {toxinidir}/tests/functional/centos/7/docker
  purge_docker_cluster: {toxinidir}/tests/functional/centos/7/docker
  docker_dedicated_journal: {toxinidir}/tests/functional/centos/7/docker-ded-jrn
  docker_dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/docker-crypt-jrn-col
  purge_cluster: {toxinidir}/tests/functional/centos/7/cluster
  purge_dmcrypt: {toxinidir}/tests/functional/centos/7/crypt-ded-jrn
  update_dmcrypt: {toxinidir}/tests/functional/centos/7/crypt-ded-jrn
  update_cluster: {toxinidir}/tests/functional/centos/7/cluster
  switch_to_containers: {toxinidir}/tests/functional/centos/7/cluster
  bluestore_journal_collocation: {toxinidir}/tests/functional/centos/7/bs-jrn-col
  bluestore_cluster: {toxinidir}/tests/functional/centos/7/bluestore
  bluestore_dmcrypt_journal: {toxinidir}/tests/functional/centos/7/bs-crypt-ded-jrn
  bluestore_dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/bs-crypt-jrn-col
  bluestore_docker_dedicated_journal: {toxinidir}/tests/functional/centos/7/bs-dock-ded-jrn
  bluestore_docker_dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/bs-dock-crypt-jrn-col
  lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds
  purge_lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds
commands=
  rhcs: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "change_dir={changedir}" --tags "vagrant_setup"
  dev: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
  vagrant up --no-provision {posargs:--provider=virtualbox}
  bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
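  # Note: {posargs} in the vagrant call above defaults to
  # --provider=virtualbox; a different provider can be passed after tox's
  # "--" separator, e.g. (illustrative):
  #   tox -e dev-ansible2.3-centos7_cluster -- --provider=libvirt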
  lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  purge_lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  rhcs: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest} \
      ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
      ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
  "
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/setup.yml
  # wait 2 minutes for services to be ready
  sleep 120
  # test cluster state using ceph-ansible tests
  testinfra -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
  # reboot all vms
  vagrant reload --no-provision
  # wait 2 minutes for services to be ready
  sleep 120
  # retest to ensure cluster came back up correctly after rebooting
  testinfra -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
  # handlers/idempotency test
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} \
      --extra-vars "\
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest} \
      ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
      ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} " \
      --extra-vars @ceph-override.json
  purge_cluster: {[purge]commands}
  purge_lvm_osds: {[purge-lvm]commands}
  purge_dmcrypt: {[purge]commands}
  purge_docker_cluster: {[purge]commands}
  switch_to_containers: {[switch-to-containers]commands}
  update_dmcrypt: {[update]commands}
  update_cluster: {[update]commands}
  update_docker_cluster: {[update]commands}
  shrink_mon: {[shrink-mon]commands}
  shrink_mon_container: {[shrink-mon]commands}
  shrink_osd: {[shrink-osd]commands}
  shrink_osd_container: {[shrink-osd]commands}
  vagrant destroy --force
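
# Example end-to-end run (illustrative; substitute any env name built from
# the envlist at the top of this file):
#
#   pip install tox
#   tox -e luminous-ansible2.3-xenial_cluster
#
# A single env brings up the Vagrant VMs, deploys with the playbook selected
# by PLAYBOOK, verifies the cluster with testinfra, reboots and re-verifies,
# re-runs the playbook as an idempotency check, and finally destroys the VMs.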