tests: test idempotency only on all_daemons job

there's no need to run this test in every scenario.
testing idempotency on all_daemons should be enough and allows us to save
precious CI resources.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/3773/head
Guillaume Abrioux 2019-04-01 17:22:50 +02:00
parent 4241b6403f
commit 136bfe096c
1 changed file with 5 additions and 16 deletions

tox.ini

@@ -290,30 +290,19 @@ commands=
       "
   # wait 30sec for services to be ready
-  sleep 30
+  all_daemons: sleep 30
   # test cluster state using ceph-ansible tests
-  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
+  all_daemons: py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
   # reboot all vms
-  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml
+  all_daemons: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml
   # wait 30sec for services to be ready
   # retest to ensure cluster came back up correctly after rebooting
-  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
+  all_daemons: py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
   # handlers/idempotency test
-  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} \
-    --extra-vars "\
-      delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
-      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
-      ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
-      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-master} # not ideal but what can we do? \
-      ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
-      ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
-      copy_admin_key={env:COPY_ADMIN_KEY:False} " \
-    --extra-vars @ceph-override.json
+  all_daemons: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-master} ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} copy_admin_key={env:COPY_ADMIN_KEY:False}" --extra-vars @ceph-override.json
   purge: {[purge]commands}
   switch_to_containers: {[switch-to-containers]commands}
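
For context on the syntax above: the "all_daemons:" prefix is tox's factor-conditional setting, so a line under commands that starts with a factor name only runs in environments whose name contains that factor. A minimal sketch of the mechanism, with hypothetical environment and file names (not the project's real CI matrix):

    [tox]
    # two hypothetical environments; "all_daemons" acts as a factor in the env name
    envlist = all_daemons, update

    [testenv]
    commands =
        # runs in every environment
        ansible-playbook -i hosts site.yml
        # factor-conditional line: runs only when the env name matches "all_daemons"
        all_daemons: py.test -v tests/

Under that behavior, a run such as "tox -e update" would skip the reboot, re-test, and idempotency steps entirely, while the all_daemons job keeps exercising them, which is what lets the other CI scenarios finish faster.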