ceph-ansible/tox.ini


[tox]
envlist = {dev,luminous,mimic,rhcs}-{xenial_cluster,centos7_cluster,cluster,docker_cluster,update_cluster,update_docker_cluster,switch_to_containers,ooo_collocation,bluestore_lvm_osds,bluestore_lvm_osds_container,lvm_osds,purge_lvm_osds,purge_lvm_osds_container,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,lvm_batch,lvm_osds_container,lvm_batch_container,add_osds,add_osds_container,rgw_multisite,rgw_multisite_container,container_podman,storage_inventory,storage_inventory_container}
  infra_lv_create
skipsdist = True
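
# The envlist above uses tox's generative syntax: {dev,luminous,mimic,rhcs}-{...}
# expands to every release/scenario combination, e.g. luminous-centos7_cluster
# or dev-lvm_osds. A typical run (from the repo root, with tox and vagrant installed):
#   tox -e luminous-centos7_cluster
# Anything after `--` becomes {posargs} and replaces the default vagrant provider:
#   tox -e luminous-centos7_cluster -- --provider=libvirt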
# a test scenario for the lv-create.yml and lv-teardown playbooks
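# (a standalone env with no release factor, so invoke it directly: tox -e infra_lv_create)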
[testenv:infra_lv_create]
whitelist_externals =
    vagrant
    bash
    cp
    mkdir
    cat
passenv=*
setenv=
  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
  ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
  ANSIBLE_ACTION_PLUGINS = {toxinidir}/plugins/actions
  ANSIBLE_CALLBACK_PLUGINS = {toxinidir}/plugins/callback
  ANSIBLE_CALLBACK_WHITELIST = profile_tasks
  # only available for ansible >= 2.5
  ANSIBLE_STDOUT_CALLBACK = yaml
deps= -r{toxinidir}/tests/requirements.txt
changedir={toxinidir}/tests/functional/centos/7/infra_lv_create
commands=
  vagrant up --no-provision {posargs:--provider=virtualbox}
  bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
  cp {toxinidir}/infrastructure-playbooks/lv-create.yml {toxinidir}/lv-create.yml
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/lv-create.yml
  cp {toxinidir}/infrastructure-playbooks/lv-teardown.yml {toxinidir}/lv-teardown.yml
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/lv-teardown.yml --extra-vars "ireallymeanit=yes"
  cat {toxinidir}/lv-create.log
  vagrant destroy --force
# extra commands for purging clusters: purge the cluster, then set it up
# again to ensure a purge clears the nodes well enough that they can be
# redeployed to.
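# This block is not an env itself; scenario envs pull these commands in
# through tox's section substitution, e.g. `purge_cluster_container: {[purge]commands}`
# in the [testenv] commands below.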
[purge]
commands=
  cp {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} {toxinidir}/{env:PURGE_PLAYBOOK:purge-cluster.yml}
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
      ireallymeanit=yes \
      remove_packages=yes \
      ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
  "
  # re-setup lvm
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  # set up the cluster again
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
      ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
      ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
      ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
  "
  # test that the cluster can be redeployed in a healthy state
  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
[purge-lvm]
commands=
  cp {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} {toxinidir}/{env:PURGE_PLAYBOOK:purge-cluster.yml}
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
      ireallymeanit=yes \
      remove_packages=yes \
      ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
  "
  # re-setup lvm
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  # set up the cluster again
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
      ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
      ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
      ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
  "
  # test that the cluster can be redeployed in a healthy state
  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
# extra commands for performing a rolling update;
# the target release currently defaults to kraken (via UPDATE_CEPH_STABLE_RELEASE)
# as we're still installing jewel by default
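# example (hypothetical invocation; passenv=* in [testenv] forwards host env
# vars, so the default can be overridden where no release factor sets it):
#   UPDATE_CEPH_STABLE_RELEASE=mimic tox -e dev-update_cluster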
[update]
commands=
  cp {toxinidir}/infrastructure-playbooks/rolling_update.yml {toxinidir}/rolling_update.yml
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/rolling_update.yml --extra-vars "\
      ireallymeanit=yes \
      ceph_stable_release={env:UPDATE_CEPH_STABLE_RELEASE:kraken} \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:UPDATE_CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:UPDATE_CEPH_DOCKER_IMAGE_TAG:latest-master} \
      ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
      ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
  "
  bash -c "CEPH_STABLE_RELEASE={env:UPDATE_CEPH_STABLE_RELEASE:luminous} testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests"
[shrink-mon]
commands=
  cp {toxinidir}/infrastructure-playbooks/shrink-mon.yml {toxinidir}/shrink-mon.yml
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/shrink-mon.yml --extra-vars "\
      ireallymeanit=yes \
      mon_to_kill={env:MON_TO_KILL:ceph-mon2} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
  "
[shrink-osd]
commands=
  cp {toxinidir}/infrastructure-playbooks/shrink-osd.yml {toxinidir}/shrink-osd.yml
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/shrink-osd.yml --extra-vars "\
      ireallymeanit=yes \
      osd_to_kill=0 \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
  "
[switch-to-containers]
commands=
  cp {toxinidir}/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml {toxinidir}/switch-from-non-containerized-to-containerized-ceph-daemons.yml
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/switch-from-non-containerized-to-containerized-ceph-daemons.yml --extra-vars "\
      ireallymeanit=yes \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
      ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
      ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
  "
  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-switch-to-containers {toxinidir}/tests/functional/tests
[add-osds]
commands=
  ansible-playbook -vv -i {changedir}/hosts-2 --limit osd1 {toxinidir}/tests/functional/setup.yml
  cp {toxinidir}/infrastructure-playbooks/add-osd.yml {toxinidir}/add-osd.yml
  ansible-playbook -vv -i {changedir}/hosts-2 --limit osd1 {toxinidir}/add-osd.yml --extra-vars "\
      ireallymeanit=yes \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_stable_release={env:CEPH_STABLE_RELEASE:mimic} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
      ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
      ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
  "
  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 {toxinidir}/tests/functional/tests
[rgw-multisite]
commands=
  bash -c "cd {changedir}/secondary && vagrant up --no-provision {posargs:--provider=virtualbox}"
  bash -c "cd {changedir}/secondary && bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}/secondary"
  ansible-playbook --ssh-extra-args='-F {changedir}/secondary/vagrant_ssh_config' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/setup.yml
  ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "change_dir={changedir}/secondary ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
  ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1'
  ansible-playbook --ssh-extra-args='-F {changedir}/secondary/vagrant_ssh_config' -vv -i {changedir}/secondary/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
      ireallymeanit=yes \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/secondary/fetch} \
      ceph_stable_release={env:CEPH_STABLE_RELEASE:mimic} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
      ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
      ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
  "
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rgw_multisite.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest}"
  ansible-playbook -vv --ssh-extra-args='-F {changedir}/secondary/vagrant_ssh_config' -i {changedir}/secondary/hosts {toxinidir}/tests/functional/rgw_multisite.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest}"
  bash -c "cd {changedir}/secondary && vagrant destroy --force"
  ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1 state=absent'
[storage-inventory]
commands=
  cp {toxinidir}/infrastructure-playbooks/storage-inventory.yml {toxinidir}/storage-inventory.yml
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/storage-inventory.yml --extra-vars "\
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
  "
[testenv]
whitelist_externals =
    vagrant
    bash
    pip
    cp
    sleep
    rm
passenv=*
sitepackages=True
setenv=
  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
  ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
  ANSIBLE_ACTION_PLUGINS = {toxinidir}/plugins/actions
  ANSIBLE_CALLBACK_PLUGINS = {toxinidir}/plugins/callback
  ANSIBLE_CALLBACK_WHITELIST = profile_tasks
  # only available for ansible >= 2.5
  ANSIBLE_STDOUT_CALLBACK = yaml
  ansible2.2: DELEGATE_FACTS_HOST = False
  docker_cluster: PLAYBOOK = site-docker.yml.sample
  docker_cluster_collocation: PLAYBOOK = site-docker.yml.sample
  ooo_collocation: PLAYBOOK = site-docker.yml.sample
  update_docker_cluster: PLAYBOOK = site-docker.yml.sample
  purge_cluster_container: PLAYBOOK = site-docker.yml.sample
  purge_cluster_container: PURGE_PLAYBOOK = purge-docker-cluster.yml
  purge_lvm_osds_container: PLAYBOOK = site-docker.yml.sample
  purge_lvm_osds_container: PURGE_PLAYBOOK = purge-docker-cluster.yml
  add_osds: PLAYBOOK = site.yml.sample
  add_osds_container: PLAYBOOK = site-docker.yml.sample
  rgw_multisite: PLAYBOOK = site.yml.sample
  rgw_multisite_container: PLAYBOOK = site-docker.yml.sample
  shrink_mon_container: PLAYBOOK = site-docker.yml.sample
  shrink_mon_container: MON_TO_KILL = mon2
  shrink_osd_container: PLAYBOOK = site-docker.yml.sample
  shrink_osd_container: COPY_ADMIN_KEY = True
  shrink_osd: COPY_ADMIN_KEY = True
  container_podman: PLAYBOOK = site-docker.yml.sample
  storage_inventory: COPY_ADMIN_KEY = True
  storage_inventory_container: PLAYBOOK = site-docker.yml.sample
  rhcs: CEPH_STABLE_RELEASE = luminous
  luminous: CEPH_STABLE_RELEASE = luminous
  luminous: CEPH_DOCKER_IMAGE_TAG = latest-luminous
  luminous: CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-luminous
  luminous: UPDATE_CEPH_STABLE_RELEASE = luminous
  luminous: UPDATE_CEPH_DOCKER_IMAGE_TAG = latest
  mimic: CEPH_STABLE_RELEASE = mimic
  mimic: CEPH_DOCKER_IMAGE_TAG = latest-mimic
  mimic: CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis
  mimic: UPDATE_CEPH_STABLE_RELEASE = mimic
  mimic: UPDATE_CEPH_DOCKER_IMAGE_TAG = latest
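  # e.g. `tox -e mimic-docker_cluster` combines the mimic factor above with the
  # docker_cluster factor: site-docker.yml.sample deployed with ceph/daemon:latest-mimic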
  lvm_osds: CEPH_STABLE_RELEASE = luminous
  lvm_osds: PLAYBOOK = site.yml.sample
  lvm_osds_container: CEPH_STABLE_RELEASE = luminous
  lvm_osds_container: PLAYBOOK = site-docker.yml.sample
  bluestore_lvm_osds: CEPH_STABLE_RELEASE = luminous
  bluestore_lvm_osds_container: CEPH_STABLE_RELEASE = luminous
  bluestore_lvm_osds_container: PLAYBOOK = site-docker.yml.sample
  lvm_batch_container: PLAYBOOK = site-docker.yml.sample
  lvm_batch_container: CEPH_STABLE_RELEASE = luminous
  update_cluster: ROLLING_UPDATE = True
  update_docker_cluster: ROLLING_UPDATE = True
  add_osds: CEPH_STABLE_RELEASE = luminous
  add_osds_container: CEPH_STABLE_RELEASE = luminous
  rgw_multisite: CEPH_STABLE_RELEASE = luminous
  rgw_multisite_container: CEPH_STABLE_RELEASE = luminous
  ooo_collocation: CEPH_DOCKER_IMAGE_TAG = v3.0.3-stable-3.0-luminous-centos-7-x86_64
deps= -r{toxinidir}/tests/requirements.txt
changedir=
  # tests a 1 mon, 1 osd, 1 mds and 1 rgw xenial cluster using the non-collocated OSD scenario
  xenial_cluster: {toxinidir}/tests/functional/ubuntu/16.04/cluster
  # tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using the non-collocated OSD scenario
  centos7_cluster: {toxinidir}/tests/functional/centos/7/cluster
  shrink_mon: {toxinidir}/tests/functional/centos/7/cluster
  shrink_mon_container: {toxinidir}/tests/functional/centos/7/docker
  shrink_osd: {toxinidir}/tests/functional/centos/7/shrink_osd
  shrink_osd_container: {toxinidir}/tests/functional/centos/7/shrink_osd_container
  # an alias for centos7_cluster that makes the name better suited for rhcs testing
  cluster: {toxinidir}/tests/functional/centos/7/cluster
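  # e.g. `tox -e rhcs-cluster` reads better than `rhcs-centos7_cluster`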
  # tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using docker
  docker_cluster: {toxinidir}/tests/functional/centos/7/docker
  docker_cluster_collocation: {toxinidir}/tests/functional/centos/7/docker-collocation
  update_docker_cluster: {toxinidir}/tests/functional/centos/7/docker
  purge_cluster_non_container: {toxinidir}/tests/functional/centos/7/cluster
  purge_cluster_container: {toxinidir}/tests/functional/centos/7/docker
  update_cluster: {toxinidir}/tests/functional/centos/7/cluster
  switch_to_containers: {toxinidir}/tests/functional/centos/7/cluster
  lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds
  lvm_osds_container: {toxinidir}/tests/functional/centos/7/lvm-osds-container
  lvm_batch: {toxinidir}/tests/functional/centos/7/lvm-batch
  lvm_batch_container: {toxinidir}/tests/functional/centos/7/lvm-batch-container
  bluestore_lvm_osds: {toxinidir}/tests/functional/centos/7/bs-lvm-osds
  bluestore_lvm_osds_container: {toxinidir}/tests/functional/centos/7/bs-lvm-osds-container
  purge_lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds
  purge_lvm_osds_container: {toxinidir}/tests/functional/centos/7/lvm-osds-container
  ooo_collocation: {toxinidir}/tests/functional/centos/7/ooo-collocation
  add_osds: {toxinidir}/tests/functional/centos/7/add-osds
  add_osds_container: {toxinidir}/tests/functional/centos/7/add-osds-container
  rgw_multisite: {toxinidir}/tests/functional/centos/7/rgw-multisite
  rgw_multisite_container: {toxinidir}/tests/functional/centos/7/rgw-multisite-container
  container_podman: {toxinidir}/tests/functional/fedora/29/container-podman
  storage_inventory: {toxinidir}/tests/functional/centos/7/lvm-osds
  storage_inventory_container: {toxinidir}/tests/functional/centos/7/lvm-osds-container
commands=
  rhcs: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "change_dir={changedir}" --tags "vagrant_setup"
  dev: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
  vagrant up --no-provision {posargs:--provider=virtualbox}
  bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
  # configure lvm
  centos7_cluster: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  docker_cluster: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  docker_cluster_collocation: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  lvm_osds_container: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  bluestore_lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  bluestore_lvm_osds_container: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  shrink_osd: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  shrink_osd_container: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  shrink_mon: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  shrink_mon_container: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  purge_lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  purge_lvm_osds_container: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  update_docker_cluster: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  update_cluster: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  purge_cluster_non_container: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  purge_cluster_container: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  switch_to_containers: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  container_podman: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  container_podman: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/simulate_rhel8.yml
  storage_inventory: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  storage_inventory_container: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  rhcs: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/setup.yml
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
      delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_stable_release={env:CEPH_STABLE_RELEASE:mimic} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
      ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
      ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
      copy_admin_key={env:COPY_ADMIN_KEY:False} \
  "
  # wait 30sec for services to be ready
  sleep 30
  # test cluster state using ceph-ansible tests
  testinfra -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
  # reboot all vms
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/reboot.yml
  # wait 30sec for services to be ready
  sleep 30
  # retest to ensure cluster came back up correctly after rebooting
  testinfra -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
  # handlers/idempotency test: rerun the playbook, this time with the *_BIS
  # image tag (not ideal, but it changes the image so the restart handlers fire)
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} \
    --extra-vars "\
      delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_stable_release={env:CEPH_STABLE_RELEASE:mimic} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-master} \
      ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
      ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
      copy_admin_key={env:COPY_ADMIN_KEY:False} " \
    --extra-vars @ceph-override.json
  purge_cluster_non_container: {[purge]commands}
  purge_cluster_container: {[purge]commands}
  purge_filestore_osds_non_container: {[purge]commands}
  purge_bluestore_osds_non_container: {[purge]commands}
  purge_filestore_osds_container: {[purge]commands}
  purge_bluestore_osds_container: {[purge]commands}
  purge_lvm_osds: {[purge-lvm]commands}
  purge_lvm_osds_container: {[purge-lvm]commands}
  switch_to_containers: {[switch-to-containers]commands}
  update_cluster: {[update]commands}
  update_docker_cluster: {[update]commands}
  shrink_mon: {[shrink-mon]commands}
  shrink_mon_container: {[shrink-mon]commands}
  shrink_osd: {[shrink-osd]commands}
  shrink_osd_container: {[shrink-osd]commands}
  add_osds: {[add-osds]commands}
  add_osds_container: {[add-osds]commands}
  rgw_multisite: {[rgw-multisite]commands}
  rgw_multisite_container: {[rgw-multisite]commands}
  storage_inventory: {[storage-inventory]commands}
  storage_inventory_container: {[storage-inventory]commands}
  vagrant destroy --force