[tox]
envlist = {centos,ubuntu}-{container,non_container}-{all_daemons,collocation,lvm_osds,shrink_mon,shrink_osd,shrink_mgr,shrink_mds,shrink_rbdmirror,shrink_rgw,lvm_batch,add_mons,add_osds,add_mgrs,add_mdss,add_rbdmirrors,add_rgws,rgw_multisite,purge,storage_inventory,lvm_auto_discovery,all_in_one,purge_dashboard}
  {centos,ubuntu}-container-{ooo_collocation}
  {centos,ubuntu}-non_container-{switch_to_containers}
  infra_lv_create
  migrate_ceph_disk_to_ceph_volume

skipsdist = True

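# Each factor combination in envlist expands to a dash-joined env name; for
# example "tox -e centos-container-all_daemons" runs the containerized
# all_daemons scenario. Anything given after "--" on the tox command line is
# forwarded to "vagrant up" as {posargs}, e.g. (hypothetical provider):
#   tox -e centos-container-all_daemons -- --provider=libvirt
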
# a test scenario that deploys a luminous cluster with ceph-disk osds
# and then upgrades to nautilus and migrates those osds to ceph-volume
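# (for example: "CEPH_ANSIBLE_BRANCH=stable-3.2 tox -e migrate_ceph_disk_to_ceph_volume";
# passenv=* below forwards that variable into the test env, where it selects
# the branch cloned for the initial deployment)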
[testenv:migrate_ceph_disk_to_ceph_volume]
allowlist_externals =
  vagrant
  bash
  git
  pip
passenv=*
setenv=
  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
  ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
  ANSIBLE_ACTION_PLUGINS = {toxinidir}/plugins/actions
  ANSIBLE_CALLBACK_PLUGINS = {toxinidir}/plugins/callback
  ANSIBLE_CALLBACK_WHITELIST = profile_tasks
  ANSIBLE_FILTER_PLUGINS = {toxinidir}/plugins/filter
  # only available for ansible >= 2.5
  ANSIBLE_STDOUT_CALLBACK = yaml
changedir={toxinidir}/tests/functional/migrate_ceph_disk_to_ceph_volume
commands=
  vagrant up --no-provision {posargs:--provider=virtualbox}
  bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}

  # use the stable-3.2 branch to deploy a luminous cluster
  git clone -b {env:CEPH_ANSIBLE_BRANCH:stable-3.2} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
  pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt

  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

  # deploy the cluster
  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
      delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
      ceph_stable_release=luminous \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
  "

  # wait 30sec for services to be ready
  sleep 30
  # test cluster state using ceph-ansible tests
  py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={changedir}/vagrant_ssh_config {envdir}/tmp/ceph-ansible/tests/functional/tests

  # install ceph-ansible@master requirements
  pip install -r {toxinidir}/tests/requirements.txt

  # migrate osds to ceph-volume and upgrade to nautilus
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/rolling_update.yml --extra-vars "\
      ireallymeanit=yes \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:UPDATE_CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag=latest-nautilus \
      osd_scenario=lvm \
  "

  # test cluster state again using ceph-ansible tests
  bash -c "CEPH_STABLE_RELEASE=nautilus py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"

  # reboot all vms
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/reboot.yml

  # retest to ensure cluster came back up correctly after rebooting
  bash -c "CEPH_STABLE_RELEASE=nautilus py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"

  vagrant destroy --force

# a test scenario for the lv-create.yml and lv-teardown.yml playbooks
[testenv:infra_lv_create]
allowlist_externals =
  vagrant
  bash
  mkdir
  cat
passenv=*
setenv=
  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
  ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
  ANSIBLE_ACTION_PLUGINS = {toxinidir}/plugins/actions
  ANSIBLE_CALLBACK_PLUGINS = {toxinidir}/plugins/callback
  ANSIBLE_CALLBACK_WHITELIST = profile_tasks
  # only available for ansible >= 2.5
  ANSIBLE_STDOUT_CALLBACK = yaml
deps= -r{toxinidir}/tests/requirements.txt
changedir={toxinidir}/tests/functional/centos/7/infra_lv_create
commands=
  vagrant up --no-provision {posargs:--provider=virtualbox}
  bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}

  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/lv-create.yml

  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/lv-teardown.yml --extra-vars "ireallymeanit=yes"

  cat {toxinidir}/lv-create.log

  vagrant destroy --force

# extra commands for purging clusters
# that purge the cluster and then set it up again to
# ensure that a purge can clear nodes well enough that they
# can be redeployed to.
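# (these command blocks are spliced into [testenv] below via references such
# as "purge: {[purge]commands}", so for example "tox -e centos-container-purge"
# runs a full deploy, purge and redeploy cycle)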
[purge]
commands=
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rbd_map_devices.yml

  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
      ireallymeanit=yes \
      remove_packages=yes \
  "

  # re-setup lvm
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml

  # set up the cluster again
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\
      ceph_docker_registry_auth=True \
      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
  "
  # test that the cluster can be redeployed in a healthy state
  py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

[purge-dashboard]
commands=
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/purge-dashboard.yml --extra-vars "\
      ireallymeanit=yes \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph-ci/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
  "

  # set up the cluster again
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\
      ceph_docker_registry_auth=True \
      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
  "
  # test that the cluster can be redeployed in a healthy state
  py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

[purge-lvm]
commands=
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
      ireallymeanit=yes \
      remove_packages=yes \
      ceph_docker_registry_auth=True \
      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
  "

  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml

  # set up the cluster again
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample}
  # test that the cluster can be redeployed in a healthy state
  py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

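# the shrink scenarios below read the daemon to remove from *_TO_KILL
# variables; the per-factor defaults (e.g. MON_TO_KILL = mon2) are set in the
# [testenv] setenv block further down, and each playbook falls back to that
# default via substitutions such as {env:MON_TO_KILL:mon2}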
[shrink-mon]
commands=
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-mon.yml --extra-vars "\
      ireallymeanit=yes \
      mon_to_kill={env:MON_TO_KILL:mon2} \
  "

[shrink-osd]
commands=
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
      ireallymeanit=yes \
      osd_to_kill={env:OSD_TO_KILL:0} \
  "

[shrink-mgr]
commands=
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-mgr.yml --extra-vars "\
      ireallymeanit=yes \
      mgr_to_kill={env:MGR_TO_KILL:mgr1} \
  "

[shrink-mds]
commands=
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-mds.yml --extra-vars "\
      ireallymeanit=yes \
      mds_to_kill={env:MDS_TO_KILL:mds0} \
  "

[shrink-rbdmirror]
commands=
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-rbdmirror.yml --extra-vars "\
      ireallymeanit=yes \
      rbdmirror_to_kill={env:RBDMIRROR_TO_KILL:rbd-mirror0} \
  "

[shrink-rgw]
commands=
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-rgw.yml --extra-vars "\
      ireallymeanit=yes \
      rgw_to_kill={env:RGW_TO_KILL:rgw0.rgw0} \
  "

[switch-to-containers]
commands=
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml --extra-vars "\
      ireallymeanit=yes \
      ceph_docker_registry_auth=True \
      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
  "

  py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-switch-to-containers --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml

  py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-switch-to-containers --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

[add-mons]
commands=
  ansible-playbook -vv -i {changedir}/hosts-2 --limit mon1 {toxinidir}/tests/functional/setup.yml
  ansible-playbook -vv -i {changedir}/hosts-2 {toxinidir}/infrastructure-playbooks/add-mon.yml --extra-vars ireallymeanit=yes
  py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

[add-osds]
commands=
  ansible-playbook -vv -i {changedir}/hosts-2 --limit osd1 {toxinidir}/tests/functional/setup.yml
  ansible-playbook -vv -i {changedir}/hosts-2 --limit osd1 {toxinidir}/tests/functional/lvm_setup.yml
  ansible-playbook -vv -i {changedir}/hosts-2 --limit osd1 {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
      ireallymeanit=yes \
      ceph_docker_registry_auth=True \
      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
  "
  py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

[add-mgrs]
commands=
  ansible-playbook -vv -i {changedir}/hosts-2 --limit mgrs {toxinidir}/tests/functional/setup.yml
  ansible-playbook -vv -i {changedir}/hosts-2 --limit mgrs {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
      ireallymeanit=yes \
      ceph_docker_registry_auth=True \
      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
  "
  py.test --reruns 5 --reruns-delay 1 -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

[add-mdss]
commands=
  ansible-playbook -vv -i {changedir}/hosts-2 --limit mdss {toxinidir}/tests/functional/setup.yml
  ansible-playbook -vv -i {changedir}/hosts-2 --limit mdss {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
      ireallymeanit=yes \
      ceph_docker_registry_auth=True \
      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
  "
  py.test --reruns 5 --reruns-delay 1 -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

[add-rbdmirrors]
commands=
  ansible-playbook -vv -i {changedir}/hosts-2 --limit rbdmirrors {toxinidir}/tests/functional/setup.yml
  ansible-playbook -vv -i {changedir}/hosts-2 --limit rbdmirrors {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
      ireallymeanit=yes \
      ceph_docker_registry_auth=True \
      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
  "
  py.test --reruns 5 --reruns-delay 1 -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

[add-rgws]
commands=
  ansible-playbook -vv -i {changedir}/hosts-2 --limit rgws {toxinidir}/tests/functional/setup.yml
  ansible-playbook -vv -i {changedir}/hosts-2 --limit rgws {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
      ireallymeanit=yes \
      ceph_docker_registry_auth=True \
      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
  "
  py.test --reruns 5 --reruns-delay 1 -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

[rgw-multisite]
commands=
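  # bring up a second cluster from {changedir}/secondary to act as the
  # secondary zone, deploy both sites, then run the rgw_multisite.yml
  # checks against each of them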
bash -c "cd {changedir}/secondary && vagrant up --no-provision {posargs:--provider=virtualbox}"
|
|
bash -c "cd {changedir}/secondary && bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}/secondary"
|
|
ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/setup.yml
|
|
ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/lvm_setup.yml
|
|
# ensure the rule isn't already present
|
|
ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1 state=absent'
|
|
ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1 state=present'
|
|
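  # (the absent/present pair keeps the insertion idempotent: any rule left
  # over from a previous run is removed before it is re-added at position 1
  # of the FORWARD chain)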
  ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
      ireallymeanit=yes \
      ceph_docker_registry_auth=True \
      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
  "
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit rgws --extra-vars "\
      ceph_docker_registry_auth=True \
      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
  "
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rgw_multisite.yml --skip-tags download
  ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/rgw_multisite.yml --skip-tags download
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rgw_multisite.yml --skip-tags upload
  ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/rgw_multisite.yml --skip-tags upload
  bash -c "cd {changedir}/secondary && vagrant destroy --force"
  # clean rule after the scenario is complete
  ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1 state=absent'

[storage-inventory]
commands=
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/storage-inventory.yml --extra-vars "\
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
  "

[testenv]
allowlist_externals =
  vagrant
  bash
  pip
  sleep
  rm
passenv=*
sitepackages=False
setenv=
  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
  ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
  ANSIBLE_CALLBACK_WHITELIST = profile_tasks
  ANSIBLE_KEEP_REMOTE_FILES = 1
  ANSIBLE_CACHE_PLUGIN = memory
  ANSIBLE_GATHERING = implicit
  # only available for ansible >= 2.5
  ANSIBLE_STDOUT_CALLBACK = yaml
  non_container: DEV_SETUP = True
  # Set the vagrant box image to use
  centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/7
  centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/atomic-host
  ubuntu: CEPH_ANSIBLE_VAGRANT_BOX = guits/ubuntu-bionic64

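  # (a "factor:" prefix makes a line apply only when that factor is part of
  # the active env name, so for example the centos/7 box above is used only
  # by centos-non_container-* envs)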
  # Set the ansible inventory host file to be used according to the distribution we are running on
  ubuntu: _INVENTORY = hosts-ubuntu
  INVENTORY = {env:_INVENTORY:hosts}
  container: CONTAINER_DIR = /container
  container: PLAYBOOK = site-docker.yml.sample
  container: PURGE_PLAYBOOK = purge-container-cluster.yml
  non_container: PLAYBOOK = site.yml.sample
  shrink_mds: MDS_TO_KILL = mds0
  shrink_mgr: MGR_TO_KILL = mgr1
  shrink_mon: MON_TO_KILL = mon2
  shrink_osd: OSD_TO_KILL = 0
  shrink_rbdmirror: RBDMIRROR_TO_KILL = rbd-mirror0
  shrink_rgw: RGW_TO_KILL = rgw0.rgw0

  CEPH_DOCKER_IMAGE_TAG = latest-nautilus
  CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-nautilus

  ooo_collocation: CEPH_DOCKER_IMAGE_TAG = latest-nautilus
deps= -r{toxinidir}/tests/requirements.txt
changedir=
  all_daemons: {toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:}
  cluster: {toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:}
  shrink_mon: {toxinidir}/tests/functional/shrink_mon{env:CONTAINER_DIR:}
  shrink_osd: {toxinidir}/tests/functional/shrink_osd{env:CONTAINER_DIR:}
  shrink_mgr: {toxinidir}/tests/functional/shrink_mgr{env:CONTAINER_DIR:}
  shrink_mds: {toxinidir}/tests/functional/shrink_mds{env:CONTAINER_DIR:}
  shrink_rbdmirror: {toxinidir}/tests/functional/shrink_rbdmirror{env:CONTAINER_DIR:}
  shrink_rgw: {toxinidir}/tests/functional/shrink_rgw{env:CONTAINER_DIR:}
  # tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using docker
  collocation: {toxinidir}/tests/functional/collocation{env:CONTAINER_DIR:}
  purge: {toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:}
  purge_dashboard: {toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:}
  switch_to_containers: {toxinidir}/tests/functional/all_daemons
  lvm_osds: {toxinidir}/tests/functional/lvm-osds{env:CONTAINER_DIR:}
  lvm_batch: {toxinidir}/tests/functional/lvm-batch{env:CONTAINER_DIR:}
  ooo_collocation: {toxinidir}/tests/functional/ooo-collocation
  add_mons: {toxinidir}/tests/functional/add-mons{env:CONTAINER_DIR:}
  add_osds: {toxinidir}/tests/functional/add-osds{env:CONTAINER_DIR:}
  add_mgrs: {toxinidir}/tests/functional/add-mgrs{env:CONTAINER_DIR:}
  add_mdss: {toxinidir}/tests/functional/add-mdss{env:CONTAINER_DIR:}
  add_rbdmirrors: {toxinidir}/tests/functional/add-rbdmirrors{env:CONTAINER_DIR:}
  add_rgws: {toxinidir}/tests/functional/add-rgws{env:CONTAINER_DIR:}
  rgw_multisite: {toxinidir}/tests/functional/rgw-multisite{env:CONTAINER_DIR:}
  storage_inventory: {toxinidir}/tests/functional/lvm-osds{env:CONTAINER_DIR:}
  lvm_auto_discovery: {toxinidir}/tests/functional/lvm-auto-discovery{env:CONTAINER_DIR:}
  all_in_one: {toxinidir}/tests/functional/all-in-one{env:CONTAINER_DIR:}

commands=
  bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
  bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}

  # configure lvm
  !lvm_batch-!lvm_auto_discovery-!ooo_collocation: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
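  # ("!factor" negates a condition, so the line above runs in every env
  # except the lvm_batch, lvm_auto_discovery and ooo_collocation scenarios)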

  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml

  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
      delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
      no_log_on_ceph_key_tasks=false \
      deploy_secondary_zones=False \
      ceph_docker_registry_auth=True \
      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
  "

  # wait 30sec for services to be ready
  sleep 30
  # test cluster state using ceph-ansible tests
  py.test --reruns 20 --reruns-delay 3 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

  # reboot all vms
  all_daemons,collocation: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml

  # wait 30sec for services to be ready
  sleep 30
  # retest to ensure cluster came back up correctly after rebooting
  all_daemons,collocation: py.test --reruns 20 --reruns-delay 3 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

  # handlers/idempotency test
  all_daemons,all_in_one,collocation: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-nautilus}" --extra-vars @ceph-override.json
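  # (rerunning site.yml with a different image tag exercises the restart
  # handlers and checks that a second run converges cleanly)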

  purge: {[purge]commands}
  purge_dashboard: {[purge-dashboard]commands}
  switch_to_containers: {[switch-to-containers]commands}
  shrink_mon: {[shrink-mon]commands}
  shrink_osd: {[shrink-osd]commands}
  shrink_mgr: {[shrink-mgr]commands}
  shrink_mds: {[shrink-mds]commands}
  shrink_rbdmirror: {[shrink-rbdmirror]commands}
  shrink_rgw: {[shrink-rgw]commands}
  add_osds: {[add-osds]commands}
  add_mons: {[add-mons]commands}
  add_mgrs: {[add-mgrs]commands}
  add_mdss: {[add-mdss]commands}
  add_rbdmirrors: {[add-rbdmirrors]commands}
  add_rgws: {[add-rgws]commands}
  rgw_multisite: {[rgw-multisite]commands}
  storage_inventory: {[storage-inventory]commands}

  vagrant destroy --force