mirror of https://github.com/ceph/ceph-ansible.git
tox: split shrink_osd scenario
Let's split this scenario into a dedicated tox ini file. This tests the shrink-osd.yml playbook in two ways: 1/ shrinking OSDs one by one, 2/ shrinking multiple OSDs with a single call of the playbook.

Related ceph-build PR: ceph/ceph-build#1629

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
parent 7efea219d6
commit 78e4faf077
@@ -0,0 +1,160 @@
[tox]
envlist = {centos}-{container,non_container}-{shrink_osd_single,shrink_osd_multiple}
skipsdist = True

[shrink-osd-single]
commands=
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
      ireallymeanit=yes \
      osd_to_kill=0 \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
  "
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
      ireallymeanit=yes \
      osd_to_kill=1 \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
  "
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
      ireallymeanit=yes \
      osd_to_kill=2 \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
  "
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
      ireallymeanit=yes \
      osd_to_kill=3 \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
  "
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
      ireallymeanit=yes \
      osd_to_kill=4 \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
  "
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
      ireallymeanit=yes \
      osd_to_kill=5 \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
  "
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
      ireallymeanit=yes \
      osd_to_kill=6 \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
  "
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
      ireallymeanit=yes \
      osd_to_kill=7 \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
  "

[shrink-osd-multiple]
commands=
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
      ireallymeanit=yes \
      osd_to_kill=0,1,2,3,4,5,6,7 \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
  "

[testenv]
whitelist_externals =
    vagrant
    bash
    sleep
passenv=*
sitepackages=False
setenv=
  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
  ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
  ANSIBLE_CALLBACK_WHITELIST = profile_tasks
  ANSIBLE_KEEP_REMOTE_FILES = 1
  ANSIBLE_CACHE_PLUGIN = memory
  ANSIBLE_GATHERING = implicit
  # only available for ansible >= 2.5
  ANSIBLE_STDOUT_CALLBACK = yaml
  non_container: DEV_SETUP = True
  # Set the vagrant box image to use
  centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/8
  centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/8
  INVENTORY = {env:_INVENTORY:hosts}
  container: CONTAINER_DIR = /container
  container: PLAYBOOK = site-container.yml.sample
  container: PURGE_PLAYBOOK = purge-container-cluster.yml
  non_container: PLAYBOOK = site.yml.sample

  CEPH_DOCKER_IMAGE_TAG = latest-master
  CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-master
  UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-master
  CEPH_STABLE_RELEASE = pacific

deps= -r{toxinidir}/tests/requirements.txt
changedir=
  shrink_osd_single: {toxinidir}/tests/functional/shrink_osd{env:CONTAINER_DIR:}
  shrink_osd_multiple: {toxinidir}/tests/functional/shrink_osd{env:CONTAINER_DIR:}

commands=
  ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"

  bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
  bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}

  # configure lvm
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml

  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml

  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
      delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
      ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
      ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
      ceph_docker_registry_auth=True \
      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
  "

  # wait 30sec for services to be ready
  sleep 30
  # test cluster state using ceph-ansible tests
  py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

  shrink_osd_single: {[shrink-osd-single]commands}
  shrink_osd_multiple: {[shrink-osd-multiple]commands}

  # configure lvm
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit osds --extra-vars "\
      delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
      ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
      ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
      ceph_docker_registry_auth=True \
      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
  "
  # retest to ensure OSDs are well redeployed
  py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

  vagrant destroy --force
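For reference, a minimal sketch of how the two split scenarios could be run locally with tox, assuming the new ini file is saved as tox-shrink_osd.ini (the file name is not shown in this diff) and the environment names follow the envlist factors above:

  # shrink OSDs one by one against the containerized deployment
  tox -c tox-shrink_osd.ini -e centos-container-shrink_osd_single

  # shrink all OSDs with a single playbook call against the non-containerized deployment
  tox -c tox-shrink_osd.ini -e centos-non_container-shrink_osd_multiple

Both environments share the common [testenv] commands: bring the Vagrant cluster up, deploy it with the site playbook, run the shrink commands from their respective section, redeploy the OSDs, re-run the functional tests, and destroy the cluster.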