tests: use lvm batch on osd2 (all_daemons)

in order to test the lvm batch scenario in the purge tests.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/6236/head
Guillaume Abrioux 2021-02-01 20:32:37 +01:00
parent 984191ac7f
commit 7c9063b1d2
3 changed files with 6 additions and 6 deletions

@@ -9,7 +9,7 @@ mgr0
 [osds]
 osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
 osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
-osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }"
+osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }" devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" lvm_volumes="[]"
 
 [mdss]
 mds0
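Setting devices and dedicated_devices while emptying lvm_volumes is what switches osd2 to the lvm batch scenario (the same change is made in the second hosts file below). Roughly, batch means ceph-volume is handed whole disks and carves out the LVs itself; on osd2 this corresponds to an invocation along these lines, where the exact flags depend on the release and objectstore defaults:

    ceph-volume lvm batch --bluestore --yes /dev/sda /dev/sdb --db-devices /dev/sdc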

@@ -9,7 +9,7 @@ mgr0
 [osds]
 osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
 osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
-osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }"
+osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }" devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" lvm_volumes="[]"
 
 [mdss]
 mds0

@@ -56,8 +56,8 @@ commands=
         ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
     "
-    # re-setup lvm
-    ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
+    # re-setup lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file)
+    ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osds:!osd2'
     # set up the cluster again
     ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\
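The added --limit keeps lvm_setup.yml away from osd2 so its disks stay raw for the batch run. Ansible host patterns combine groups with ':' (union) and exclude with ':!', so 'osds:!osd2' resolves to every host in the osds group except osd2. A quick way to verify what a pattern matches against the same inventory:

    ansible -i {changedir}/{env:INVENTORY} 'osds:!osd2' --list-hosts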
@@ -337,8 +337,8 @@ commands=
     bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
     bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
-    # configure lvm
-    !lvm_batch-!lvm_auto_discovery: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
+    # configure lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file)
+    !lvm_batch-!lvm_auto_discovery: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osds:!osd2'
     rhcs: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"
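For context on the !lvm_batch-!lvm_auto_discovery: prefix: tox runs a commands= line only when its factor condition matches the active environment name, with '!' negating a factor and '-' joining conditions, so this line is skipped in the lvm_batch and lvm_auto_discovery envs where disks must stay unprovisioned. A minimal sketch of the mechanism (the lvm_batch line here is illustrative, not taken from this tox.ini):

    [testenv]
    commands=
        # runs in every env whose name contains neither lvm_batch nor lvm_auto_discovery
        !lvm_batch-!lvm_auto_discovery: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osds:!osd2'
        # runs only in envs whose name contains the lvm_batch factor
        lvm_batch: python -c "print('batch env: lvm_setup.yml intentionally skipped')"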