purge-cluster: adds support for purging lvm osds

This also adds a new testing scenario for purging lvm osds

Signed-off-by: Andrew Schoen <aschoen@redhat.com>
pull/1797/head
Andrew Schoen 2017-08-23 09:12:40 -05:00
parent 594d5e017a
commit bed57572cc
2 changed files with 57 additions and 2 deletions

infrastructure-playbooks/purge-cluster.yml

@@ -194,12 +194,17 @@
   tasks:
+  - name: set devices if osd scenario is lvm
+    set_fact:
+      devices: []
+    when: osd_scenario == "lvm"
+
   - name: check for a device list
     fail:
       msg: "OSD automatic discovery was detected, purge cluster does not support this scenario. If you want to purge the cluster, manually provide the list of devices in group_vars/{{ osd_group_name }} using the devices variable."
     when:
       - devices|length == 0
-      - osd_auto_discovery
+      - osd_auto_discovery|default(false)

   - name: get osd numbers
     shell: "if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi"
@@ -333,6 +338,24 @@
       - ceph_disk_present.rc == 0
       - ceph_data_partlabels.rc == 0

+  # this should go away once 'ceph-volume lvm zap' is available
+  - name: remove osd logical volumes
+    command: "lvremove -f {{ item.data_vg }}/{{ item.data }}"
+    with_items: "{{ lvm_volumes }}"
+    when:
+      - osd_scenario == "lvm"
+
+  # this should go away once 'ceph-volume lvm zap' is available
+  - name: remove osd lvm journals
+    command: "lvremove -f {{ item.journal_vg }}/{{ item.journal }}"
+    with_items: "{{ lvm_volumes }}"
+    # journals might be logical volumes, but they could also be
+    # devices so fail silently if this doesn't work
+    failed_when: false
+    when:
+      - osd_scenario == "lvm"
+      - item.journal_vg is defined
+
   - name: get ceph journal partitions
     shell: |
       blkid | awk '/ceph journal/ { sub (":", "", $1); print $1 }'
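The two lvremove tasks iterate over lvm_volumes, the same structure the lvm osd scenario consumes at deploy time. A minimal sketch of its expected shape, with hypothetical volume and group names:

    # group_vars/osds.yml -- hypothetical example
    osd_scenario: lvm
    lvm_volumes:
      - data: data-lv1          # logical volume holding OSD data
        data_vg: vg-osds        # volume group containing it
        journal: journal-lv1    # journal as a logical volume...
        journal_vg: vg-journals # ...so journal_vg is defined
      - data: data-lv2
        data_vg: vg-osds
        journal: /dev/sdc1      # journal on a raw partition: no journal_vg

Since a journal may be a partition rather than a logical volume, the journal task runs only when item.journal_vg is defined and sets failed_when: false to tolerate entries lvremove cannot handle.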

tox.ini

@@ -1,6 +1,6 @@
 [tox]
 envlist = {dev,jewel,luminous,rhcs}-{ansible2.2,ansible2.3}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster,purge_dmcrypt,docker_dedicated_journal,docker_dmcrypt_journal_collocation,update_dmcrypt,update_cluster,cluster,purge_docker_cluster,update_docker_cluster}
-          {dev,luminous}-{ansible2.2,ansible2.3}-{bluestore_journal_collocation,bluestore_cluster,bluestore_dmcrypt_journal,bluestore_dmcrypt_journal_collocation,bluestore_docker_cluster,bluestore_docker_dedicated_journal,bluestore_docker_dmcrypt_journal_collocation,lvm_osds}
+          {dev,luminous}-{ansible2.2,ansible2.3}-{bluestore_journal_collocation,bluestore_cluster,bluestore_dmcrypt_journal,bluestore_dmcrypt_journal_collocation,bluestore_docker_cluster,bluestore_docker_dedicated_journal,bluestore_docker_dmcrypt_journal_collocation,lvm_osds,purge_lvm_osds}
 skipsdist = True
@@ -20,6 +20,35 @@ commands=
       ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
       ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest} \
   "
+  # set up the cluster again
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+      ceph_stable_release={env:CEPH_STABLE_RELEASE:kraken} \
+      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
+      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
+      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
+      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest} \
+      ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
+      ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
+  "
+  # test that the cluster can be redeployed in a healthy state
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
+
+[purge-lvm]
+commands=
+  cp {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} {toxinidir}/{env:PURGE_PLAYBOOK:purge-cluster.yml}
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
+      ireallymeanit=yes \
+      remove_packages=yes \
+      ceph_stable_release={env:CEPH_STABLE_RELEASE:kraken} \
+      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
+      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
+      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
+      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest} \
+  "
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
+
   # set up the cluster again
   ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
       ceph_stable_release={env:CEPH_STABLE_RELEASE:kraken} \
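With the [purge-lvm] section in place and purge_lvm_osds added to the envlist, the scenario can be exercised like any other environment; for example, assuming the luminous/ansible2.3 factor combination from the envlist above:

    tox -e luminous-ansible2.3-purge_lvm_osds

PURGE_PLAYBOOK defaults to purge-cluster.yml, so it only needs overriding when testing an alternative purge playbook.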
@@ -118,6 +147,7 @@ changedir=
   bluestore_docker_dedicated_journal: {toxinidir}/tests/functional/centos/7/bs-dock-ded-jrn
   bluestore_docker_dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/bs-dock-crypt-jrn-col
   lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds
+  purge_lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds

 commands=
   rhcs: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "change_dir={changedir}" --tags "vagrant_setup"
@@ -127,6 +157,7 @@ commands=
   bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
   lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
+  purge_lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
   rhcs: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"
@@ -145,6 +176,7 @@ commands=
   testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests

   purge_cluster: {[purge]commands}
+  purge_lvm_osds: {[purge-lvm]commands}
   purge_dmcrypt: {[purge]commands}
   purge_docker_cluster: {[purge]commands}
   update_dmcrypt: {[update]commands}
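Taken together, purge_lvm_osds reuses the lvm-osds changedir and setup: tox generates the ssh config, runs lvm_setup.yml to create the logical volumes, deploys the cluster, then invokes the purge-lvm commands, which purge the cluster, recreate the volumes with lvm_setup.yml, redeploy via site.yml.sample, and re-run testinfra to verify the redeployed cluster is healthy.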