Merge pull request #688 from bengland2/safer-disk-zap

low-priority - more robust, simpler, idempotent purge-cluster.yml
Leseb 2016-04-12 12:04:39 +02:00
commit d756ee325f
1 changed file with 39 additions and 19 deletions


@@ -50,8 +50,8 @@
 # This can cause problem with qemu-kvm
 purge_all_packages: true

-# When set to true and raw _multi_journal is used then journal disk are also zapped
-zap_journal_disks: true
+# When set to true and raw _multi_journal is used then block devices are also zapped
+zap_block_devs: true

 ceph_packages:
   - ceph
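
The renamed flag still defaults to true, so disks are wiped unless the operator opts out. A hypothetical invocation that keeps block devices intact (assuming the playbook is run directly with ansible-playbook; the inventory path is illustrative):

    # purge packages and data but leave the partitions alone
    ansible-playbook -i hosts purge-cluster.yml -e zap_block_devs=false
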
@@ -108,7 +108,8 @@
     shell: "if [ -d /usr/lib/systemd ] ; then find /usr/lib/systemd/system -name 'ceph*' | wc -l ; else echo 0 ; fi"
     register: systemd_unit_files

-# Infernalis
+# after Hammer release
+
   - name: stop ceph.target with systemd
     service:
       name: ceph.target
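
The systemd_unit_files count registered just above is what the stop tasks branch on: a non-zero count means systemd units are installed (Infernalis and later), zero means the older sysvinit script. A rough shell equivalent of that decision, for illustration only and not part of the playbook:

    # pick the stop mechanism the same way the playbook does
    units=$(find /usr/lib/systemd/system -name 'ceph*' 2>/dev/null | wc -l)
    if [ "$units" != "0" ] ; then
        systemctl stop ceph.target     # systemd units present (Infernalis and later)
    else
        service ceph stop              # sysvinit script (Hammer and earlier)
    fi
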
@@ -166,34 +167,33 @@
       systemd_unit_files.stdout != "0" and
       rbdmirror_group_name in group_names

-# before infernalis
+# before infernalis release, using sysvinit scripts
+# we use this test so we do not have to know which RPM contains the boot script
+# or where it is placed.

   - name: stop ceph osds
-    command: service ceph stop osd
+    shell: "service ceph status osd ; if [ $? == 0 ] ; then service ceph stop osd ; else echo ; fi"
     when:
       ansible_os_family == 'RedHat' and
-      osd_group_name in group_names and
-      systemd_unit_files.stdout == "0"
+      osd_group_name in group_names

   - name: stop ceph mons
-    command: service ceph stop mon
+    shell: "service ceph status mon ; if [ $? == 0 ] ; then service ceph stop mon ; else echo ; fi"
     when:
       ansible_os_family == 'RedHat' and
-      mon_group_name in group_names and
-      systemd_unit_files.stdout == "0"
+      mon_group_name in group_names

   - name: stop ceph mdss
-    command: service ceph stop mds
+    shell: "service ceph status mds ; if [ $? == 0 ] ; then service ceph stop mds ; else echo ; fi"
     when:
       ansible_os_family == 'RedHat' and
-      mds_group_name in group_names and
-      systemd_unit_files.stdout == "0"
+      mds_group_name in group_names

   - name: stop ceph rgws
-    command: service ceph-radosgw stop
+    shell: "service ceph-radosgw status ; if [ $? == 0 ] ; then service ceph-radosgw stop ; else echo ; fi"
     when:
       ansible_os_family == 'RedHat' and
-      rgw_group_name in group_names and
-      systemd_unit_files.stdout == "0"
+      rgw_group_name in group_names

 # Ubuntu 14.04
   - name: stop ceph osds on ubuntu
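
Checking `service <name> status` before calling stop is what makes this block safe to re-run: on a host where the daemons are already stopped (or were never installed as sysvinit services), the status check fails and the task falls through to a no-op `echo` instead of erroring out. A standalone sketch of the same pattern (stop_if_running is a hypothetical helper, not taken from the playbook):

    # stop a sysvinit-managed service only if it reports as running,
    # so a repeated purge run does not fail on an already-stopped daemon
    stop_if_running() {
        svc="$1" ; daemon="$2"
        if service "$svc" status $daemon ; then
            service "$svc" stop $daemon
        else
            echo   # keep the exit status at 0, mirroring the task's 'else echo'
        fi
    }
    stop_if_running ceph osd
    stop_if_running ceph-radosgw
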
@@ -240,6 +240,16 @@
     register: check_for_running_ceph
     failed_when: check_for_running_ceph.rc == 0

+  - name: see if ceph-disk-created data partitions are present
+    shell: "ls /dev/disk/by-partlabel | grep -q 'ceph\\\\x20data'"
+    failed_when: false
+    register: ceph_data_partlabels
+
+  - name: see if ceph-disk-created journal partitions are present
+    shell: "ls /dev/disk/by-partlabel | grep -q 'ceph\\\\x20journal'"
+    failed_when: false
+    register: ceph_journal_partlabels
+
   - name: get osd data mount points
     shell: "(grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'"
     register: mounted_osd
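
ceph-disk names the GPT partitions it creates 'ceph data' and 'ceph journal', and udev exposes those labels under /dev/disk/by-partlabel with the space escaped as \x20, so these two new checks are a cheap way to tell whether anything ceph-created is still on the disks. A quick manual equivalent (illustrative; the extra backslashes in the playbook come from YAML double-quote escaping):

    # ceph-disk-created partitions show up as 'ceph\x20data' / 'ceph\x20journal'
    ls /dev/disk/by-partlabel
    # exit status 0 here corresponds to ceph_data_partlabels.rc == 0 in the play
    ls /dev/disk/by-partlabel | grep -q 'ceph\\x20data' && echo "ceph data partitions found"
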
@@ -280,19 +290,29 @@
       osd_group_name in group_names and
       remove_osd_mountpoints.rc != 0

+  - name: see if ceph-disk is installed
+    shell: "which ceph-disk"
+    failed_when: false
+    register: ceph_disk_present
+
   - name: zap osd disks
     shell: ceph-disk zap "{{ item }}"
     with_items: devices
     when:
-      osd_group_name in group_names
+      osd_group_name in group_names and
+      ceph_disk_present.rc == 0 and
+      ceph_data_partlabels.rc == 0 and
+      zap_block_devs

   - name: zap journal devices
     shell: ceph-disk zap "{{ item }}"
     with_items: "{{ raw_journal_devices|default([])|unique }}"
     when:
       osd_group_name in group_names and
-      raw_multi_journal and
-      zap_journal_disks
+      ceph_disk_present.rc == 0 and
+      ceph_journal_partlabels.rc == 0 and
+      zap_block_devs and
+      raw_multi_journal

   - name: purge ceph packages with yum
     yum:
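
Taken together, the zap tasks above now run only when ceph-disk is actually installed, the matching ceph partition labels are present, and the operator has left zap_block_devs enabled, which is what makes a second purge pass a no-op instead of a failure. A rough shell rendering of the osd-disk guard, outside Ansible (the device list is a placeholder for the playbook's 'devices' variable):

    # wipe data disks only when there is evidence they were prepared by ceph-disk
    zap_block_devs=true
    if $zap_block_devs \
       && which ceph-disk >/dev/null \
       && ls /dev/disk/by-partlabel | grep -q 'ceph\\x20data' ; then
        for dev in /dev/sdb /dev/sdc ; do   # placeholder device list
            ceph-disk zap "$dev"
        done
    fi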