From b7fcbe5ca218639e0930a13a7001918606aa64e2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Han?=
Date: Wed, 18 Jan 2017 10:53:21 +0100
Subject: [PATCH 1/3] purge: cosmetic cleanup
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Just applying our writing syntax conventions in the playbook:
multi-condition 'when' statements become YAML lists (one condition per
line) and single-condition 'when' statements collapse onto a single line.

Signed-off-by: Sébastien Han
---
 infrastructure-playbooks/purge-cluster.yml | 157 +++++++++------------
 1 file changed, 64 insertions(+), 93 deletions(-)

diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml
index 83fad3016..69e9fbcc2 100644
--- a/infrastructure-playbooks/purge-cluster.yml
+++ b/infrastructure-playbooks/purge-cluster.yml
@@ -84,8 +84,8 @@
       state: stopped
       enabled: no
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

   - name: stop ceph mdss with systemd
     service:
@@ -93,20 +93,18 @@
       state: stopped
       enabled: no
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

   - name: stop ceph mdss
     shell: "service ceph status mds ; if [ $? == 0 ] ; then service ceph stop mds ; else echo ; fi"
-    when:
-      ansible_os_family == 'RedHat'
+    when: ansible_os_family == 'RedHat'

 # Ubuntu 14.04
   - name: stop ceph mdss on ubuntu
     command: initctl stop ceph-mds cluster={{ cluster }} id={{ ansible_hostname }}
     failed_when: false
-    when:
-      ansible_distribution == 'Ubuntu'
+    when: ansible_distribution == 'Ubuntu'


 - name: purge ceph rgw cluster
@@ -135,8 +133,8 @@
       state: stopped
       enabled: no
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

   - name: stop ceph rgws with systemd
     service:
@@ -144,20 +142,18 @@
       state: stopped
       enabled: no
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

   - name: stop ceph rgws
     shell: "service ceph-radosgw status ; if [ $? == 0 ] ; then service ceph-radosgw stop ; else echo ; fi"
-    when:
-      ansible_os_family == 'RedHat'
+    when: ansible_os_family == 'RedHat'

 # Ubuntu 14.04
   - name: stop ceph rgws on ubuntu
     command: initctl stop radosgw cluster={{ cluster }} id={{ ansible_hostname }}
     failed_when: false
-    when:
-      ansible_distribution == 'Ubuntu'
+    when: ansible_distribution == 'Ubuntu'


 - name: purge ceph rbd-mirror cluster
@@ -186,23 +182,22 @@
       state: stopped
       enabled: no
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

   - name: stop ceph rbd mirror with systemd
     service:
       name: ceph-rbd-mirror@admin.service
       state: stopped
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

 # Ubuntu 14.04
   - name: stop ceph rbd mirror on ubuntu
     command: initctl stop ceph-rbd-mirror cluster={{ cluster }} id=admin
     failed_when: false
-    when:
-      ansible_distribution == 'Ubuntu'
+    when: ansible_distribution == 'Ubuntu'


 - name: purge ceph nfs cluster
@@ -231,28 +226,26 @@
       state: stopped
       enabled: no
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

   - name: stop ceph nfss with systemd
     service:
       name: nfs-ganesha
       state: stopped
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

   - name: stop ceph nfss
     shell: "service nfs-ganesha status ; if [ $? == 0 ] ; then service nfs-ganesha stop ; else echo ; fi"
-    when:
-      ansible_os_family == 'RedHat'
+    when: ansible_os_family == 'RedHat'

 # Ubuntu 14.04
   - name: stop ceph nfss on ubuntu
     command: initctl stop nfs-ganesha
     failed_when: false
-    when:
-      ansible_distribution == 'Ubuntu'
+    when: ansible_distribution == 'Ubuntu'


 - name: purge ceph osd cluster
@@ -296,8 +289,8 @@
     fail:
       msg: "OSD automatic discovery was detected, purge cluster does not support this scenario. If you want to purge the cluster, manually provide the list of devices in group_vars/{{ osd_group_name }} using the devices variable."
     when:
-      devices|length == 0 and
-      osd_auto_discovery
+      - devices|length == 0
+      - osd_auto_discovery

   - name: get osd numbers
     shell: "if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | cut -d '-' -f 2 ; fi"
@@ -310,8 +303,8 @@
       state: stopped
       enabled: no
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

   - name: stop ceph-osd with systemd
     service:
@@ -320,8 +313,8 @@
       enabled: no
     with_items: "{{ osd_ids.stdout_lines }}"
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

 # before infernalis release, using sysvinit scripts
 # we use this test so we do not have to know which RPM contains the boot script
@@ -329,8 +322,7 @@
   - name: stop ceph osds
     shell: "service ceph status osd ; if [ $? == 0 ] ; then service ceph stop osd ; else echo ; fi"
-    when:
-      ansible_os_family == 'RedHat'
+    when: ansible_os_family == 'RedHat'

 # Ubuntu 14.04
   - name: stop ceph osds on ubuntu
@@ -339,8 +331,7 @@
         initctl stop ceph-osd cluster={{ cluster }} id=$id
       done
     failed_when: false
-    when:
-      ansible_distribution == 'Ubuntu'
+    when: ansible_distribution == 'Ubuntu'
     with_items: "{{ osd_ids.stdout_lines }}"

   - name: see if ceph-disk-created data partitions are present
@@ -365,8 +356,7 @@

   - name: umount osd data partition
     shell: umount {{ item }}
-    with_items:
-      - "{{ mounted_osd.stdout_lines }}"
+    with_items: "{{ mounted_osd.stdout_lines }}"

   - name: remove osd mountpoint tree
     file:
@@ -382,8 +372,7 @@
       - restart machine
      - wait for server to boot
      - remove data
-    when:
-      remove_osd_mountpoints.failed is defined
+    when: remove_osd_mountpoints.failed is defined

   - name: see if ceph-disk is installed
     shell: "which ceph-disk"
@@ -394,15 +383,14 @@
     shell: ceph-disk zap "{{ item }}"
     with_items: "{{ devices }}"
     when:
-      ceph_disk_present.rc == 0 and
-      ceph_data_partlabels.rc == 0 and
-      zap_block_devs
+      - ceph_disk_present.rc == 0
+      - ceph_data_partlabels.rc == 0
+      - zap_block_devs

   - name: get ceph journal partitions
     shell: |
       blkid | awk '/ceph journal/ { sub (":", "", $1); print $1 }'
-    when:
-      - ceph_journal_partlabels.rc == 0
+    when: ceph_journal_partlabels.rc == 0
     failed_when: false
     register: ceph_journal_partition_to_erase_path


 - name: purge ceph mon cluster
@@ -454,8 +442,8 @@
       state: stopped
       enabled: no
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

   - name: stop ceph mons with systemd
     service:
@@ -463,19 +451,17 @@
       state: stopped
       enabled: no
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

   - name: stop ceph mons
     shell: "service ceph status mon ; if [ $? == 0 ] ; then service ceph stop mon ; else echo ; fi"
-    when:
-      ansible_os_family == 'RedHat'
+    when: ansible_os_family == 'RedHat'

   - name: stop ceph mons on ubuntu
     command: initctl stop ceph-mon cluster={{ cluster }} id={{ ansible_hostname }}
     failed_when: false
-    when:
-      ansible_distribution == 'Ubuntu'
+    when: ansible_distribution == 'Ubuntu'

   - name: remove monitor store and bootstrap keys
     file:
@@ -543,58 +529,49 @@
     yum:
       name: "{{ item }}"
       state: absent
-    with_items:
-      - "{{ ceph_packages }}"
-    when:
-      ansible_pkg_mgr == 'yum'
+    with_items: "{{ ceph_packages }}"
+    when: ansible_pkg_mgr == 'yum'

   - name: purge ceph packages with dnf
     dnf:
       name: "{{ item }}"
       state: absent
-    with_items:
-      - "{{ ceph_packages }}"
-    when:
-      ansible_pkg_mgr == 'dnf'
+    with_items: "{{ ceph_packages }}"
+    when: ansible_pkg_mgr == 'dnf'

   - name: purge ceph packages with apt
     apt:
       name: "{{ item }}"
       state: absent
-    with_items:
-      - "{{ ceph_packages }}"
-    when:
-      ansible_pkg_mgr == 'apt'
+    with_items: "{{ ceph_packages }}"
+    when: ansible_pkg_mgr == 'apt'

   - name: purge remaining ceph packages with yum
     yum:
       name: "{{ item }}"
       state: absent
-    with_items:
-      - "{{ ceph_remaining_packages }}"
+    with_items: "{{ ceph_remaining_packages }}"
     when:
-      ansible_pkg_mgr == 'yum' and
-      purge_all_packages == true
+      - ansible_pkg_mgr == 'yum'
+      - purge_all_packages == true

   - name: purge remaining ceph packages with dnf
     dnf:
       name: "{{ item }}"
       state: absent
-    with_items:
-      - "{{ ceph_remaining_packages }}"
+    with_items: "{{ ceph_remaining_packages }}"
     when:
-      ansible_pkg_mgr == 'dnf' and
-      purge_all_packages == true
+      - ansible_pkg_mgr == 'dnf'
+      - purge_all_packages == true

   - name: purge remaining ceph packages with apt
     apt:
       name: "{{ item }}"
       state: absent
-    with_items:
-      - "{{ ceph_remaining_packages }}"
+    with_items: "{{ ceph_remaining_packages }}"
     when:
-      ansible_pkg_mgr == 'apt' and
-      purge_all_packages == true
+      - ansible_pkg_mgr == 'apt'
+      - purge_all_packages == true

   - name: remove config
     file:
@@ -608,18 +585,15 @@

   - name: remove from SysV
     shell: "update-rc.d -f ceph remove"
-    when:
-      ansible_distribution == 'Ubuntu'
+    when: ansible_distribution == 'Ubuntu'

   - name: remove Upstart and SysV files
     shell: "find /etc -name '*ceph*' -delete"
-    when:
-      ansible_distribution == 'Ubuntu'
+    when: ansible_distribution == 'Ubuntu'

   - name: remove Upstart and apt logs and cache
     shell: "find /var -name '*ceph*' -delete"
-    when:
-      ansible_distribution == 'Ubuntu'
+    when: ansible_distribution == 'Ubuntu'

   - name: request data removal
     local_action: shell echo requesting data removal
@@ -629,8 +603,7 @@

   - name: purge dnf cache
     command: dnf clean all
-    when:
-      ansible_pkg_mgr == 'dnf'
+    when: ansible_pkg_mgr == 'dnf'

   - name: purge RPM cache in /tmp
     file:
@@ -639,15 +612,13 @@

   - name: clean apt
     shell: apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
-    when:
-      ansible_pkg_mgr == 'apt'
+    when: ansible_pkg_mgr == 'apt'

   - name: purge rh_storage.repo file in /etc/yum.repos.d
     file:
       path: /etc/yum.repos.d/rh_storage.repo
       state: absent
-    when:
-      ansible_os_family == 'RedHat'
+    when: ansible_os_family == 'RedHat'

   - name: purge fetch directory
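
Note on the convention this patch applies: Ansible treats a list under
'when' as an implicit AND, so the two spellings below are equivalent. A
minimal sketch (the task, unit name and registered variable are
illustrative, not taken verbatim from the playbook):

    # old style: conditions joined with 'and' in a single string
    - name: stop a ceph daemon
      service:
        name: ceph-mon@mon0
        state: stopped
      when:
        ansible_os_family == 'RedHat' and
        systemd_unit_files.stdout != "0"

    # new style: a YAML list, one condition per line, ANDed implicitly
    - name: stop a ceph daemon
      service:
        name: ceph-mon@mon0
        state: stopped
      when:
        - ansible_os_family == 'RedHat'
        - systemd_unit_files.stdout != "0"

The list form also makes diffs smaller when a single condition is later
added or removed, which is visible throughout the patch above.
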
From adeb3decf38bddb2972e597f22a27bf9638fb41e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Han?=
Date: Wed, 18 Jan 2017 10:55:01 +0100
Subject: [PATCH 2/3] purge: remove zap_block_devs variable
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The name of this variable was a bit confusing: when enabled, it zaps all
the block devices no matter which OSD scenario is in use. Removing the
variable and applying a condition on the OSD scenario instead is now
feasible, and easier, since we import the group_vars variable files for
the OSDs.

Signed-off-by: Sébastien Han
---
 infrastructure-playbooks/purge-cluster.yml | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml
index 69e9fbcc2..fd243b5d3 100644
--- a/infrastructure-playbooks/purge-cluster.yml
+++ b/infrastructure-playbooks/purge-cluster.yml
@@ -253,9 +253,6 @@
   vars:
     osd_group_name: osds

-# When set to true and raw _multi_journal is used then block devices are also zapped
-    zap_block_devs: true
-
   hosts:
     - "{{ osd_group_name }}"
@@ -385,7 +382,6 @@
     when:
       - ceph_disk_present.rc == 0
       - ceph_data_partlabels.rc == 0
-      - zap_block_devs

   - name: get ceph journal partitions
     shell: |
@@ -409,7 +405,7 @@
     with_items: "{{ ceph_journal_partition_to_erase_path.stdout_lines }}"
     when:
       - ceph_journal_partlabels.rc == 0
-      - zap_block_devs
+      - raw_multi_journal

 - name: purge ceph mon cluster
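
Note: raw_multi_journal is a scenario flag defined in the OSD variable
files, not in this playbook, which is why the zap can now be conditioned
on it. A minimal sketch of the mechanism the commit message describes,
assuming the play imports the OSD variable files (the file paths, hosts
line and debug task are illustrative only, not the playbook's literal
code):

    - name: purge ceph osd cluster
      hosts: osds
      gather_facts: false
      become: true
      tasks:
        # import the OSD defaults and group_vars so scenario flags such
        # as raw_multi_journal are defined before they are tested
        - name: include osd scenario variables
          include_vars: "{{ item }}"
          with_items:
            - roles/ceph-osd/defaults/main.yml
            - group_vars/osds.yml
          failed_when: false

        - name: show which osd scenario drives the journal zap
          debug:
            msg: "raw_multi_journal is {{ raw_multi_journal | default(false) }}"

With the variables in scope, 'when: raw_multi_journal' only zaps journal
partitions for clusters that actually use that scenario, instead of the
blanket zap_block_devs toggle.
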
From 73ca1a7a00ae2b8147fb80b7177f54d738439128 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Han?=
Date: Thu, 19 Jan 2017 15:28:44 +0100
Subject: [PATCH 3/3] purge: remove dm-crypt devices
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When running encrypted OSDs, encrypted device mappers are in use (they
are created by the cryptsetup tool). So before attempting to remove the
partitions on a device we must first delete all the encrypted device
mappers; only then can we delete the partitions.

Signed-off-by: Sébastien Han
---
 infrastructure-playbooks/purge-cluster.yml | 34 ++++++++++++++++++++++
 1 file changed, 34 insertions(+)

diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml
index fd243b5d3..e09b9552e 100644
--- a/infrastructure-playbooks/purge-cluster.yml
+++ b/infrastructure-playbooks/purge-cluster.yml
@@ -343,6 +343,35 @@
     failed_when: false
     register: ceph_journal_partlabels

+# Initial attempt, doing everything in Ansible...
+# - name: see if encrypted partitions are present
+#   shell: blkid -t TYPE=crypto_LUKS -o value -s PARTUUID
+#   register: encrypted_partuuid
+#
+# - name: find if these encrypted partitions are ceph data partitions
+#   shell: blkid -t PARTLABEL="ceph data" -o value -s PARTUUID $(blkid -U {{ item }})
+#   failed_when: false
+#   with_items: "{{ encrypted_partuuid.stdout_lines }}"
+#   when: "{{ encrypted_partuuid | length > 0 }}"
+#   register: encrypted_partuuid_ceph_data
+#
+# - name: find if these encrypted partitions are ceph journal partitions
+#   shell: blkid -t PARTLABEL="ceph journal" -o value -s PARTUUID $(blkid -U {{ item }})
+#   failed_when: false
+#   with_items: "{{ encrypted_partuuid.stdout_lines }}"
+#   when: "{{ encrypted_partuuid | length > 0 }}"
+#   register: encrypted_partuuid_ceph_journal
+#
+# - name: merge the list of ceph encrypted partitions
+#   set_fact:
+#     encrypted_partuuid_ceph: "{{ encrypted_partuuid_ceph_data + encrypted_partuuid_ceph_journal }}"
+
+  # NOTE(leseb): hope someone will find a more elegant way one day...
+  - name: see if encrypted partitions are present
+    shell: |
+      blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2
+    register: encrypted_ceph_partuuid
+
   - name: get osd data mount points
     shell: "(grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'"
     register: mounted_osd
@@ -376,6 +405,11 @@
     failed_when: false
     register: ceph_disk_present

+  - name: delete dm-crypt devices if any
+    command: dmsetup remove {{ item }}
+    with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
+    when: encrypted_ceph_partuuid.stdout_lines | length > 0
+
   - name: zap osd disks
     shell: ceph-disk zap "{{ item }}"
     with_items: "{{ devices }}"
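
A short gloss on the detection pipeline added above: blkid lists the LUKS
partitions together with their PARTLABEL and PARTUUID, the greps keep only
the ceph-labelled entries, and cut extracts the bare PARTUUID, which is
the name ceph-disk uses for the dm-crypt mapper under /dev/mapper. A
standalone sketch of the detect-and-remove pair, runnable outside the
playbook (the hosts line is illustrative; task names follow the patch):

    - hosts: osds
      become: true
      tasks:
        - name: see if encrypted partitions are present
          shell: |
            blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph" | grep -o 'PARTUUID.*' | cut -d '"' -f 2
          failed_when: false
          register: encrypted_ceph_partuuid

        # dmsetup remove closes each mapper, freeing the underlying
        # partition so the later zap tasks can delete it
        - name: delete dm-crypt devices if any
          command: dmsetup remove {{ item }}
          with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
          when: encrypted_ceph_partuuid.stdout_lines | length > 0

Ordering matters here: the dm-crypt removal runs before "zap osd disks",
since zapping a device while a mapper still holds one of its partitions
open would fail.
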