From 95b52ad6af18aa41c2b6fdfab82361476bfcdff0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Han?=
Date: Wed, 18 Jan 2017 10:53:21 +0100
Subject: [PATCH] purge: cosmetic cleanup
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Just applying our writing syntax convention in the playbook.

Signed-off-by: Sébastien Han
(cherry picked from commit b7fcbe5ca218639e0930a13a7001918606aa64e2)

Resolves: backport#1235
---
 infrastructure-playbooks/purge-cluster.yml | 157 +++++++++------------
 1 file changed, 64 insertions(+), 93 deletions(-)

diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml
index b340968bf..ed470da06 100644
--- a/infrastructure-playbooks/purge-cluster.yml
+++ b/infrastructure-playbooks/purge-cluster.yml
@@ -84,8 +84,8 @@
       state: stopped
       enabled: no
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

   - name: stop ceph mdss with systemd
     service:
@@ -93,20 +93,18 @@
       state: stopped
       enabled: no
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

   - name: stop ceph mdss
     shell: "service ceph status mds ; if [ $? == 0 ] ; then service ceph stop mds ; else echo ; fi"
-    when:
-      ansible_os_family == 'RedHat'
+    when: ansible_os_family == 'RedHat'

 # Ubuntu 14.04
   - name: stop ceph mdss on ubuntu
     command: initctl stop ceph-mds cluster={{ cluster }} id={{ ansible_hostname }}
     failed_when: false
-    when:
-      ansible_distribution == 'Ubuntu'
+    when: ansible_distribution == 'Ubuntu'

 - name: purge ceph rgw cluster

@@ -135,8 +133,8 @@
       state: stopped
       enabled: no
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

   - name: stop ceph rgws with systemd
     service:
@@ -144,20 +142,18 @@
       state: stopped
       enabled: no
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

   - name: stop ceph rgws
     shell: "service ceph-radosgw status ; if [ $? == 0 ] ; then service ceph-radosgw stop ; else echo ; fi"
-    when:
-      ansible_os_family == 'RedHat'
+    when: ansible_os_family == 'RedHat'

 # Ubuntu 14.04
   - name: stop ceph rgws on ubuntu
     command: initctl stop radosgw cluster={{ cluster }} id={{ ansible_hostname }}
     failed_when: false
-    when:
-      ansible_distribution == 'Ubuntu'
+    when: ansible_distribution == 'Ubuntu'

 - name: purge ceph rbd-mirror cluster

@@ -186,23 +182,22 @@
       state: stopped
       enabled: no
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

   - name: stop ceph rbd mirror with systemd
     service:
       name: ceph-rbd-mirror@admin.service
       state: stopped
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

 # Ubuntu 14.04
   - name: stop ceph rbd mirror on ubuntu
     command: initctl stop ceph-rbd-mirror cluster={{ cluster }} id=admin
     failed_when: false
-    when:
-      ansible_distribution == 'Ubuntu'
+    when: ansible_distribution == 'Ubuntu'

 - name: purge ceph nfs cluster

@@ -231,28 +226,26 @@
       state: stopped
       enabled: no
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

   - name: stop ceph nfss with systemd
     service:
       name: nfs-ganesha
       state: stopped
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

   - name: stop ceph nfss
     shell: "service nfs-ganesha status ; if [ $? == 0 ] ; then service nfs-ganesha stop ; else echo ; fi"
-    when:
-      ansible_os_family == 'RedHat'
+    when: ansible_os_family == 'RedHat'

 # Ubuntu 14.04
   - name: stop ceph nfss on ubuntu
     command: initctl stop nfs-ganesha
     failed_when: false
-    when:
-      ansible_distribution == 'Ubuntu'
+    when: ansible_distribution == 'Ubuntu'

 - name: purge ceph osd cluster

@@ -296,8 +289,8 @@
     fail:
       msg: "OSD automatic discovery was detected, purge cluster does not support this scenario. If you want to purge the cluster, manually provide the list of devices in group_vars/{{ osd_group_name }} using the devices variable."
     when:
-      devices|length == 0 and
-      osd_auto_discovery
+      - devices|length == 0
+      - osd_auto_discovery

   - name: get osd numbers
     shell: "if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | cut -d '-' -f 2 ; fi"
@@ -310,8 +303,8 @@
       state: stopped
       enabled: no
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

   - name: stop ceph-osd with systemd
     service:
@@ -320,8 +313,8 @@
       enabled: no
     with_items: "{{ osd_ids.stdout_lines }}"
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

 # before infernalis release, using sysvinit scripts
 # we use this test so we do not have to know which RPM contains the boot script
@@ -329,8 +322,7 @@

   - name: stop ceph osds
     shell: "service ceph status osd ; if [ $? == 0 ] ; then service ceph stop osd ; else echo ; fi"
-    when:
-      ansible_os_family == 'RedHat'
+    when: ansible_os_family == 'RedHat'

 # Ubuntu 14.04
   - name: stop ceph osds on ubuntu
@@ -339,8 +331,7 @@
       initctl stop ceph-osd cluster={{ cluster }} id=$id
       done
     failed_when: false
-    when:
-      ansible_distribution == 'Ubuntu'
+    when: ansible_distribution == 'Ubuntu'
     with_items: "{{ osd_ids.stdout_lines }}"

   - name: see if ceph-disk-created data partitions are present
@@ -365,8 +356,7 @@

   - name: umount osd data partition
     shell: umount {{ item }}
-    with_items:
-      - "{{ mounted_osd.stdout_lines }}"
+    with_items: "{{ mounted_osd.stdout_lines }}"

   - name: remove osd mountpoint tree
     file:
@@ -382,8 +372,7 @@
       - restart machine
       - wait for server to boot
       - remove data
-    when:
-      remove_osd_mountpoints.failed is defined
+    when: remove_osd_mountpoints.failed is defined

   - name: see if ceph-disk is installed
     shell: "which ceph-disk"
@@ -394,15 +383,14 @@
     shell: ceph-disk zap "{{ item }}"
     with_items: "{{ devices }}"
     when:
-      ceph_disk_present.rc == 0 and
-      ceph_data_partlabels.rc == 0 and
-      zap_block_devs
+      - ceph_disk_present.rc == 0
+      - ceph_data_partlabels.rc == 0
+      - zap_block_devs

   - name: get ceph journal partitions
     shell: |
       blkid | awk '/ceph journal/ { sub (":", "", $1); print $1 }'
-    when:
-      - ceph_journal_partlabels.rc == 0
+    when: ceph_journal_partlabels.rc == 0
     failed_when: false
     register: ceph_journal_partition_to_erase_path

@@ -454,8 +442,8 @@
       state: stopped
       enabled: no
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

   - name: stop ceph mons with systemd
     service:
@@ -463,19 +451,17 @@
       state: stopped
       enabled: no
     when:
-      ansible_os_family == 'RedHat' and
-      systemd_unit_files.stdout != "0"
+      - ansible_os_family == 'RedHat'
+      - systemd_unit_files.stdout != "0"

   - name: stop ceph mons
     shell: "service ceph status mon ; if [ $? == 0 ] ; then service ceph stop mon ; else echo ; fi"
-    when:
-      ansible_os_family == 'RedHat'
+    when: ansible_os_family == 'RedHat'

   - name: stop ceph mons on ubuntu
     command: initctl stop ceph-mon cluster={{ cluster }} id={{ ansible_hostname }}
     failed_when: false
-    when:
-      ansible_distribution == 'Ubuntu'
+    when: ansible_distribution == 'Ubuntu'

   - name: remove monitor store and bootstrap keys
     file:
@@ -542,58 +528,49 @@
     yum:
       name: "{{ item }}"
       state: absent
-    with_items:
-      - "{{ ceph_packages }}"
-    when:
-      ansible_pkg_mgr == 'yum'
+    with_items: "{{ ceph_packages }}"
+    when: ansible_pkg_mgr == 'yum'

   - name: purge ceph packages with dnf
     dnf:
       name: "{{ item }}"
       state: absent
-    with_items:
-      - "{{ ceph_packages }}"
-    when:
-      ansible_pkg_mgr == 'dnf'
+    with_items: "{{ ceph_packages }}"
+    when: ansible_pkg_mgr == 'dnf'

   - name: purge ceph packages with apt
     apt:
       name: "{{ item }}"
       state: absent
-    with_items:
-      - "{{ ceph_packages }}"
-    when:
-      ansible_pkg_mgr == 'apt'
+    with_items: "{{ ceph_packages }}"
+    when: ansible_pkg_mgr == 'apt'

   - name: purge remaining ceph packages with yum
     yum:
       name: "{{ item }}"
       state: absent
-    with_items:
-      - "{{ ceph_remaining_packages }}"
+    with_items: "{{ ceph_remaining_packages }}"
     when:
-      ansible_pkg_mgr == 'yum' and
-      purge_all_packages == true
+      - ansible_pkg_mgr == 'yum'
+      - purge_all_packages == true

   - name: purge remaining ceph packages with dnf
     dnf:
       name: "{{ item }}"
       state: absent
-    with_items:
-      - "{{ ceph_remaining_packages }}"
+    with_items: "{{ ceph_remaining_packages }}"
     when:
-      ansible_pkg_mgr == 'dnf' and
-      purge_all_packages == true
+      - ansible_pkg_mgr == 'dnf'
+      - purge_all_packages == true

   - name: purge remaining ceph packages with apt
     apt:
       name: "{{ item }}"
       state: absent
-    with_items:
-      - "{{ ceph_remaining_packages }}"
+    with_items: "{{ ceph_remaining_packages }}"
     when:
-      ansible_pkg_mgr == 'apt' and
-      purge_all_packages == true
+      - ansible_pkg_mgr == 'apt'
+      - purge_all_packages == true

   - name: remove config
     file:
@@ -607,18 +584,15 @@

   - name: remove from SysV
     shell: "update-rc.d -f ceph remove"
-    when:
-      ansible_distribution == 'Ubuntu'
+    when: ansible_distribution == 'Ubuntu'

   - name: remove Upstart and SysV files
     shell: "find /etc -name '*ceph*' -delete"
-    when:
-      ansible_distribution == 'Ubuntu'
+    when: ansible_distribution == 'Ubuntu'

   - name: remove Upstart and apt logs and cache
     shell: "find /var -name '*ceph*' -delete"
-    when:
-      ansible_distribution == 'Ubuntu'
+    when: ansible_distribution == 'Ubuntu'

   - name: request data removal
     local_action: shell echo requesting data removal
@@ -628,8 +602,7 @@

   - name: purge dnf cache
     command: dnf clean all
-    when:
-      ansible_pkg_mgr == 'dnf'
+    when: ansible_pkg_mgr == 'dnf'

   - name: purge RPM cache in /tmp
     file:
@@ -638,15 +611,13 @@

   - name: clean apt
     shell: apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
-    when:
-      ansible_pkg_mgr == 'apt'
+    when: ansible_pkg_mgr == 'apt'

   - name: purge rh_storage.repo file in /etc/yum.repos.d
     file:
       path: /etc/yum.repos.d/rh_storage.repo
       state: absent
-    when:
-      ansible_os_family == 'RedHat'
+    when: ansible_os_family == 'RedHat'

 - name: purge fetch directory
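
A note on the convention being applied, for reviewers unfamiliar with it: multiple ANDed
conditions are written as a YAML list under "when:" (Ansible ANDs list items implicitly),
and a single condition collapses onto one line. The sketch below is illustrative only;
the task and service names are placeholders, not taken from purge-cluster.yml.

  # Before: conditions joined into one string with "and"
  - name: stop some_daemon with systemd
    service:
      name: some_daemon
      state: stopped
    when:
      ansible_os_family == 'RedHat' and
      systemd_unit_files.stdout != "0"

  # After: one list item per condition (implicit AND between items)
  - name: stop some_daemon with systemd
    service:
      name: some_daemon
      state: stopped
    when:
      - ansible_os_family == 'RedHat'
      - systemd_unit_files.stdout != "0"

  # After: a single condition stays on one line
  - name: stop some_daemon the sysvinit way
    shell: "service some_daemon stop"
    when: ansible_os_family == 'RedHat'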