purge: reindent playbook

This commit reindents the playbook.
It also improves readability by adding an extra blank line between plays.
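
For illustration, the intended layout looks roughly like this (a minimal sketch with made-up plays, not taken verbatim from the changed files): play keys are indented two spaces under the list dash, tasks are nested consistently, and one blank line separates consecutive plays.

- name: first example play
  hosts: localhost
  gather_facts: false
  tasks:
    - name: print a message
      debug:
        msg: "hello from the first play"

- name: second example play
  hosts: all
  become: true
  tasks:
    - name: show the inventory hostname
      debug:
        msg: "{{ inventory_hostname }}"

Running yamllint or ansible-lint over the playbooks is a quick way to confirm the indentation stays consistent.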

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 60aa70a128)
pull/6749/head
Guillaume Abrioux 2021-07-13 14:26:40 +02:00
parent eba580320c
commit cf812d06e3
2 changed files with 693 additions and 803 deletions

infrastructure-playbooks/purge-cluster.yml

@ -14,13 +14,11 @@
- name: confirm whether user really meant to purge the cluster
hosts: localhost
gather_facts: false
vars_prompt:
- name: ireallymeanit
prompt: Are you sure you want to purge the cluster?
default: 'no'
private: no
tasks:
- name: exit playbook, if user did not mean to purge cluster
fail:
@ -31,8 +29,8 @@
invoking the playbook"
when: ireallymeanit != 'yes'
- name: gather facts on all hosts
- name: gather facts on all hosts
hosts:
- "{{ mon_group_name | default('mons') }}"
- "{{ osd_group_name | default('osds') }}"
@ -45,15 +43,15 @@
- grafana-server
become: true
tasks:
- debug: msg="gather facts on all Ceph hosts for following reference"
- debug:
msg: "gather facts on all Ceph hosts for following reference"
- name: check there's no ceph kernel threads present
hosts: "{{ client_group_name | default('clients') }}"
become: true
any_errors_fatal: true
tasks:
- import_role:
name: ceph-defaults
@ -109,19 +107,14 @@
- ceph
- libceph
- name: purge ceph nfs cluster
- name: purge ceph nfs cluster
vars:
nfs_group_name: nfss
hosts: "{{ nfs_group_name | default('nfss') }}"
gather_facts: false # Already gathered previously
become: true
tasks:
- name: stop ceph nfss with systemd
service:
name: nfs-ganesha
@ -129,6 +122,7 @@
failed_when: false
when: ansible_facts['service_mgr'] == 'systemd'
- name: purge node-exporter
hosts:
- "{{ mon_group_name | default('mons') }}"
@ -142,9 +136,7 @@
- grafana-server
- clients
- iscsigws
become: true
tasks:
- import_role:
name: ceph-defaults
@ -182,7 +174,6 @@
- grafana-server
- prometheus
- alertmanager
tasks:
- import_role:
name: ceph-defaults
@ -235,18 +226,12 @@
- name: purge ceph mds cluster
vars:
mds_group_name: mdss
hosts: "{{ mds_group_name | default('mdss') }}"
gather_facts: false # Already gathered previously
become: true
tasks:
- name: stop ceph mdss with systemd
service:
name: ceph-mds@{{ ansible_facts['hostname'] }}
@ -256,18 +241,12 @@
- name: purge ceph mgr cluster
vars:
mgr_group_name: mgrs
hosts: "{{ mgr_group_name | default('mgrs') }}"
gather_facts: false # Already gathered previously
become: true
tasks:
- name: stop ceph mgrs with systemd
service:
name: ceph-mgr@{{ ansible_facts['hostname'] }}
@ -276,20 +255,15 @@
failed_when: false
when: ansible_facts['service_mgr'] == 'systemd'
- name: purge rgwloadbalancer cluster
- name: purge rgwloadbalancer cluster
vars:
rgwloadbalancer_group_name: rgwloadbalancers
hosts:
- "{{ rgwloadbalancer_group_name | default('rgwloadbalancers') }}"
gather_facts: false # Already gathered previously
become: true
tasks:
- name: stop rgwloadbalancer services
service:
name: ['keepalived', 'haproxy']
@ -297,17 +271,13 @@
enabled: no
failed_when: false
- name: purge ceph rgw cluster
- name: purge ceph rgw cluster
vars:
rgw_group_name: rgws
hosts: "{{ rgw_group_name | default('rgws') }}"
gather_facts: false # Already gathered previously
become: true
tasks:
- import_role:
name: ceph-defaults
@ -326,18 +296,12 @@
- name: purge ceph rbd-mirror cluster
vars:
rbdmirror_group_name: rbdmirrors
hosts: "{{ rbdmirror_group_name | default('rbdmirrors') }}"
gather_facts: false # Already gathered previously
become: true
tasks:
- name: stop ceph rbd mirror with systemd
service:
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
@ -346,17 +310,12 @@
- name: purge ceph osd cluster
vars:
osd_group_name: osds
reboot_osd_node: False
hosts: "{{ osd_group_name | default('osds') }}"
gather_facts: false # Already gathered previously
become: true
handlers:
- name: restart machine
shell: sleep 2 && shutdown -r now "Ansible updates triggered"
@ -376,9 +335,7 @@
- name: remove data
shell: rm -rf /var/lib/ceph/* # noqa 302
tasks:
- import_role:
name: ceph-defaults
@ -581,15 +538,20 @@
changed_when: false
with_items: "{{ combined_devices_list }}"
- name: check parent device partition
parted:
device: "/dev/{{ item }}"
loop: "{{ resolved_parent_device }}"
register: parted_info
- name: fail if there is a boot partition on the device
fail:
msg: "{{ item.item }} has a boot partition"
loop: "{{ parted_info.results }}"
when: "'boot' in (item.partitions | map(attribute='flags') | list | flatten)"
- name: zap ceph journal/block db/block wal partitions # noqa 306
shell: |
# if the disk passed is a raw device AND the boot system disk
if parted -s /dev/"{{ item }}" print | grep -sq boot; then
echo "Looks like /dev/{{ item }} has a boot partition,"
echo "if you want to delete specific partitions point to the partition instead of the raw device"
echo "Do not use your system disk!"
exit 1
fi
sgdisk -Z --clear --mbrtogpt -g -- /dev/"{{ item }}"
dd if=/dev/zero of=/dev/"{{ item }}" bs=1M count=200
parted -s /dev/"{{ item }}" mklabel gpt
@ -598,19 +560,14 @@
with_items: "{{ resolved_parent_device }}"
changed_when: false
- name: purge ceph mon cluster
- name: purge ceph mon cluster
vars:
mon_group_name: mons
hosts: "{{ mon_group_name|default('mons') }}"
gather_facts: false # already gathered previously
become: true
tasks:
- name: stop ceph mons with systemd
service:
name: "ceph-{{ item }}@{{ ansible_facts['hostname'] }}"
@ -621,7 +578,6 @@
- mon
- mgr
- name: remove monitor store and bootstrap keys
file:
path: "{{ item }}"
@ -635,6 +591,7 @@
- /var/lib/ceph/bootstrap-mgr
- /var/lib/ceph/tmp
- name: purge ceph-crash daemons
hosts:
- "{{ mon_group_name | default('mons') }}"
@ -660,12 +617,10 @@
- name: final cleanup - check any running ceph, purge ceph packages, purge config and remove data
vars:
# When set to true both groups of packages are purged.
# This can cause problem with qemu-kvm
purge_all_packages: true
ceph_packages:
- ceph
- ceph-common
@ -677,7 +632,7 @@
- ceph-radosgw
- calamari-server
- ceph-grafana-dashboards
- rbd-mirror
ceph_remaining_packages:
- libcephfs1
- libcephfs2
@ -692,7 +647,6 @@
extra_packages:
- keepalived
- haproxy
hosts:
- "{{ mon_group_name | default('mons') }}"
- "{{ osd_group_name | default('osds') }}"
@ -703,11 +657,8 @@
- "{{ client_group_name | default('clients') }}"
- "{{ mgr_group_name | default('mgrs') }}"
- grafana-server
gather_facts: false # Already gathered previously
become: true
handlers:
- name: get osd data and lockbox mount points
shell: "(grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'"
@ -725,9 +676,7 @@
- name: remove data
shell: rm -rf /var/lib/ceph/* # noqa 302
listen: "remove data"
tasks:
- name: purge ceph packages with yum
yum:
name: "{{ ceph_packages }}"
@ -820,11 +769,6 @@
command: dnf clean all
when: ansible_facts['pkg_mgr'] == 'dnf'
- name: purge rpm cache in /tmp
file:
path: /tmp/rh-storage-repo
state: absent
- name: clean apt
command: apt-get clean # noqa 303
when: ansible_facts['pkg_mgr'] == 'apt'
@ -836,7 +780,6 @@
with_items:
- ceph-dev
- ceph_stable
- rh_storage
when: ansible_facts['os_family'] == 'RedHat'
- name: check for anything running ceph
@ -862,13 +805,9 @@
- name: purge fetch directory
hosts: localhost
gather_facts: false
tasks:
- name: set fetch_directory value if not set
set_fact:
fetch_directory: "fetch/"

infrastructure-playbooks/purge-container-cluster.yml

@ -3,11 +3,8 @@
# It removes: packages, containers, configuration files and ALL THE DATA
- name: confirm whether user really meant to purge the cluster
hosts: localhost
gather_facts: false
vars_prompt:
- name: ireallymeanit
prompt: >
@ -17,7 +14,6 @@
Do you want to continue?
default: 'no'
private: no
tasks:
- name: exit playbook, if user did not mean to purge cluster
fail:
@ -33,11 +29,11 @@
ceph_docker_registry: "docker.io"
when: ceph_docker_registry is not defined
- name: check there's no ceph kernel threads present
hosts: "{{ client_group_name|default('clients') }}"
become: true
any_errors_fatal: true
tasks:
- import_role:
name: ceph-defaults
@ -95,13 +91,9 @@
- name: purge ceph nfs cluster
hosts: "{{ nfs_group_name | default('nfss') }}"
become: true
tasks:
- name: disable ceph nfs service
service:
name: "ceph-nfs@{{ ansible_facts['hostname'] }}"
@ -125,13 +117,9 @@
- name: purge ceph mds cluster
hosts: "{{ mds_group_name | default('mdss') }}"
become: true
tasks:
- name: disable ceph mds service
service:
name: "ceph-mds@{{ ansible_facts['hostname'] }}"
@ -146,11 +134,9 @@
- name: purge ceph iscsigws cluster
hosts: "{{ iscsi_gw_group_name | default('iscsigws') }}"
become: true
tasks:
- name: disable ceph iscsigw services
service:
name: "{{ item }}"
@ -174,11 +160,9 @@
- name: purge ceph mgr cluster
hosts: "{{ mgr_group_name | default('mgrs') }}"
become: true
tasks:
- name: disable ceph mgr service
service:
name: "ceph-mgr@{{ ansible_facts['hostname'] }}"
@ -193,11 +177,8 @@
- name: purge ceph rgw cluster
hosts: "{{ rgw_group_name | default('rgws') }}"
become: true
tasks:
- import_role:
name: ceph-defaults
@ -221,13 +202,9 @@
- name: purge ceph rbd-mirror cluster
hosts: "{{ rbdmirror_group_name | default('rbdmirrors') }}"
become: true
tasks:
- name: disable ceph rbd-mirror service
service:
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
@ -242,14 +219,10 @@
- name: purge ceph osd cluster
hosts: "{{ osd_group_name | default('osds') }}"
gather_facts: true
become: true
tasks:
- import_role:
name: ceph-defaults
@ -283,7 +256,6 @@
file:
path: /var/lib/ceph/osd/
state: absent
register: remove_osd_mountpoints
ignore_errors: true
- name: default lvm_volumes if not defined
@ -336,14 +308,11 @@
- "{{ playbook_dir }}/group_vars/osds.yml"
skip: true
- name: purge ceph mon cluster
hosts: "{{ mon_group_name|default('mons') }}"
become: true
tasks:
# since mgr are now collocated with mons by default
- name: disable ceph mon and mgr service
service:
@ -365,7 +334,6 @@
- name: purge node-exporter
hosts:
- "{{ mon_group_name | default('mons') }}"
- "{{ osd_group_name | default('osds') }}"
@ -377,11 +345,8 @@
- grafana-server
- iscsigws
- clients
gather_facts: false
become: true
tasks:
- import_role:
name: ceph-defaults
@ -410,20 +375,16 @@
- remove_img
when: dashboard_enabled | bool
- name: purge ceph-grafana
hosts: grafana-server
gather_facts: false
become: true
vars:
grafana_services:
- grafana-server
- prometheus
- alertmanager
tasks:
- import_role:
name: ceph-defaults
@ -473,6 +434,7 @@
failed_when: false
when: dashboard_enabled | bool
- name: purge ceph-crash containers
hosts:
- "{{ mon_group_name | default('mons') }}"
@ -502,8 +464,8 @@
path: /var/lib/ceph/crash
state: absent
- name: check container hosts
- name: check container hosts
hosts:
- "{{ mon_group_name | default('mons') }}"
- "{{ osd_group_name | default('osds') }}"
@ -512,11 +474,11 @@
- "{{ rbdmirror_group_name | default('rbdmirrors') }}"
- "{{ nfs_group_name | default('nfss') }}"
- "{{ mgr_group_name | default('mgrs') }}"
gather_facts: true
become: true
tasks:
- import_role:
name: ceph-defaults
- import_role:
name: ceph-defaults
@ -543,7 +505,6 @@
- name: final cleanup
hosts:
- "{{ mon_group_name | default('mons') }}"
- "{{ osd_group_name | default('osds') }}"
@ -553,11 +514,8 @@
- "{{ nfs_group_name | default('nfss') }}"
- "{{ mgr_group_name | default('mgrs') }}"
- "{{ grafana_server_group_name | default('grafana-server') }}"
become: true
tags: with_pkg
tasks:
- import_role:
name: ceph-defaults
@ -663,7 +621,6 @@
- name: purge ceph directories
hosts:
- "{{ mon_group_name | default('mons') }}"
- "{{ osd_group_name | default('osds') }}"
@ -672,11 +629,8 @@
- "{{ rbdmirror_group_name | default('rbdmirrors') }}"
- "{{ nfs_group_name | default('nfss') }}"
- "{{ mgr_group_name | default('mgrs') }}"
gather_facts: false # Already gathered previously
become: true
tasks:
- name: purge ceph directories for "{{ ansible_facts['hostname'] }}" and ceph socket
file:
@ -698,14 +652,11 @@
shell: rm -rf /var/lib/docker/* # noqa 302
when: not is_atomic | bool
- name: purge fetch directory
hosts: localhost
gather_facts: false
tasks:
- name: set fetch_directory value if not set
set_fact:
fetch_directory: "fetch/"