# ceph-ansible/infrastructure-playbooks/purge-cluster.yml
---
# This playbook purges Ceph
# It removes: packages, configuration files and ALL THE DATA
#
# Use it like this:
# ansible-playbook purge-cluster.yml
# Prompts for confirmation to purge. Defaults to 'no' and
# does not purge the cluster; answering 'yes' purges the cluster.
#
# ansible-playbook -e ireallymeanit=yes|no purge-cluster.yml
# Overrides the prompt using the -e option. Can be used in
# automation scripts to avoid the interactive prompt.
- name: confirm whether user really meant to purge the cluster
hosts: localhost
gather_facts: false
vars_prompt:
- name: ireallymeanit
prompt: Are you sure you want to purge the cluster?
default: 'no'
private: no
tasks:
- name: exit playbook, if user did not mean to purge cluster
fail:
msg: >
"Exiting purge-cluster playbook, cluster was NOT purged.
To purge the cluster, either say 'yes' on the prompt or
use `-e ireallymeanit=yes` on the command line when
invoking the playbook"
when: ireallymeanit != 'yes'
- name: gather facts and check if using systemd
vars:
mon_group_name: mons
osd_group_name: osds
mds_group_name: mdss
rgw_group_name: rgws
rbdmirror_group_name: rbd-mirrors
nfs_group_name: nfss
hosts:
- "{{ mon_group_name }}"
- "{{ osd_group_name }}"
- "{{ mds_group_name }}"
- "{{ rgw_group_name }}"
- "{{ rbdmirror_group_name }}"
- "{{ nfs_group_name }}"
become: true
tasks:
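# A non-zero count of ceph* unit files under /usr/lib/systemd/system means
# the daemons are managed by systemd rather than sysvinit/upstart.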
- name: are we using systemd
shell: "if [ -d /usr/lib/systemd ] ; then find /usr/lib/systemd/system -name 'ceph*' | wc -l ; else echo 0 ; fi"
register: systemd_unit_files
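# NOTE: variables registered here persist per host across the plays below,
# so each purge play can test systemd_unit_files without re-running the check.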
- name: purge ceph mds cluster
vars:
mds_group_name: mdss
hosts:
- "{{ mds_group_name }}"
gather_facts: false # Already gathered previously
become: true
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-mds/defaults/main.yml
- include_vars: group_vars/all
failed_when: false
- include_vars: group_vars/{{ mds_group_name }}
failed_when: false
- name: stop ceph.target with systemd
service:
name: ceph.target
state: stopped
enabled: no
when:
ansible_os_family == 'RedHat' and
systemd_unit_files.stdout != "0"
- name: stop ceph mdss with systemd
service:
name: ceph-mds@{{ ansible_hostname }}
state: stopped
enabled: no
when:
ansible_os_family == 'RedHat' and
systemd_unit_files.stdout != "0"
- name: stop ceph mdss
shell: "service ceph status mds ; if [ $? == 0 ] ; then service ceph stop mds ; else echo ; fi"
when:
ansible_os_family == 'RedHat'
# Ubuntu 14.04
- name: stop ceph mdss on ubuntu
command: initctl stop ceph-mds cluster={{ cluster }} id={{ ansible_hostname }}
failed_when: false
when:
ansible_distribution == 'Ubuntu'
- name: purge ceph rgw cluster
vars:
rgw_group_name: rgws
hosts:
- "{{ rgw_group_name }}"
gather_facts: false # Already gathered previously
become: true
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-rgw/defaults/main.yml
- include_vars: group_vars/all
failed_when: false
- include_vars: group_vars/{{ rgw_group_name }}
failed_when: false
- name: stop ceph.target with systemd
service:
name: ceph.target
state: stopped
enabled: no
when:
ansible_os_family == 'RedHat' and
systemd_unit_files.stdout != "0"
- name: stop ceph rgws with systemd
service:
name: ceph-radosgw@rgw.{{ ansible_hostname }}
state: stopped
enabled: no
when:
ansible_os_family == 'RedHat' and
systemd_unit_files.stdout != "0"
- name: stop ceph rgws
shell: "service ceph-radosgw status ; if [ $? == 0 ] ; then service ceph-radosgw stop ; else echo ; fi"
when:
ansible_os_family == 'RedHat'
# Ubuntu 14.04
- name: stop ceph rgws on ubuntu
command: initctl stop radosgw cluster={{ cluster }} id={{ ansible_hostname }}
failed_when: false
when:
ansible_distribution == 'Ubuntu'
- name: purge ceph rbd-mirror cluster
vars:
rbdmirror_group_name: rbd-mirrors
hosts:
- "{{ rbdmirror_group_name }}"
gather_facts: false # Already gathered previously
become: true
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-rbd-mirror/defaults/main.yml
- include_vars: group_vars/all
failed_when: false
- include_vars: group_vars/{{ rbdmirror_group_name }}
failed_when: false
- name: stop ceph.target with systemd
service:
name: ceph.target
state: stopped
enabled: no
when:
ansible_os_family == 'RedHat' and
systemd_unit_files.stdout != "0"
- name: stop ceph rbd mirror with systemd
service:
name: ceph-rbd-mirror@admin.service
state: stopped
when:
ansible_os_family == 'RedHat' and
systemd_unit_files.stdout != "0"
# Ubuntu 14.04
- name: stop ceph rbd mirror on ubuntu
command: initctl stop ceph-rbd-mirror cluster={{ cluster }} id=admin
failed_when: false
when:
ansible_distribution == 'Ubuntu'
- name: purge ceph nfs cluster
vars:
nfs_group_name: nfss
hosts:
- "{{ nfs_group_name }}"
gather_facts: false # Already gathered previously
become: true
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-nfs/defaults/main.yml
- include_vars: group_vars/all
failed_when: false
- include_vars: group_vars/{{ nfs_group_name }}
failed_when: false
- name: stop ceph.target with systemd
service:
name: ceph.target
state: stopped
enabled: no
when:
ansible_os_family == 'RedHat' and
systemd_unit_files.stdout != "0"
- name: stop ceph nfss with systemd
service:
name: nfs-ganesha
state: stopped
when:
ansible_os_family == 'RedHat' and
systemd_unit_files.stdout != "0"
- name: stop ceph nfss
shell: "service nfs-ganesha status ; if [ $? == 0 ] ; then service nfs-ganesha stop ; else echo ; fi"
when:
ansible_os_family == 'RedHat'
# Ubuntu 14.04
- name: stop ceph nfss on ubuntu
command: initctl stop nfs-ganesha
failed_when: false
when:
ansible_distribution == 'Ubuntu'
- name: purge ceph osd cluster
vars:
osd_group_name: osds
# When set to true and raw_multi_journal is used, the block devices are also zapped
zap_block_devs: true
hosts:
- "{{ osd_group_name }}"
gather_facts: false # Already gathered previously
become: true
handlers:
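# Fire-and-forget reboot: async 1 with poll 0 lets the task return
# before the shutdown drops the SSH connection.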
- name: restart machine
shell: sleep 2 && shutdown -r now "Ansible updates triggered"
async: 1
poll: 0
ignore_errors: true
- name: wait for server to boot
become: false
local_action: wait_for port=22 host={{ inventory_hostname }} state=started delay=10 timeout=500
- name: remove data
file:
path: /var/lib/ceph
state: absent
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-osd/defaults/main.yml
- include_vars: group_vars/all
failed_when: false
- include_vars: group_vars/{{ osd_group_name }}
failed_when: false
- name: check for a device list
fail:
msg: "OSD automatic discovery was detected, purge cluster does not support this scenario. If you want to purge the cluster, manually provide the list of devices in group_vars/{{ osd_group_name }} using the devices variable."
when:
devices|length == 0 and
osd_auto_discovery
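# OSD directories are named <cluster>-<id>; the cut extracts the numeric id.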
- name: get osd numbers
shell: "if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | cut -d '-' -f 2 ; fi"
register: osd_ids
changed_when: false
- name: stop ceph.target with systemd
service:
name: ceph.target
state: stopped
enabled: no
when:
ansible_os_family == 'RedHat' and
systemd_unit_files.stdout != "0"
- name: stop ceph-osd with systemd
service:
name: ceph-osd@{{item}}
state: stopped
enabled: no
with_items: "{{ osd_ids.stdout_lines }}"
when:
ansible_os_family == 'RedHat' and
systemd_unit_files.stdout != "0"
# Before the infernalis release Ceph used sysvinit scripts.
# We use this test so we do not have to know which RPM contains the init
# script or where it is placed.
- name: stop ceph osds
shell: "service ceph status osd ; if [ $? == 0 ] ; then service ceph stop osd ; else echo ; fi"
when:
ansible_os_family == 'RedHat'
# Ubuntu 14.04
- name: stop ceph osds on ubuntu
shell: |
for id in $(ls /var/lib/ceph/osd/ |grep -oh '[0-9]*'); do
initctl stop ceph-osd cluster={{ cluster }} id=$id
done
failed_when: false
when:
ansible_distribution == 'Ubuntu'
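# ceph-disk labels the partitions it creates ("ceph data", "ceph journal"),
# so matching /dev/disk/by-partlabel tells us the disks were prepared by ceph-disk.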
- name: see if ceph-disk-created data partitions are present
shell: |
ls /dev/disk/by-partlabel | grep -q "ceph.*.data"
failed_when: false
register: ceph_data_partlabels
- name: see if ceph-disk-created journal partitions are present
shell: |
ls /dev/disk/by-partlabel | grep -q "ceph.*.journal"
failed_when: false
register: ceph_journal_partlabels
- name: get ceph journal partitions
shell: |
blkid | awk '/ceph journal/ { sub (":", "", $1); print $1 }'
when:
- ceph_journal_partlabels.rc == 0
failed_when: false
register: ceph_journal_partition_to_erase_path
- name: get osd data mount points
shell: "(grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'"
register: mounted_osd
changed_when: false
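# sync flushes dirty pages to disk and dropping the caches frees the page
# cache, making the umount below less likely to fail on a busy mount.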
- name: drop all cache
shell: "sync && sleep 1 && echo 3 > /proc/sys/vm/drop_caches"
- name: umount osd data partition
shell: umount {{ item }}
with_items:
- "{{ mounted_osd.stdout_lines }}"
- name: remove osd mountpoint tree
file:
path: /var/lib/ceph/osd/
state: absent
register: remove_osd_mountpoints
ignore_errors: true
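# If the mountpoint tree could not be removed (typically a busy device),
# reboot the node, wait for it to come back, then retry the data removal
# via the handlers above.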
- name: is reboot needed
local_action: shell echo requesting reboot
become: false
notify:
- restart machine
- wait for server to boot
- remove data
when:
remove_osd_mountpoints.failed is defined
- name: see if ceph-disk is installed
shell: "which ceph-disk"
failed_when: false
register: ceph_disk_present
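# DESTRUCTIVE: ceph-disk zap wipes the partition table and all data on each listed device.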
- name: zap osd disks
shell: ceph-disk zap "{{ item }}"
with_items: "{{ devices }}"
when:
ceph_disk_present.rc == 0 and
ceph_data_partlabels.rc == 0 and
zap_block_devs
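# Delete only the journal partition with sgdisk; refuse to touch a raw
# device that carries a boot partition so the system disk is never wiped.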
- name: zap ceph journal partitions
shell: |
# if the disk passed is a raw device AND the boot system disk
if echo "{{ item }}" | egrep -sq '/dev/([hsv]d[a-z]{1,2}|cciss/c[0-9]d[0-9]p|nvme[0-9]n[0-9]p){1,2}$' && parted -s $(echo "{{ item }}" | egrep -o '/dev/([hsv]d[a-z]{1,2}|cciss/c[0-9]d[0-9]p|nvme[0-9]n[0-9]p){1,2}') print | grep -sq boot; then
echo "Looks like {{ item }} has a boot partition,"
echo "if you want to delete specific partitions point to the partition instead of the raw device"
echo "Do not use your system disk!"
exit 1
fi
raw_device=$(echo "{{ item }}" | egrep -o '/dev/([hsv]d[a-z]{1,2}|cciss/c[0-9]d[0-9]p|nvme[0-9]n[0-9]p){1,2}')
partition_nb=$(echo "{{ item }}" | egrep -o '[0-9]{1,2}$')
sgdisk --delete $partition_nb $raw_device
with_items: "{{ceph_journal_partition_to_erase_path.stdout_lines}}"
when:
ceph_journal_partlabels.rc == 0 and
zap_block_devs
- name: purge ceph mon cluster
vars:
mon_group_name: mons
restapi_group_name: restapis
hosts:
- "{{ mon_group_name }}"
gather_facts: false # Already gathered previously
become: true
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-mon/defaults/main.yml
- include_vars: roles/ceph-restapi/defaults/main.yml
- include_vars: group_vars/all
failed_when: false
- include_vars: group_vars/{{ mon_group_name }}
failed_when: false
- include_vars: group_vars/{{ restapi_group_name }}
failed_when: false
- name: stop ceph.target with systemd
service:
name: ceph.target
state: stopped
enabled: no
when:
ansible_os_family == 'RedHat' and
systemd_unit_files.stdout != "0"
- name: stop ceph mons with systemd
service:
name: ceph-mon@{{ ansible_hostname }}
state: stopped
enabled: no
when:
ansible_os_family == 'RedHat' and
systemd_unit_files.stdout != "0"
- name: stop ceph mons
shell: "service ceph status mon ; if [ $? == 0 ] ; then service ceph stop mon ; else echo ; fi"
when:
ansible_os_family == 'RedHat'
- name: stop ceph mons on ubuntu
command: initctl stop ceph-mon cluster={{ cluster }} id={{ ansible_hostname }}
failed_when: false
when:
ansible_distribution == 'Ubuntu'
- name: remove monitor store and bootstrap keys
file:
path: /var/lib/ceph/
state: absent
- name: final cleanup - check any running ceph, purge ceph packages, purge config and remove data
vars:
mon_group_name: mons
osd_group_name: osds
mds_group_name: mdss
rgw_group_name: rgws
rbdmirror_group_name: rbd-mirrors
nfs_group_name: nfss
# When set to true both groups of packages are purged.
# This can cause problems with qemu-kvm.
purge_all_packages: true
ceph_packages:
- ceph
- ceph-common
- ceph-fs-common
- ceph-fuse
- ceph-mds
- ceph-release
- ceph-radosgw
ceph_remaining_packages:
- libcephfs1
- librados2
- libradosstriper1
- librbd1
- python-cephfs
- python-rados
- python-rbd
hosts:
- "{{ mon_group_name }}"
- "{{ osd_group_name }}"
- "{{ mds_group_name }}"
- "{{ rgw_group_name }}"
- "{{ rbdmirror_group_name }}"
- "{{ nfs_group_name }}"
gather_facts: false # Already gathered previously
become: true
handlers:
- name: remove data
file:
path: /var/lib/ceph
state: absent
tasks:
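# The [c]eph bracket trick prevents grep from matching its own command line;
# rc == 0 means a ceph daemon is still running, so we abort before purging.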
- name: check for anything running ceph
shell: "ps awux | grep -- /usr/bin/[c]eph-"
register: check_for_running_ceph
failed_when: check_for_running_ceph.rc == 0
- name: purge ceph packages with yum
yum:
name: "{{ item }}"
state: absent
with_items:
- "{{ ceph_packages }}"
when:
ansible_pkg_mgr == 'yum'
- name: purge ceph packages with dnf
dnf:
name: "{{ item }}"
state: absent
with_items:
- "{{ ceph_packages }}"
when:
ansible_pkg_mgr == 'dnf'
- name: purge ceph packages with apt
apt:
name: "{{ item }}"
state: absent
with_items:
- "{{ ceph_packages }}"
when:
ansible_pkg_mgr == 'apt'
- name: purge remaining ceph packages with yum
yum:
name: "{{ item }}"
state: absent
with_items:
- "{{ ceph_remaining_packages }}"
when:
ansible_pkg_mgr == 'yum' and
purge_all_packages == true
- name: purge remaining ceph packages with dnf
dnf:
name: "{{ item }}"
state: absent
with_items:
- "{{ ceph_remaining_packages }}"
when:
ansible_pkg_mgr == 'dnf' and
purge_all_packages == true
- name: purge remaining ceph packages with apt
apt:
name: "{{ item }}"
state: absent
with_items:
- "{{ ceph_remaining_packages }}"
when:
ansible_pkg_mgr == 'apt' and
purge_all_packages == true
- name: remove config
file:
path: /etc/ceph
state: absent
- name: remove logs
file:
path: /var/log/ceph
state: absent
- name: remove from SysV
shell: "update-rc.d -f ceph remove"
when:
ansible_distribution == 'Ubuntu'
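# Aggressive cleanup: the two find tasks below delete every path under
# /etc and /var whose name contains "ceph".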
- name: remove Upstart and SysV files
shell: "find /etc -name '*ceph*' -delete"
when:
ansible_distribution == 'Ubuntu'
- name: remove Upstart and apt logs and cache
shell: "find /var -name '*ceph*' -delete"
when:
ansible_distribution == 'Ubuntu'
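# This local no-op exists only to notify the handler; the actual
# /var/lib/ceph removal happens in the "remove data" handler at the end of the play.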
- name: request data removal
local_action: shell echo requesting data removal
become: false
notify:
- remove data
- name: purge dnf cache
command: dnf clean all
when:
ansible_pkg_mgr == 'dnf'
- name: purge RPM cache in /tmp
file:
path: /tmp/rh-storage-repo
state: absent
- name: clean apt
shell: apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
when:
ansible_pkg_mgr == 'apt'
- name: purge rh_storage.repo file in /etc/yum.repos.d
file:
path: /etc/yum.repos.d/rh_storage.repo
state: absent
when:
ansible_os_family == 'RedHat'
- name: purge fetch directory
vars:
mon_group_name: mons
osd_group_name: osds
mds_group_name: mdss
rgw_group_name: rgws
rbdmirror_group_name: rbd-mirrors
nfs_group_name: nfss
restapi_group_name: restapis
hosts:
- localhost
gather_facts: false
tasks:
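# Load every group's vars so fetch_directory is resolved the same way it
# was during deployment before removing it from the Ansible machine.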
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: group_vars/all
failed_when: false
- include_vars: group_vars/{{ mds_group_name }}
failed_when: false
- include_vars: group_vars/{{ rgw_group_name }}
failed_when: false
- include_vars: group_vars/{{ rbdmirror_group_name }}
failed_when: false
- include_vars: group_vars/{{ nfs_group_name }}
failed_when: false
- include_vars: group_vars/{{ osd_group_name }}
failed_when: false
- include_vars: group_vars/{{ mon_group_name }}
failed_when: false
- include_vars: group_vars/{{ restapi_group_name }}
failed_when: false
- name: purge fetch directory for localhost
file:
path: "{{ fetch_directory }}"
state: absent