---
# This playbook purges Ceph
# It removes: packages, configuration files and ALL THE DATA
#
# Use it like this:
# ansible-playbook purge-cluster.yml
#     Prompts for confirmation to purge, defaults to no and
#     doesn't purge the cluster. Saying yes purges the cluster.
#
# ansible-playbook -e ireallymeanit=yes|no purge-cluster.yml
#     Overrides the prompt using the -e option. Can be used in
#     automation scripts to avoid the interactive prompt.

- name: confirm whether user really meant to purge the cluster
  hosts: localhost

  vars_prompt:
    - name: ireallymeanit
      prompt: Are you sure you want to purge the cluster?
      default: 'no'
      private: no

  tasks:
  - name: exit playbook, if user did not mean to purge cluster
    fail:
      msg: >
        Exiting purge-cluster playbook, cluster was NOT purged.
        To purge the cluster, either say 'yes' on the prompt or
        use `-e ireallymeanit=yes` on the command line when
        invoking the playbook.
    when: ireallymeanit != 'yes'

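# NOTE: variables passed with `-e` take precedence over vars_prompt, so
# supplying `-e ireallymeanit=yes` skips the interactive prompt entirely,
# which is what makes this playbook usable from automation.
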
- name: stop ceph cluster
  hosts:
    - mons
    - osds
    - mdss
    - rgws

  become: yes

  vars:
    osd_group_name: osds
    mon_group_name: mons
    rgw_group_name: rgws
    mds_group_name: mdss
    rbdmirror_group_name: rbdmirrors

    # When set to true, both groups of packages are purged.
    # This can cause problems with qemu-kvm, which typically depends on
    # the Ceph client libraries (librbd1, librados2).
    purge_all_packages: true

    # When set to true, the OSD block devices are zapped; when
    # raw_multi_journal is used, the journal devices are zapped as well.
    zap_block_devs: true

    ceph_packages:
      - ceph
      - ceph-common
      - ceph-fs-common
      - ceph-fuse
      - ceph-mds
      - ceph-release
      - ceph-radosgw

    ceph_remaining_packages:
      - libcephfs1
      - librados2
      - libradosstriper1
      - librbd1
      - python-cephfs
      - python-rados
      - python-rbd

    cluster: ceph # name of the cluster
    monitor_name: "{{ ansible_hostname }}"
    mds_name: "{{ ansible_hostname }}"

  handlers:
  - name: restart machine
    shell: sleep 2 && shutdown -r now "Ansible updates triggered"
    async: 1
    poll: 0
    ignore_errors: true

  - name: wait for server to boot
    local_action: wait_for port=22 host={{ inventory_hostname }} state=started delay=10 timeout=400

  - name: remove data
    file:
      path: /var/lib/ceph
      state: absent

  tasks:
  - name: check for a device list
    fail:
      msg: "OSD automatic discovery was detected, purge cluster does not support this scenario. If you want to purge the cluster, manually provide the list of devices in group_vars/osds using the devices variable."
    when:
      osd_group_name in group_names and
      devices is not defined and
      osd_auto_discovery

  - name: get osd numbers
    shell: "if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | cut -d '-' -f 2 ; fi"
    register: osd_ids
    changed_when: false

  - name: are we using systemd
    shell: "if [ -d /usr/lib/systemd ] ; then find /usr/lib/systemd/system -name 'ceph*' | wc -l ; else echo 0 ; fi"
    register: systemd_unit_files
    changed_when: false

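  # NOTE: a non-zero count of ceph unit files selects the systemd tasks
  # below; zero falls through to the sysvinit (RedHat) and Upstart (Ubuntu)
  # tasks further down.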
  # after Hammer release

  - name: stop ceph.target with systemd
    service:
      name: ceph.target
      state: stopped
      enabled: no
    when:
      ansible_os_family == 'RedHat' and
      systemd_unit_files.stdout != "0"

  - name: stop ceph-osd with systemd
    service:
      name: ceph-osd@{{ item }}
      state: stopped
      enabled: no
    with_items: "{{ osd_ids.stdout_lines }}"
    when:
      ansible_os_family == 'RedHat' and
      systemd_unit_files.stdout != "0" and
      osd_group_name in group_names

  - name: stop ceph mons with systemd
    service:
      name: ceph-mon@{{ ansible_hostname }}
      state: stopped
      enabled: no
    when:
      ansible_os_family == 'RedHat' and
      systemd_unit_files.stdout != "0" and
      mon_group_name in group_names

  - name: stop ceph mdss with systemd
    service:
      name: ceph-mds@{{ ansible_hostname }}
      state: stopped
      enabled: no
    when:
      ansible_os_family == 'RedHat' and
      systemd_unit_files.stdout != "0" and
      mds_group_name in group_names

  - name: stop ceph rgws with systemd
    service:
      name: ceph-radosgw@rgw.{{ ansible_hostname }}
      state: stopped
      enabled: no
    when:
      ansible_os_family == 'RedHat' and
      systemd_unit_files.stdout != "0" and
      rgw_group_name in group_names

  - name: stop ceph rbd mirror with systemd
    service:
      name: ceph-rbd-mirror@admin.service
      state: stopped
    when:
      ansible_os_family == 'RedHat' and
      systemd_unit_files.stdout != "0" and
      rbdmirror_group_name in group_names

  # before the Infernalis release, using sysvinit scripts
  # we use this test so we do not have to know which RPM contains the boot script
  # or where it is placed.

  - name: stop ceph osds
    shell: "service ceph status osd ; if [ $? == 0 ] ; then service ceph stop osd ; else echo ; fi"
    when:
      ansible_os_family == 'RedHat' and
      osd_group_name in group_names

  - name: stop ceph mons
    shell: "service ceph status mon ; if [ $? == 0 ] ; then service ceph stop mon ; else echo ; fi"
    when:
      ansible_os_family == 'RedHat' and
      mon_group_name in group_names

  - name: stop ceph mdss
    shell: "service ceph status mds ; if [ $? == 0 ] ; then service ceph stop mds ; else echo ; fi"
    when:
      ansible_os_family == 'RedHat' and
      mds_group_name in group_names

  - name: stop ceph rgws
    shell: "service ceph-radosgw status ; if [ $? == 0 ] ; then service ceph-radosgw stop ; else echo ; fi"
    when:
      ansible_os_family == 'RedHat' and
      rgw_group_name in group_names

  # Ubuntu 14.04
  - name: stop ceph osds on ubuntu
    command: initctl stop ceph-osd cluster={{ cluster }} id={{ item }}
    failed_when: false
    with_items: "{{ osd_ids.stdout_lines }}"
    when:
      ansible_distribution == 'Ubuntu' and
      osd_group_name in group_names

  - name: stop ceph mons on ubuntu
    command: initctl stop ceph-mon cluster={{ cluster }} id={{ monitor_name }}
    failed_when: false
    when:
      ansible_distribution == 'Ubuntu' and
      mon_group_name in group_names

  - name: stop ceph mdss on ubuntu
    command: initctl stop ceph-mds cluster={{ cluster }} id={{ mds_name }}
    failed_when: false
    when:
      ansible_distribution == 'Ubuntu' and
      mds_group_name in group_names

  - name: stop ceph rgws on ubuntu
    command: initctl stop radosgw cluster={{ cluster }} id={{ ansible_hostname }}
    failed_when: false
    when:
      ansible_distribution == 'Ubuntu' and
      rgw_group_name in group_names

  - name: stop ceph rbd mirror on ubuntu
    command: initctl stop ceph-rbd-mirror cluster={{ cluster }} id=admin
    failed_when: false
    when:
      ansible_distribution == 'Ubuntu' and
      rbdmirror_group_name in group_names

  - name: check for anything running ceph
    shell: "ps awux | grep -v grep | grep -q -- ceph-"
    register: check_for_running_ceph
    # abort the purge if any ceph daemon is still running on the host
    failed_when: check_for_running_ceph.rc == 0

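  # NOTE: ceph-disk labels partitions "ceph data" and "ceph journal"; udev
  # renders the space as the literal characters \x20 in /dev/disk/by-partlabel.
  # The pattern below is escaped once for the YAML parser and once for grep,
  # so grep ultimately matches the literal string 'ceph\x20data'.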
  - name: see if ceph-disk-created data partitions are present
    shell: "ls /dev/disk/by-partlabel | grep -q 'ceph\\\\x20data'"
    failed_when: false
    register: ceph_data_partlabels

  - name: see if ceph-disk-created journal partitions are present
    shell: "ls /dev/disk/by-partlabel | grep -q 'ceph\\\\x20journal'"
    failed_when: false
    register: ceph_journal_partlabels

  - name: get osd data mount points
    shell: "(grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'"
    register: mounted_osd
    changed_when: false

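  # NOTE: sync flushes dirty pages to disk, and echo 3 > drop_caches frees
  # the page cache plus dentries and inodes, so the umount below is less
  # likely to fail because of lingering references to the OSD filesystems.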
  - name: drop all cache
    shell: "sync && sleep 1 && echo 3 > /proc/sys/vm/drop_caches"
    when:
      osd_group_name in group_names

  - name: umount osd data partition
    shell: umount {{ item }}
    with_items: "{{ mounted_osd.stdout_lines }}"
    when:
      osd_group_name in group_names

  - name: remove osd mountpoint tree
    shell: rm -rf /var/lib/ceph/osd
    register: remove_osd_mountpoints
    failed_when: false
    when:
      osd_group_name in group_names

  - name: remove monitor store and bootstrap keys
    shell: rm -rf /var/lib/ceph/
    failed_when: false
    when:
      mon_group_name in group_names

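  # NOTE: if the mountpoint tree could not be removed (typically because a
  # device is still busy), the handlers below reboot the node, wait for sshd
  # to come back, and then remove /var/lib/ceph after the reboot.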
  - name: is reboot needed
    local_action: shell echo requesting reboot
    notify:
      - restart machine
      - wait for server to boot
      - remove data
    when:
      osd_group_name in group_names and
      remove_osd_mountpoints.rc != 0

  - name: see if ceph-disk is installed
    shell: "which ceph-disk"
    failed_when: false
    register: ceph_disk_present

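  # NOTE: `ceph-disk zap` destroys the partition table and contents of the
  # given device; the disks listed in `devices` are unrecoverable after this.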
  - name: zap osd disks
    shell: ceph-disk zap "{{ item }}"
    with_items: "{{ devices }}"
    when:
      osd_group_name in group_names and
      ceph_disk_present.rc == 0 and
      ceph_data_partlabels.rc == 0 and
      zap_block_devs

  - name: zap journal devices
    shell: ceph-disk zap "{{ item }}"
    with_items: "{{ raw_journal_devices|default([])|unique }}"
    when:
      osd_group_name in group_names and
      ceph_disk_present.rc == 0 and
      ceph_journal_partlabels.rc == 0 and
      zap_block_devs and
      raw_multi_journal

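  # NOTE: `ansible_pkg_mgr` is discovered during fact gathering, so exactly
  # one of the yum/dnf/apt branches below matches on any given host.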
  - name: purge ceph packages with yum
    yum:
      name: "{{ item }}"
      state: absent
    with_items:
      - "{{ ceph_packages }}"
    when:
      ansible_pkg_mgr == 'yum'

  - name: purge ceph packages with dnf
    dnf:
      name: "{{ item }}"
      state: absent
    with_items:
      - "{{ ceph_packages }}"
    when:
      ansible_pkg_mgr == 'dnf'

  - name: purge ceph packages with apt
    apt:
      name: "{{ item }}"
      state: absent
    with_items:
      - "{{ ceph_packages }}"
    when:
      ansible_pkg_mgr == 'apt'

  - name: purge remaining ceph packages with yum
    yum:
      name: "{{ item }}"
      state: absent
    with_items:
      - "{{ ceph_remaining_packages }}"
    when:
      ansible_pkg_mgr == 'yum' and
      purge_all_packages == true

  - name: purge remaining ceph packages with dnf
    dnf:
      name: "{{ item }}"
      state: absent
    with_items:
      - "{{ ceph_remaining_packages }}"
    when:
      ansible_pkg_mgr == 'dnf' and
      purge_all_packages == true

  - name: purge remaining ceph packages with apt
    apt:
      name: "{{ item }}"
      state: absent
    with_items:
      - "{{ ceph_remaining_packages }}"
    when:
      ansible_pkg_mgr == 'apt' and
      purge_all_packages == true

  - name: remove config
    file:
      path: /etc/ceph
      state: absent

  - name: remove logs
    file:
      path: /var/log/ceph
      state: absent

  - name: remove from SysV
    shell: "update-rc.d -f ceph remove"
    when:
      ansible_distribution == 'Ubuntu'

  - name: remove Upstart and SysV files
    shell: "find /etc -name '*ceph*' -delete"
    when:
      ansible_distribution == 'Ubuntu'

  - name: remove Upstart and apt logs and cache
    shell: "find /var -name '*ceph*' -delete"
    when:
      ansible_distribution == 'Ubuntu'

  - name: request data removal
    local_action: shell echo requesting data removal
    notify:
      - remove data

  - name: purge yum cache
    command: yum clean all
    when:
      ansible_pkg_mgr == 'yum'

  - name: purge dnf cache
    command: dnf clean all
    when:
      ansible_pkg_mgr == 'dnf'

  - name: purge RPM cache in /tmp
    file:
      path: /tmp/rh-storage-repo
      state: absent

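  # NOTE: deleting /var/lib/apt/lists also removes the package indexes, so
  # `apt-get update` is required before the next package installation.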
  - name: clean apt
    shell: apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
    when:
      ansible_pkg_mgr == 'apt'