---
# This playbook purges Ceph
# It removes: packages, configuration files and ALL THE DATA
#
# Use it like this:
# ansible-playbook purge-cluster.yml
#     Prompts for confirmation to purge, defaults to no and
#     doesn't purge the cluster. yes purges the cluster.
#
# ansible-playbook -e ireallymeanit=yes|no purge-cluster.yml
#     Overrides the prompt using -e option. Can be used in
#     automation scripts to avoid interactive prompt.

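# For unattended runs the prompt can be skipped entirely, for example:
#   ansible-playbook -i <your-inventory> -e ireallymeanit=yes purge-cluster.yml
# (<your-inventory> is a placeholder for whatever inventory file you normally use.)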
- name: confirm whether user really meant to purge the cluster
  hosts: localhost
  gather_facts: false
  vars_prompt:
    - name: ireallymeanit
      prompt: Are you sure you want to purge the cluster?
      default: 'no'
      private: no
  tasks:
    - name: exit playbook, if user did not mean to purge cluster
      fail:
        msg: >
          "Exiting purge-cluster playbook, cluster was NOT purged.
           To purge the cluster, either say 'yes' on the prompt or
           use `-e ireallymeanit=yes` on the command line when
           invoking the playbook"
      when: ireallymeanit != 'yes'

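# Facts are gathered once here for every Ceph host; later plays set
# "gather_facts: false" and reuse what is collected in this play.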
- name: gather facts on all hosts
  hosts:
    - "{{ mon_group_name|default('mons') }}"
    - "{{ osd_group_name|default('osds') }}"
    - "{{ mds_group_name|default('mdss') }}"
    - "{{ rgw_group_name|default('rgws') }}"
    - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
    - "{{ nfs_group_name|default('nfss') }}"
    - "{{ client_group_name|default('clients') }}"
    - "{{ mgr_group_name|default('mgrs') }}"
    - grafana-server
  become: true
  tasks:
    - debug: msg="gather facts on all Ceph hosts for following reference"

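# On client nodes, make sure nothing is still using Ceph through the kernel:
# unmount nfs-ganesha and cephfs mounts, unmap rbd devices via sysfs and
# unload the rbd/ceph/libceph kernel modules before purging.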
- name: check there's no ceph kernel threads present
  hosts: "{{ client_group_name|default('clients') }}"
  become: true
  any_errors_fatal: true

  tasks:
    - import_role:
        name: ceph-defaults

    - block:
        - name: get nfs nodes ansible facts
          setup:
            gather_subset:
              - 'all'
              - '!facter'
              - '!ohai'
          delegate_to: "{{ item }}"
          delegate_facts: True
          with_items: "{{ groups[nfs_group_name] }}"
          run_once: true

        - name: get all nfs-ganesha mount points
          command: grep "{{ hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
          register: nfs_ganesha_mount_points
          failed_when: false
          with_items: "{{ groups[nfs_group_name] }}"

        - name: ensure nfs-ganesha mountpoint(s) are unmounted
          mount:
            path: "{{ item.split(' ')[1] }}"
            state: unmounted
          with_items:
            - "{{ nfs_ganesha_mount_points.results | map(attribute='stdout_lines') | list }}"
          when: item | length > 0
      when: groups[nfs_group_name] | default([]) | length > 0

    - name: ensure cephfs mountpoint(s) are unmounted
      command: umount -a -t ceph
      changed_when: false

    - name: find mapped rbd ids
      find:
        paths: /sys/bus/rbd/devices
        file_type: any
      register: rbd_mapped_ids

    - name: use sysfs to unmap rbd devices
      shell: "echo {{ item.path | basename }} > /sys/bus/rbd/remove_single_major"
      changed_when: false
      with_items: "{{ rbd_mapped_ids.files }}"

    - name: unload ceph kernel modules
      modprobe:
        name: "{{ item }}"
        state: absent
      with_items:
        - rbd
        - ceph
        - libceph

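# Stop the nfs-ganesha service on the NFS gateway nodes.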
- name: purge ceph nfs cluster

  vars:
    nfs_group_name: nfss

  hosts: "{{ nfs_group_name|default('nfss') }}"

  gather_facts: false # Already gathered previously

  become: true

  tasks:

    - name: stop ceph nfss with systemd
      service:
        name: nfs-ganesha
        state: stopped
      failed_when: false
      when: ansible_service_mgr == 'systemd'

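# Monitoring pieces (node_exporter here, and the grafana/prometheus/alertmanager
# stack below) are only purged when dashboard_enabled is true.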
- name: purge node-exporter
  hosts:
    - "{{ mon_group_name|default('mons') }}"
    - "{{ osd_group_name|default('osds') }}"
    - "{{ mds_group_name|default('mdss') }}"
    - "{{ rgw_group_name|default('rgws') }}"
    - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
    - "{{ nfs_group_name|default('nfss') }}"
    - "{{ client_group_name|default('clients') }}"
    - "{{ mgr_group_name|default('mgrs') }}"
    - grafana-server
    - clients
    - iscsigws

  become: true

  tasks:
    - import_role:
        name: ceph-defaults

    - block:
        - import_role:
            name: ceph-facts
            tasks_from: container_binary

        - name: disable node_exporter service
          service:
            name: node_exporter
            state: stopped
            enabled: no
          failed_when: false

        - name: remove node_exporter service file
          file:
            name: /etc/systemd/system/node_exporter.service
            state: absent

        - name: remove node-exporter image
          command: "{{ container_binary }} rmi {{ node_exporter_container_image }}"
          failed_when: false
          tags:
            - remove_img
      when: dashboard_enabled | bool

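# Tear down the dashboard monitoring stack on the grafana-server host:
# stop the services, then drop their unit files, container images and data.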
- name: purge ceph grafana-server
  hosts: grafana-server
  become: true
  vars:
    grafana_services:
      - grafana-server
      - prometheus
      - alertmanager

  tasks:
    - import_role:
        name: ceph-defaults

    - block:
        - import_role:
            name: ceph-facts
            tasks_from: container_binary

        - name: stop services
          service:
            name: "{{ item }}"
            state: stopped
            enabled: no
          with_items: "{{ grafana_services }}"
          failed_when: false

        - name: remove service files
          file:
            name: "/etc/systemd/system/{{ item }}.service"
            state: absent
          with_items: "{{ grafana_services }}"
          failed_when: false

        - name: remove ceph dashboard container images
          command: "{{ container_binary }} rmi {{ item }}"
          with_items:
            - "{{ prometheus_container_image }}"
            - "{{ grafana_container_image }}"
            - "{{ alertmanager_container_image }}"
          failed_when: false
          tags:
            - remove_img

        - name: remove data
          file:
            name: "{{ item }}"
            state: absent
          with_items:
            - /etc/grafana/dashboards
            - /etc/grafana/grafana.ini
            - /etc/grafana/provisioning
            - /var/lib/grafana
            - /etc/alertmanager
            - /var/lib/alertmanager
            - /var/lib/prometheus
            - /etc/prometheus
          failed_when: false
      when: dashboard_enabled | bool

- name: purge ceph mds cluster

  vars:
    mds_group_name: mdss

  hosts: "{{ mds_group_name|default('mdss') }}"

  gather_facts: false # Already gathered previously

  become: true

  tasks:

    - name: stop ceph mdss with systemd
      service:
        name: ceph-mds@{{ ansible_hostname }}
        state: stopped
        enabled: no
      failed_when: false

- name: purge ceph mgr cluster

  vars:
    mgr_group_name: mgrs

  hosts: "{{ mgr_group_name|default('mgrs') }}"

  gather_facts: false # Already gathered previously

  become: true

  tasks:

    - name: stop ceph mgrs with systemd
      service:
        name: ceph-mgr@{{ ansible_hostname }}
        state: stopped
        enabled: no
      failed_when: false
      when: ansible_service_mgr == 'systemd'

- name: purge rgwloadbalancer cluster

  vars:
    rgwloadbalancer_group_name: rgwloadbalancers

  hosts:
    - "{{ rgwloadbalancer_group_name|default('rgwloadbalancers') }}"

  gather_facts: false # Already gathered previously

  become: true

  tasks:

    - name: stop rgwloadbalancer services
      service:
        name: ['keepalived', 'haproxy']
        state: stopped
        enabled: no
      failed_when: false

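# ceph-facts/set_radosgw_address is imported below so that rgw_instances is
# populated and every radosgw systemd instance can be stopped.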
- name: purge ceph rgw cluster

  vars:
    rgw_group_name: rgws

  hosts: "{{ rgw_group_name|default('rgws') }}"

  gather_facts: false # Already gathered previously

  become: true

  tasks:
    - import_role:
        name: ceph-defaults

    - import_role:
        name: ceph-facts
        tasks_from: set_radosgw_address

    - name: stop ceph rgws with systemd
      service:
        name: "ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
        state: stopped
        enabled: no
      failed_when: false
      with_items: "{{ rgw_instances }}"

- name: purge ceph rbd-mirror cluster

  vars:
    rbdmirror_group_name: rbdmirrors

  hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"

  gather_facts: false # Already gathered previously

  become: true

  tasks:

    - name: stop ceph rbd mirror with systemd
      service:
        name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
        state: stopped
      failed_when: false

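# OSD nodes: stop the OSDs, unmount and wipe their devices. The
# "restart machine" / "wait for server to boot" / "remove data" handlers only
# fire when reboot_osd_node is true and removing /var/lib/ceph/osd failed.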
- name: purge ceph osd cluster

  vars:
    osd_group_name: osds
    reboot_osd_node: False

  hosts: "{{ osd_group_name|default('osds') }}"

  gather_facts: false # Already gathered previously

  become: true

  handlers:
    - name: restart machine
      shell: sleep 2 && shutdown -r now "Ansible updates triggered"
      async: 1
      poll: 0
      ignore_errors: true

    - name: wait for server to boot
      become: false
      local_action:
        module: wait_for
        port: 22
        host: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
        state: started
        delay: 10
        timeout: 500

    - name: remove data
      shell: rm -rf /var/lib/ceph/*

  tasks:

    - import_role:
        name: ceph-defaults

    - name: default lvm_volumes if not defined
      set_fact:
        lvm_volumes: []
      when: lvm_volumes is not defined

    - name: get osd numbers
      shell: |
        set -o pipefail;
        if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi
      register: osd_ids
      changed_when: false

    - name: stop ceph-osd with systemd
      service:
        name: ceph-osd@{{ item }}
        state: stopped
        enabled: no
      with_items: "{{ osd_ids.stdout_lines }}"
      when: ansible_service_mgr == 'systemd'

    - name: remove ceph udev rules
      file:
        path: "{{ item }}"
        state: absent
      with_items:
        - /usr/lib/udev/rules.d/95-ceph-osd.rules
        - /usr/lib/udev/rules.d/60-ceph-by-parttypeuuid.rules

    # NOTE(leseb): hope someone will find a more elegant way one day...
    - name: see if encrypted partitions are present
      shell: |
        set -o pipefail;
        blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2
      register: encrypted_ceph_partuuid
      failed_when: false
      changed_when: false

    - name: get osd data and lockbox mount points
      shell: |
        set -o pipefail;
        (grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'
      register: mounted_osd
      changed_when: false

    - name: drop all cache
      shell: "sync && sleep 1 && echo 3 > /proc/sys/vm/drop_caches"
      changed_when: false

    - name: umount osd data partition
      mount:
        path: "{{ item }}"
        state: unmounted
      with_items: "{{ mounted_osd.stdout_lines }}"

    - name: remove osd mountpoint tree
      file:
        path: /var/lib/ceph/osd/
        state: absent
      register: remove_osd_mountpoints
      ignore_errors: true

    - name: is reboot needed
      local_action:
        module: command
          echo requesting reboot
      become: false
      notify:
        - restart machine
        - wait for server to boot
        - remove data
      when:
        - reboot_osd_node | bool
        - remove_osd_mountpoints.failed is defined

    - name: wipe table on dm-crypt devices
      command: dmsetup wipe_table --force "{{ item }}"
      with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
      when: encrypted_ceph_partuuid.stdout_lines | length > 0

    - name: delete dm-crypt devices if any
      command: dmsetup remove --retry --force {{ item }}
      with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
      when: encrypted_ceph_partuuid.stdout_lines | length > 0

    - name: get payload_offset
      shell: |
        set -o pipefail;
        cryptsetup luksDump /dev/disk/by-partuuid/{{ item }} | awk '/Payload offset:/ { print $3 }'
      register: payload_offset
      with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
      when: encrypted_ceph_partuuid.stdout_lines | length > 0

    - name: get physical sector size
      command: blockdev --getpbsz /dev/disk/by-partuuid/{{ item }}
      changed_when: false
      with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
      when: encrypted_ceph_partuuid.stdout_lines | length > 0
      register: phys_sector_size

    - name: wipe dmcrypt device
      command: dd if=/dev/zero of=/dev/disk/by-partuuid/{{ item.0 }} bs={{ item.1.stdout }} count={{ item.2.stdout }} oflag=direct
      changed_when: false
      with_together:
        - "{{ encrypted_ceph_partuuid.stdout_lines }}"
        - "{{ payload_offset.results }}"
        - "{{ phys_sector_size.results }}"

    - name: get ceph data partitions
      shell: |
        blkid -o device -t PARTLABEL="ceph data"
      changed_when: false
      failed_when: false
      register: ceph_data_partition_to_erase_path

    - name: get ceph lockbox partitions
      shell: |
        blkid -o device -t PARTLABEL="ceph lockbox"
      changed_when: false
      failed_when: false
      register: ceph_lockbox_partition_to_erase_path

    - name: see if ceph-volume is installed
      command: command -v ceph-volume
      changed_when: false
      failed_when: false
      register: ceph_volume_present

    - name: zap and destroy osds created by ceph-volume with lvm_volumes
      ceph_volume:
        data: "{{ item.data }}"
        data_vg: "{{ item.data_vg|default(omit) }}"
        journal: "{{ item.journal|default(omit) }}"
        journal_vg: "{{ item.journal_vg|default(omit) }}"
        db: "{{ item.db|default(omit) }}"
        db_vg: "{{ item.db_vg|default(omit) }}"
        wal: "{{ item.wal|default(omit) }}"
        wal_vg: "{{ item.wal_vg|default(omit) }}"
        action: "zap"
      environment:
        CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
      with_items: "{{ lvm_volumes }}"
      when:
        - lvm_volumes | default([]) | length > 0
        - ceph_volume_present.rc == 0

    - name: zap and destroy osds created by ceph-volume with devices
      ceph_volume:
        data: "{{ item }}"
        action: "zap"
      environment:
        CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
      with_items: "{{ devices | default([]) }}"
      when:
        - devices | default([]) | length > 0
        - ceph_volume_present.rc == 0

    - name: get ceph block partitions
      shell: |
        blkid -o device -t PARTLABEL="ceph block"
      changed_when: false
      failed_when: false
      register: ceph_block_partition_to_erase_path

    - name: get ceph journal partitions
      shell: |
        blkid -o device -t PARTLABEL="ceph journal"
      changed_when: false
      failed_when: false
      register: ceph_journal_partition_to_erase_path

    - name: get ceph db partitions
      shell: |
        blkid -o device -t PARTLABEL="ceph block.db"
      changed_when: false
      failed_when: false
      register: ceph_db_partition_to_erase_path

    - name: get ceph wal partitions
      shell: |
        blkid -o device -t PARTLABEL="ceph block.wal"
      changed_when: false
      failed_when: false
      register: ceph_wal_partition_to_erase_path

    - name: set_fact combined_devices_list
      set_fact:
        combined_devices_list: "{{ ceph_data_partition_to_erase_path.stdout_lines +
                                   ceph_lockbox_partition_to_erase_path.stdout_lines +
                                   ceph_block_partition_to_erase_path.stdout_lines +
                                   ceph_journal_partition_to_erase_path.stdout_lines +
                                   ceph_db_partition_to_erase_path.stdout_lines +
                                   ceph_wal_partition_to_erase_path.stdout_lines }}"

    - name: resolve parent device
      command: lsblk --nodeps -no pkname "{{ item }}"
      register: tmp_resolved_parent_device
      changed_when: false
      with_items: "{{ combined_devices_list }}"

    - name: set_fact resolved_parent_device
      set_fact:
        resolved_parent_device: "{{ tmp_resolved_parent_device.results | map(attribute='stdout') | list | unique }}"

    - name: wipe partitions
      shell: |
        set -o pipefail;
        wipefs --all "{{ item }}"
        dd if=/dev/zero of="{{ item }}" bs=1 count=4096
      changed_when: false
      with_items: "{{ combined_devices_list }}"

    - name: zap ceph journal/block db/block wal partitions
      shell: |
        set -o pipefail;
        # if the disk passed is a raw device AND the boot system disk
        if parted -s /dev/"{{ item }}" print | grep -sq boot; then
          echo "Looks like /dev/{{ item }} has a boot partition,"
          echo "if you want to delete specific partitions point to the partition instead of the raw device"
          echo "Do not use your system disk!"
          exit 1
        fi
        sgdisk -Z --clear --mbrtogpt -g -- /dev/"{{ item }}"
        dd if=/dev/zero of=/dev/"{{ item }}" bs=1M count=200
        parted -s /dev/"{{ item }}" mklabel gpt
        partprobe /dev/"{{ item }}"
        udevadm settle --timeout=600
      with_items: "{{ resolved_parent_device }}"
      changed_when: false

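# Monitors are purged after the other daemons; their store and the bootstrap
# keyrings under /var/lib/ceph are removed here.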
- name: purge ceph mon cluster

  vars:
    mon_group_name: mons

  hosts: "{{ mon_group_name|default('mons') }}"

  gather_facts: false # already gathered previously

  become: true

  tasks:

    - name: stop ceph mons with systemd
      service:
        name: "ceph-{{ item }}@{{ ansible_hostname }}"
        state: stopped
        enabled: no
      failed_when: false
      with_items:
        - mon
        - mgr

    - name: remove monitor store and bootstrap keys
      file:
        path: "{{ item }}"
        state: absent
      with_items:
        - /var/lib/ceph/mon
        - /var/lib/ceph/bootstrap-mds
        - /var/lib/ceph/bootstrap-osd
        - /var/lib/ceph/bootstrap-rgw
        - /var/lib/ceph/bootstrap-rbd
        - /var/lib/ceph/bootstrap-mgr
        - /var/lib/ceph/tmp

- name: purge ceph-crash daemons
  hosts:
    - "{{ mon_group_name | default('mons') }}"
    - "{{ osd_group_name | default('osds') }}"
    - "{{ mds_group_name | default('mdss') }}"
    - "{{ rgw_group_name | default('rgws') }}"
    - "{{ rbdmirror_group_name | default('rbdmirrors') }}"
    - "{{ mgr_group_name | default('mgrs') }}"
  gather_facts: false
  become: true
  tasks:
    - name: stop ceph-crash service
      service:
        name: ceph-crash.service
        state: stopped
        enabled: no
      failed_when: false

    - name: remove /var/lib/ceph/crash
      file:
        path: /var/lib/ceph/crash
        state: absent

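# Final pass on every host: verify nothing is still running as the ceph user,
# purge the packages, then remove configuration, logs, sockets and any
# leftover systemd unit files. The "remove data" handler chain unmounts any
# remaining OSD mounts before deleting /var/lib/ceph.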
- name: final cleanup - check any running ceph, purge ceph packages, purge config and remove data

  vars:
    # When set to true both groups of packages are purged.
    # This can cause problems with qemu-kvm
    purge_all_packages: true

    ceph_packages:
      - ceph
      - ceph-common
      - ceph-fs-common
      - ceph-fuse
      - ceph-mds
      - ceph-mgr
      - ceph-release
      - ceph-radosgw
      - calamari-server
      - ceph-grafana-dashboards

    ceph_remaining_packages:
      - libcephfs1
      - libcephfs2
      - librados2
      - libradosstriper1
      - librbd1
      - python-ceph-argparse
      - python-cephfs
      - python-rados
      - python-rbd

    extra_packages:
      - keepalived
      - haproxy

  hosts:
    - "{{ mon_group_name|default('mons') }}"
    - "{{ osd_group_name|default('osds') }}"
    - "{{ mds_group_name|default('mdss') }}"
    - "{{ rgw_group_name|default('rgws') }}"
    - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
    - "{{ nfs_group_name|default('nfss') }}"
    - "{{ client_group_name|default('clients') }}"
    - "{{ mgr_group_name|default('mgrs') }}"
    - grafana-server

  gather_facts: false # Already gathered previously

  become: true

  handlers:
    - name: get osd data and lockbox mount points
      shell: "(grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'"
      register: mounted_osd
      changed_when: false
      listen: "remove data"

    - name: umount osd data partition
      shell: umount {{ item }}
      with_items: "{{ mounted_osd.stdout_lines }}"
      listen: "remove data"

    - name: remove data
      shell: rm -rf /var/lib/ceph/*
      listen: "remove data"

  tasks:

    - name: purge ceph packages with yum
      yum:
        name: "{{ ceph_packages }}"
        state: absent
      when: ansible_pkg_mgr == 'yum'

    - name: purge ceph packages with dnf
      dnf:
        name: "{{ ceph_packages }}"
        state: absent
      when: ansible_pkg_mgr == 'dnf'

    - name: purge ceph packages with apt
      apt:
        name: "{{ ceph_packages }}"
        state: absent
        purge: true
      when: ansible_pkg_mgr == 'apt'

    - name: purge remaining ceph packages with yum
      yum:
        name: "{{ ceph_remaining_packages }}"
        state: absent
      when:
        - ansible_pkg_mgr == 'yum'
        - purge_all_packages | bool

    - name: purge remaining ceph packages with dnf
      dnf:
        name: "{{ ceph_remaining_packages }}"
        state: absent
      when:
        - ansible_pkg_mgr == 'dnf'
        - purge_all_packages | bool

    - name: purge remaining ceph packages with apt
      apt:
        name: "{{ ceph_remaining_packages }}"
        state: absent
      when:
        - ansible_pkg_mgr == 'apt'
        - purge_all_packages | bool

    - name: purge extra packages with yum
      yum:
        name: "{{ extra_packages }}"
        state: absent
      when:
        - ansible_pkg_mgr == 'yum'
        - purge_all_packages | bool

    - name: purge extra packages with dnf
      dnf:
        name: "{{ extra_packages }}"
        state: absent
      when:
        - ansible_pkg_mgr == 'dnf'
        - purge_all_packages | bool

    - name: purge extra packages with apt
      apt:
        name: "{{ extra_packages }}"
        state: absent
      when:
        - ansible_pkg_mgr == 'apt'
        - purge_all_packages | bool

    - name: remove config and any ceph socket left
      file:
        path: "{{ item }}"
        state: absent
      with_items:
        - /etc/ceph
        - /etc/keepalived
        - /etc/haproxy
        - /run/ceph

    - name: remove logs
      file:
        path: /var/log/ceph
        state: absent

    - name: request data removal
      local_action:
        module: command
          echo requesting data removal # noqa 301
      become: false
      notify: remove data

    - name: purge dnf cache
      command: dnf clean all
      when: ansible_pkg_mgr == 'dnf'

    - name: purge rpm cache in /tmp
      file:
        path: /tmp/rh-storage-repo
        state: absent

    - name: clean apt
      command: apt-get clean
      when: ansible_pkg_mgr == 'apt'

    - name: purge ceph repo file in /etc/yum.repos.d
      file:
        path: '/etc/yum.repos.d/{{ item }}.repo'
        state: absent
      with_items:
        - ceph-dev
        - ceph_stable
        - rh_storage
      when: ansible_os_family == 'RedHat'

    - name: check for anything running ceph
      command: "ps -u ceph -U ceph"
      register: check_for_running_ceph
      changed_when: false
      failed_when: check_for_running_ceph.rc == 0

    - name: find ceph systemd unit files to remove
      find:
        paths: "/etc/systemd/system"
        pattern: "ceph*"
        recurse: true
        file_type: any
      register: systemd_files

    - name: remove ceph systemd unit files
      file:
        path: "{{ item.path }}"
        state: absent
      with_items: "{{ systemd_files.files }}"
      when: ansible_service_mgr == 'systemd'

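# Finally, remove the local fetch directory (defaults to "fetch/") that the
# deployment used to stage fetched files on the Ansible controller.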
- name: purge fetch directory

  hosts: localhost

  gather_facts: false

  tasks:

    - name: set fetch_directory value if not set
      set_fact:
        fetch_directory: "fetch/"
      when: fetch_directory is not defined

    - name: purge fetch directory for localhost
      file:
        path: "{{ fetch_directory | default('fetch/') }}"
        state: absent