purge: ensure no ceph kernel thread is present

This first tries to unmount any cephfs/nfs-ganesha mount points on client
nodes, then unmaps any mapped rbd devices, and finally tries to remove the
ceph kernel modules.
If any of these steps fails, some resources are still busy and must be
cleaned up manually before the purge can continue.
This is done early in the playbook so the cluster stays untouched until
everything is ready for that operation; otherwise a later redeployment
could get confused by leftovers from the previous deployment.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1337915

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/4158/head
Guillaume Abrioux 2019-06-21 16:10:16 +02:00
parent 45d46541cb
commit 20e4852888
2 changed files with 159 additions and 67 deletions
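
The unload step doubles as the actual check: the modprobe module with state=absent runs "modprobe -r", which refuses to unload a module whose kernel threads are still in use, so a leftover cephfs mount or rbd map surfaces as a hard failure. If an explicit post-check is wanted, a task along these lines could follow the unload step; this is only a sketch, and the task names and lsmod-based test are illustrative assumptions, not part of this commit:

    # Hypothetical follow-up check (not in this commit): assert that none
    # of the ceph kernel modules survived the unload step.
    - name: list loaded kernel modules
      command: lsmod
      register: loaded_modules
      changed_when: false

    - name: fail if a ceph kernel module is still loaded
      fail:
        msg: "{{ item }} is still loaded, clean up its users manually before purging"
      with_items:
        - rbd
        - ceph
        - libceph
      when: loaded_modules.stdout is search('^' ~ item ~ '\s', multiline=True)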

infrastructure-playbooks/purge-cluster.yml

@@ -49,6 +49,72 @@
  tasks:
    - debug: msg="gather facts on all Ceph hosts for following reference"

- name: check there's no ceph kernel threads present
  hosts: "{{ client_group_name|default('clients') }}"
  become: true
  any_errors_fatal: true
  tasks:
    - import_role:
        name: ceph-defaults

    - block:
        - name: get nfs nodes ansible facts
          setup:
          delegate_to: "{{ item }}"
          delegate_facts: True
          with_items: "{{ groups[nfs_group_name] }}"
          run_once: true

        - name: get all nfs-ganesha mount points
          command: grep "{{ hostvars[item]['ansible_all_ipv4_addresses'] | ipaddr(public_network) | first }}" /proc/mounts
          register: nfs_ganesha_mount_points
          failed_when: false
          with_items: "{{ groups[nfs_group_name] }}"

        - name: ensure nfs-ganesha mountpoint(s) are unmounted
          mount:
            path: "{{ item.split(' ')[1] }}"
            state: unmounted
          with_items:
            - "{{ nfs_ganesha_mount_points.results | map(attribute='stdout_lines') | list }}"
          when: item | length > 0
      when: groups[nfs_group_name] | default([]) | length > 0

    - name: ensure cephfs mountpoint(s) are unmounted
      command: umount -a -t ceph

    - name: ensure rbd devices are unmapped
      command: rbdmap unmap-all

    - name: unload ceph kernel modules
      modprobe:
        name: "{{ item }}"
        state: absent
      with_items:
        - rbd
        - ceph
        - libceph

- name: purge ceph nfs cluster
  vars:
    nfs_group_name: nfss
  hosts: "{{ nfs_group_name|default('nfss') }}"
  gather_facts: false # Already gathered previously
  become: true
  tasks:
    - name: stop ceph nfss with systemd
      service:
        name: nfs-ganesha
        state: stopped
      failed_when: false
      when: ansible_service_mgr == 'systemd'

- name: purge node-exporter
  hosts:
@@ -269,27 +335,6 @@
      failed_when: false

- name: purge ceph nfs cluster
  vars:
    nfs_group_name: nfss
  hosts: "{{ nfs_group_name|default('nfss') }}"
  gather_facts: false # Already gathered previously
  become: true
  tasks:
    - name: stop ceph nfss with systemd
      service:
        name: nfs-ganesha
        state: stopped
      failed_when: false
      when: ansible_service_mgr == 'systemd'

- name: purge ceph osd cluster
  vars:
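
The client-side detection in this first playbook keys off the NFS servers' addresses: the "get all nfs-ganesha mount points" task greps /proc/mounts for each NFS node's public_network IP, and the unmount task takes the second whitespace-separated field of each matching line as the path to unmount. A minimal self-contained sketch of that same data flow, with all host facts and paths assumed for illustration:

    # Standalone illustration (assumed values): with_items flattens the
    # per-host stdout_lines lists one level, so each item is a single
    # /proc/mounts line and split(' ')[1] yields its mount point.
    - hosts: localhost
      gather_facts: false
      vars:
        fake_grep_results:
          - stdout_lines: ["192.168.121.12:/ /mnt/ganesha nfs4 rw,relatime 0 0"]
          - stdout_lines: []  # NFS node with nothing mounted from it
      tasks:
        - debug:
            msg: "would unmount {{ item.split(' ')[1] }}"
          with_items: "{{ fake_grep_results | map(attribute='stdout_lines') | list }}"
          when: item | length > 0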

infrastructure-playbooks/purge-docker-cluster.yml

@@ -33,6 +33,99 @@
        ceph_docker_registry: "docker.io"
      when: ceph_docker_registry is not defined

- name: check there's no ceph kernel threads present
  hosts: "{{ client_group_name|default('clients') }}"
  become: true
  any_errors_fatal: true
  tasks:
    - import_role:
        name: ceph-defaults

    - block:
        - name: get nfs nodes ansible facts
          setup:
          delegate_to: "{{ item }}"
          delegate_facts: True
          with_items: "{{ groups[nfs_group_name] }}"
          run_once: true

        - name: get all nfs-ganesha mount points
          command: grep "{{ hostvars[item]['ansible_all_ipv4_addresses'] | ipaddr(public_network) | first }}" /proc/mounts
          register: nfs_ganesha_mount_points
          failed_when: false
          with_items: "{{ groups[nfs_group_name] }}"

        - name: ensure nfs-ganesha mountpoint(s) are unmounted
          mount:
            path: "{{ item.split(' ')[1] }}"
            state: unmounted
          with_items:
            - "{{ nfs_ganesha_mount_points.results | map(attribute='stdout_lines') | list }}"
          when: item | length > 0
      when: groups[nfs_group_name] | default([]) | length > 0

    - name: ensure cephfs mountpoint(s) are unmounted
      command: umount -a -t ceph

    - name: ensure rbd devices are unmapped
      command: rbdmap unmap-all

    - name: unload ceph kernel modules
      modprobe:
        name: "{{ item }}"
        state: absent
      with_items:
        - rbd
        - ceph
        - libceph

- name: purge ceph nfs cluster
  hosts: "{{ nfs_group_name|default('nfss') }}"
  become: true
  tasks:
    - name: disable ceph nfs service
      service:
        name: "ceph-nfs@{{ ansible_hostname }}"
        state: stopped
        enabled: no
      ignore_errors: true

    - name: remove ceph nfs container
      docker_container:
        image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
        name: "ceph-nfs-{{ ansible_hostname }}"
        state: absent
      ignore_errors: true

    - name: remove ceph nfs service
      file:
        path: /etc/systemd/system/ceph-nfs@.service
        state: absent

    - name: remove ceph nfs directories for "{{ ansible_hostname }}"
      file:
        path: "{{ item }}"
        state: absent
      with_items:
        - /etc/ganesha
        - /var/lib/nfs/ganesha
        - /var/run/ganesha

    - name: remove ceph nfs image
      docker_image:
        state: absent
        repository: "{{ ceph_docker_registry }}"
        name: "{{ ceph_docker_image }}"
        tag: "{{ ceph_docker_image_tag }}"
        force: yes
      tags: remove_img

- name: purge ceph mds cluster
  hosts: "{{ mds_group_name|default('mdss') }}"
@@ -243,52 +336,6 @@
      tags: remove_img

- name: purge ceph nfs cluster
  hosts: "{{ nfs_group_name|default('nfss') }}"
  become: true
  tasks:
    - name: disable ceph nfs service
      service:
        name: "ceph-nfs@{{ ansible_hostname }}"
        state: stopped
        enabled: no
      ignore_errors: true

    - name: remove ceph nfs container
      docker_container:
        image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
        name: "ceph-nfs-{{ ansible_hostname }}"
        state: absent
      ignore_errors: true

    - name: remove ceph nfs service
      file:
        path: /etc/systemd/system/ceph-nfs@.service
        state: absent

    - name: remove ceph nfs directories for "{{ ansible_hostname }}"
      file:
        path: "{{ item }}"
        state: absent
      with_items:
        - /etc/ganesha
        - /var/lib/nfs/ganesha
        - /var/run/ganesha

    - name: remove ceph nfs image
      docker_image:
        state: absent
        repository: "{{ ceph_docker_registry }}"
        name: "{{ ceph_docker_image }}"
        tag: "{{ ceph_docker_image_tag }}"
        force: yes
      tags: remove_img

- name: purge ceph osd cluster
  hosts: "{{ osd_group_name | default('osds') }}"