purge-docker-cluster: add ceph-volume support

This commit adds support for purging clusters that were deployed
with ceph-volume. It also uses a block instruction to cleanly separate
the work to do depending on whether the lvm scenario is in use or not.

Signed-off-by: Sébastien Han <seb@redhat.com>
(cherry picked from commit 1751885bc9)
Sébastien Han 2018-10-04 17:40:25 +02:00
parent 5c618d7084
commit f37c21a9d0
1 changed file with 135 additions and 92 deletions

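For context, the separation described in the commit message follows the standard Ansible block/when pattern. A minimal sketch of the shape of the change (the task names and the osd_scenario conditions come from the diff below; the debug task bodies are placeholders, not the actual purge tasks):

    - name: for ceph-disk based deployment
      block:
        - name: placeholder for the ceph-disk purge tasks
          debug:
            msg: "runs only when the lvm scenario is not used"
      when:
        - osd_scenario != "lvm"

    - name: for ceph-volume based deployments
      block:
        - name: placeholder for the ceph-volume zap tasks
          debug:
            msg: "runs only when the lvm scenario is used"
      when:
        - osd_scenario == "lvm"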

@@ -309,6 +309,14 @@
tasks:
- import_role:
name: ceph-defaults
private: false
- import_role:
name: ceph-osd
private: false
- name: get all the running osds
shell: |
systemctl list-units | grep 'loaded[[:space:]]\+active' | grep -oE "ceph-osd@([0-9]{1,2}|[a-z]+).service"
@@ -322,6 +330,15 @@
enabled: no
with_items: "{{ osd_units.stdout_lines }}"
- name: remove osd mountpoint tree
file:
path: /var/lib/ceph/osd/
state: absent
register: remove_osd_mountpoints
ignore_errors: true
- name: for ceph-disk based deployment
block:
- name: get prepare container
command: "docker ps -a -q --filter='name=ceph-osd-prepare'"
register: prepare_containers
@@ -338,13 +355,6 @@
blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2
register: encrypted_ceph_partuuid
- name: remove osd mountpoint tree
file:
path: /var/lib/ceph/osd/
state: absent
register: remove_osd_mountpoints
ignore_errors: true
- name: get ceph data partitions
command: |
blkid -o device -t PARTLABEL="ceph data"
@@ -433,6 +443,39 @@
file:
path: /etc/systemd/system/ceph-osd@.service
state: absent
when:
- osd_scenario != "lvm"
- name: for ceph-volume based deployments
block:
- name: zap and destroy osds created by ceph-volume with lvm_volumes
ceph_volume:
data: "{{ item.data }}"
data_vg: "{{ item.data_vg|default(omit) }}"
journal: "{{ item.journal|default(omit) }}"
journal_vg: "{{ item.journal_vg|default(omit) }}"
db: "{{ item.db|default(omit) }}"
db_vg: "{{ item.db_vg|default(omit) }}"
wal: "{{ item.wal|default(omit) }}"
wal_vg: "{{ item.wal_vg|default(omit) }}"
action: "zap"
environment:
CEPH_VOLUME_DEBUG: 1
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_items: "{{ lvm_volumes }}"
- name: zap and destroy osds created by ceph-volume with devices
ceph_volume:
data: "{{ item }}"
action: "zap"
environment:
CEPH_VOLUME_DEBUG: 1
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_items: "{{ devices | default([]) }}"
when:
- osd_scenario == "lvm"
- name: remove ceph osd image
docker_image:
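For reference, the two zap loops above iterate over the same variables that were used at deployment time. A hypothetical group_vars sketch of what they consume (the device paths and VG/LV names are illustrative only, not part of this commit):

    osd_scenario: lvm

    # consumed by "zap and destroy osds created by ceph-volume with lvm_volumes"
    lvm_volumes:
      - data: data-lv1        # illustrative LV name
        data_vg: vg-osd1      # illustrative VG name
        journal: /dev/sdc1    # illustrative journal partition
      - data: data-lv2
        data_vg: vg-osd2
        journal: journal-lv2
        journal_vg: vg-journal

    # consumed by "zap and destroy osds created by ceph-volume with devices"
    devices:
      - /dev/sdb
      - /dev/sdd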