shrink-osd: purge dedicated devices

Once the OSD is destroyed we also have to purge the associated dedicated
devices; this means purging the journal, db and wal partitions too.

This now works for both containerized and non-containerized deployments.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1572933
Signed-off-by: Sébastien Han <seb@redhat.com>
pull/2907/head
Sébastien Han 2018-07-19 15:45:55 +02:00 committed by Guillaume Abrioux
parent 8fcd63cc50
commit 36fb3cdecb
1 changed file with 55 additions and 0 deletions
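
To find those dedicated partitions, the new tasks below grep the output of ceph-disk list for the OSD id being removed and pull out every device path on the matching line. A minimal sketch of the idea, with made-up device names and OSD id (the real ceph-disk output may be formatted slightly differently):

    # osd.0 with its data on /dev/sda1 and a dedicated journal on /dev/sdb1:
    echo "/dev/sda1 ceph data, active, cluster ceph, osd.0, journal /dev/sdb1" \
      | grep -Eo '/dev/([hsv]d[a-z]{1,2})[0-9]{1,2}|/dev/nvme[0-9]n[0-9]p[0-9]'
    # prints both the data and the dedicated partition:
    #   /dev/sda1
    #   /dev/sdb1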


@@ -116,6 +116,29 @@
  when:
    - containerized_deployment
- name: find osd dedicated devices - container
  shell: >
    docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
    {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
    list | grep osd.{{ item.0 }} | grep -Eo '/dev/([hsv]d[a-z]{1,2})[0-9]{1,2}|/dev/nvme[0-9]n[0-9]p[0-9]'
  with_together:
    - "{{ osd_to_kill.split(',') }}"
    - "{{ osd_hosts }}"
  register: osd_to_kill_disks_dedicated
  delegate_to: "{{ item.1 }}"
  when:
    - containerized_deployment
- name: find osd dedicated devices - non container
  shell: ceph-disk list | grep osd.{{ item.0 }} | grep -Eo '/dev/([hsv]d[a-z]{1,2})[0-9]{1,2}|/dev/nvme[0-9]n[0-9]p[0-9]'
  with_together:
    - "{{ osd_to_kill.split(',') }}"
    - "{{ osd_hosts }}"
  register: osd_to_kill_disks_dedicated_non_container
  delegate_to: "{{ item.1 }}"
  when:
    - not containerized_deployment
# if nvme then osd_to_kill_disks is nvme0n1, we need nvme0
# if ssd or hdd then osd_to_kill_disks is sda1, we need sda
- name: stop osd services (container)
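
The comment above about nvme0n1 vs nvme0 and sda1 vs sda refers to stripping the trailing partition or namespace suffix from the device name, presumably so the per-device OSD service/container name can be derived before stopping it. A hedged illustration of that transformation (not necessarily how the playbook itself implements it):

    echo nvme0n1 | sed -E 's/n[0-9]+$//'   # -> nvme0
    echo sda1    | sed -E 's/[0-9]+$//'    # -> sda
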
@@ -156,6 +179,23 @@
  when:
    - containerized_deployment
- name: zap ceph osd partitions from dedicated devices
  shell: |
    for part in {{ item.1.stdout_lines | join(' ') }}; do
      docker run --rm \
      --privileged=true \
      --name ceph-osd-zap-{{ ansible_hostname }}-$(basename $part) \
      -v /dev/:/dev/ \
      -e OSD_DEVICE=$part \
      {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
      zap_device
    done
  with_together:
    - "{{ osd_hosts }}"
    - "{{ osd_to_kill_disks_dedicated.results }}"
  delegate_to: "{{ item.0 }}"
  when:
    - containerized_deployment
    - item.1.stdout_lines | default([]) | length > 0
- name: deactivating osd(s)
  command: ceph-disk deactivate --cluster {{ cluster }} --deactivate-by-id {{ item.0 }} --mark-out
  run_once: true
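
For reference, the zap command above renders to roughly the following for a single dedicated partition (hostname, partition, registry, image and tag are all illustrative):

    docker run --rm \
      --privileged=true \
      --name ceph-osd-zap-osd-node-1-sdb1 \
      -v /dev/:/dev/ \
      -e OSD_DEVICE=/dev/sdb1 \
      docker.io/ceph/daemon:latest \
      zap_device
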
@@ -192,6 +232,21 @@
  delegate_to: "{{ groups[mon_group_name][0] }}"
  with_items: "{{ osd_to_kill.split(',') }}"
- name: zap dedicated partitions
  shell: |
    for part in {{ item.1.stdout_lines | join(' ') }}; do
      pkname=$(lsblk --nodeps -no PKNAME $part)
      wipefs --all $part
      dd if=/dev/zero of=$part bs=1M count=10
      partition_nb=$(echo $part | grep -oE '[0-9]{1,2}$')
      sgdisk --delete $partition_nb /dev/$pkname
    done
  with_together:
    - "{{ osd_hosts }}"
    - "{{ osd_to_kill_disks_dedicated_non_container.results }}"
  delegate_to: "{{ item.0 }}"
  when:
    - not containerized_deployment
    - item.1.stdout_lines | default([]) | length > 0
- name: show ceph health
  command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s"
  delegate_to: "{{ groups[mon_group_name][0] }}"
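
In the non-containerized case there is no zap_device entrypoint, so the task above wipes each dedicated partition directly. A manual walk-through of the same sequence for a hypothetical journal partition /dev/sdb2 (destructive, run as root):

    pkname=$(lsblk --nodeps -no PKNAME /dev/sdb2)             # parent disk, e.g. sdb
    wipefs --all /dev/sdb2                                    # drop filesystem/partition signatures
    dd if=/dev/zero of=/dev/sdb2 bs=1M count=10               # zero the first 10 MiB of the partition
    partition_nb=$(echo /dev/sdb2 | grep -oE '[0-9]{1,2}$')   # partition number, here 2
    sgdisk --delete $partition_nb /dev/$pkname                # remove partition 2 from the GPT on /dev/sdb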