shrink_osd: remove osd data directory

Otherwise an empty data directory is left behind. When shrinking and then
redeploying multiple OSDs there is no guarantee the same osd id will be
reused, so the leftover directory would never be repopulated.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 8933bfde33)
pull/5639/head
Guillaume Abrioux 2020-07-22 16:08:15 +02:00
parent 8632db7cb8
commit bd3439db75
1 changed file with 10 additions and 5 deletions
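The change has two parts: the existing umount tasks switch the mount module from state: unmounted (unmount only, fstab untouched) to state: absent (unmount and remove the fstab entry), and a new task removes the per-OSD data directory itself, since otherwise an empty /var/lib/ceph/osd/<cluster>-<id> directory survives the shrink. As a minimal standalone sketch of that cleanup pattern outside of shrink-osd.yml (the inventory group, cluster name and osd ids below are placeholders, not the playbook's own variables):

# Minimal sketch of the cleanup this commit adds to shrink-osd.yml, written as
# a standalone play; "osds", "cluster" and "osds_to_clean" are placeholders.
- name: remove leftover osd data directories
  hosts: osds
  become: true
  vars:
    cluster: ceph           # placeholder cluster name
    osds_to_clean: [3, 7]   # placeholder ids of the OSDs that were shrunk
  tasks:
    - name: remove osd data dir
      file:
        path: "/var/lib/ceph/osd/{{ cluster }}-{{ item }}"
        state: absent       # file module removes the directory recursively
      loop: "{{ osds_to_clean }}"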


@@ -148,7 +148,7 @@
     - name: umount osd lockbox
       mount:
         path: "/var/lib/ceph/osd-lockbox/{{ ceph_osd_data_json[item.2]['data']['uuid'] }}"
-        state: unmounted
+        state: absent
       loop: "{{ _osd_hosts }}"
       delegate_to: "{{ item.0 }}"
       when:
@@ -160,12 +160,10 @@
     - name: umount osd data
       mount:
         path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
-        state: unmounted
+        state: absent
       loop: "{{ _osd_hosts }}"
       delegate_to: "{{ item.0 }}"
-      when:
-        - not containerized_deployment | bool
-        - item.2 not in _lvm_list.keys()
+      when: not containerized_deployment | bool

     - name: get parent device for data partition
       command: lsblk --noheadings --output PKNAME --nodeps "{{ ceph_osd_data_json[item.2]['data']['path'] }}"
@@ -230,6 +228,13 @@
       run_once: true
       with_items: "{{ osd_to_kill.split(',') }}"

+    - name: remove osd data dir
+      file:
+        path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
+        state: absent
+      loop: "{{ _osd_hosts }}"
+      delegate_to: "{{ item.0 }}"
+
     - name: show ceph health
       command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
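To assert the cleanup actually happened, a hypothetical pair of verification tasks (not part of this commit) could be appended after the new removal task; they reuse the _osd_hosts / cluster / item.2 layout the playbook already loops over:

    - name: stat osd data dir of removed osds
      stat:
        path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
      register: leftover_osd_dirs
      loop: "{{ _osd_hosts }}"
      delegate_to: "{{ item.0 }}"

    - name: fail if an osd data dir was left behind
      fail:
        msg: "osd.{{ item.item.2 }} still has a data dir on {{ item.item.0 }}"
      loop: "{{ leftover_osd_dirs.results }}"
      when: item.stat.exists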