shrink_osd: remove osd data directory

Otherwise it leaves an empty directory.
When shrinking and redeploying multiple OSDs, there is no guarantee that
the same osd id will be reused.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/5622/head
Guillaume Abrioux 2020-07-22 16:08:15 +02:00
parent 78e4faf077
commit 8933bfde33
1 changed file with 10 additions and 5 deletions
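For illustration, a minimal standalone sketch of the cleanup this change introduces, assuming the default cluster name 'ceph' and a placeholder osd id of 3 (the playbook itself builds the path from {{ cluster }} and item.2); Ansible's file module with state: absent deletes the directory and anything left inside it:

# Hypothetical standalone playbook, not part of this change: remove a
# leftover OSD data directory after shrinking. The host group, cluster
# name and osd id are placeholders.
- name: clean up a leftover osd data dir
  hosts: osd_node
  gather_facts: false
  tasks:
    - name: remove osd data dir
      file:
        path: /var/lib/ceph/osd/ceph-3
        state: absent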


@@ -146,7 +146,7 @@
 - name: umount osd lockbox
   mount:
     path: "/var/lib/ceph/osd-lockbox/{{ ceph_osd_data_json[item.2]['data']['uuid'] }}"
-    state: unmounted
+    state: absent
   loop: "{{ _osd_hosts }}"
   delegate_to: "{{ item.0 }}"
   when:
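As a side note on the state change in the hunk above (repeated for the osd data mount in the next hunk), a rough task-level sketch of the difference between the two mount states, with a placeholder path: unmounted only unmounts the path and leaves any fstab entry in place, while absent also removes that entry.

# Hypothetical illustration, not part of the playbook: the two mount
# states involved in this change. The path is a placeholder.
- name: unmount but keep any fstab entry (previous behaviour)
  mount:
    path: /var/lib/ceph/osd-lockbox/example-uuid
    state: unmounted

- name: unmount and remove the fstab entry as well (new behaviour)
  mount:
    path: /var/lib/ceph/osd-lockbox/example-uuid
    state: absent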
@@ -158,12 +158,10 @@
 - name: umount osd data
   mount:
     path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
-    state: unmounted
+    state: absent
   loop: "{{ _osd_hosts }}"
   delegate_to: "{{ item.0 }}"
-  when:
-    - not containerized_deployment | bool
-    - item.2 not in _lvm_list.keys()
+  when: not containerized_deployment | bool
 
 - name: get parent device for data partition
   command: lsblk --noheadings --output PKNAME --nodeps "{{ ceph_osd_data_json[item.2]['data']['path'] }}"
@@ -228,6 +226,13 @@
   run_once: true
   with_items: "{{ osd_to_kill.split(',') }}"
 
+- name: remove osd data dir
+  file:
+    path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
+    state: absent
+  loop: "{{ _osd_hosts }}"
+  delegate_to: "{{ item.0 }}"
+
 - name: show ceph health
   command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"