Refresh /etc/ceph/osd json files content before zapping the disks

If the physical disk to device path mapping has changed since the
last ceph-volume simple scan (e.g. addition or removal of disks),
the wrong disk could be deleted.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=2071035

Signed-off-by: Teoman ONAY <tonay@redhat.com>
(cherry picked from commit 64e08f2c0b)
pull/7251/head
Teoman ONAY 2022-07-04 11:54:41 +02:00 committed by Guillaume Abrioux
parent 392ddec2d7
commit 0981158e03
1 changed file with 22 additions and 2 deletions

View File

@@ -101,6 +101,11 @@
- "{{ osd_hosts }}"
when: hostvars[item.0]['ansible_facts']['hostname'] == item.1
- name: set_fact host_list
set_fact:
host_list: "{{ host_list | default([]) | union([item.0]) }}"
loop: "{{ _osd_hosts }}"
- name: get ceph-volume lvm list data
ceph_volume:
cluster: "{{ cluster }}"
@@ -109,14 +114,29 @@
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
register: _lvm_list_data
delegate_to: "{{ item.0 }}"
loop: "{{ _osd_hosts }}"
delegate_to: "{{ item }}"
loop: "{{ host_list }}"
- name: set_fact _lvm_list
set_fact:
_lvm_list: "{{ _lvm_list | default({}) | combine(item.stdout | from_json) }}"
with_items: "{{ _lvm_list_data.results }}"
- name: refresh /etc/ceph/osd files non containerized_deployment
ceph_volume_simple_scan:
cluster: "{{ cluster }}"
force: true
delegate_to: "{{ item }}"
loop: "{{ host_list }}"
when: not containerized_deployment | bool
- name: refresh /etc/ceph/osd files containerized_deployment
command: "{{ container_binary }} exec ceph-osd-{{ item.2 }} ceph-volume simple scan --force /var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
changed_when: false
delegate_to: "{{ item.0 }}"
loop: "{{ _osd_hosts }}"
when: containerized_deployment | bool
- name: find /etc/ceph/osd files
find:
paths: /etc/ceph/osd