Refresh /etc/ceph/osd JSON file content before zapping the disks

If the physical disk to device path mapping has changed since the
last ceph-volume simple scan (e.g. after disks were added or removed),
the wrong disk could be zapped; see the illustrative sketch below.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=2071035

Signed-off-by: Teoman ONAY <tonay@redhat.com>
(cherry picked from commit 64e08f2c0b)
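For context, here is a minimal sketch of what one of those files holds, assuming the usual /etc/ceph/osd/<osd_id>-<osd_fsid>.json layout written by ceph-volume simple scan; the field set is abbreviated and the device path and uuid values are made up for illustration:

    {
        "cluster_name": "ceph",
        "type": "bluestore",
        "data": {
            "path": "/dev/sdb1",
            "uuid": "5555eeee-6666-ffff-7777-88889999aaaa"
        }
    }

If /dev/sdb has since been re-enumerated and now points at a different physical disk, zapping the recorded path destroys the wrong device; re-running the scan with --force immediately before zapping keeps the recorded path in sync with the current device layout.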
Teoman ONAY 2022-07-04 11:54:41 +02:00 committed by Guillaume Abrioux
parent 6dc7bfdd3c
commit 6feb7646a1
1 changed file with 22 additions and 2 deletions

@@ -105,18 +105,38 @@
         - "{{ osd_hosts }}"
       when: hostvars[item.0]['ansible_facts']['hostname'] == item.1
 
+    - name: set_fact host_list
+      set_fact:
+        host_list: "{{ host_list | default([]) | union([item.0]) }}"
+      loop: "{{ _osd_hosts }}"
+
     - name: get ceph-volume lvm list data
       command: "{{ container_run_cmd }} lvm list --format json"
       changed_when: false
       register: _lvm_list_data
-      delegate_to: "{{ item.0 }}"
-      loop: "{{ _osd_hosts }}"
+      delegate_to: "{{ item }}"
+      loop: "{{ host_list }}"
 
     - name: set_fact _lvm_list
       set_fact:
         _lvm_list: "{{ _lvm_list | default({}) | combine(item.stdout | from_json) }}"
       with_items: "{{ _lvm_list_data.results }}"
 
+    - name: refresh /etc/ceph/osd files non containerized_deployment
+      ceph_volume_simple_scan:
+        cluster: "{{ cluster }}"
+        force: true
+      delegate_to: "{{ item }}"
+      loop: "{{ host_list }}"
+      when: not containerized_deployment | bool
+
+    - name: refresh /etc/ceph/osd files containerized_deployment
+      command: "{{ container_binary }} exec ceph-osd-{{ item.2 }} ceph-volume simple scan --force /var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
+      changed_when: false
+      delegate_to: "{{ item.0 }}"
+      loop: "{{ _osd_hosts }}"
+      when: containerized_deployment | bool
+
     - name: find /etc/ceph/osd files
       find:
         paths: /etc/ceph/osd
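A minimal sketch of what the new host_list fact buys, assuming _osd_hosts holds per-OSD entries in which item.0 is the host and item.2 is used as the OSD id (as the item.0 / item.2 references in the tasks above suggest); the hostnames, OSD ids and placeholder fields below are made up:

    # Hypothetical per-OSD data built earlier in the play:
    #   _osd_hosts:
    #     - ['osd-node-1', <other fields>, '3']
    #     - ['osd-node-1', <other fields>, '7']
    #     - ['osd-node-2', <other fields>, '5']
    #
    # union([item.0]) keeps each host only once, so per-host work
    # (ceph-volume lvm list, the non-containerized simple scan) runs
    # once per node instead of once per OSD:
    #
    #   host_list: ['osd-node-1', 'osd-node-2']
    - name: set_fact host_list
      set_fact:
        host_list: "{{ host_list | default([]) | union([item.0]) }}"
      loop: "{{ _osd_hosts }}"

The containerized refresh still loops over _osd_hosts because the scan has to run inside each ceph-osd-<id> container against /var/lib/ceph/osd/<cluster>-<id>, i.e. once per OSD rather than once per host.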