purge: zap and destroy db and wal devices for lvm batch

When purging a cluster deployed with the lvm batch scenario, the
dedicated db and wal devices are never zapped. Iterating over
`dedicated_devices` and `bluestore_wal_devices` in addition to
`devices` fixes this issue.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1922926

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 984191ac7f)
Guillaume Abrioux 2021-02-01 16:51:07 +01:00
parent ba76102952
commit 1b424ad5e9
2 changed files with 11 additions and 10 deletions
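
The fix relies on `with_items` flattening one level of nesting: looping
over a list whose entries are the three device lists yields each device
path as a separate `item`, so every db/wal device gets its own zap call.
A minimal standalone sketch of that behavior, with hypothetical device
paths standing in for the real group_vars:

    - hosts: localhost
      gather_facts: false
      vars:
        # hypothetical values, not from the actual change
        devices: ['/dev/sda', '/dev/sdb']
        dedicated_devices: ['/dev/nvme0n1']
        bluestore_wal_devices: []
      tasks:
        - name: show what the purge loop would iterate over
          debug:
            msg: "would zap {{ item }}"
          with_items:
            - "{{ devices | default([]) }}"
            - "{{ dedicated_devices | default([]) }}"
            - "{{ bluestore_wal_devices | default([]) }}"

Running this prints one message per device (/dev/sda, /dev/sdb,
/dev/nvme0n1); the empty wal list simply contributes nothing to the loop.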


@@ -512,10 +512,8 @@
         action: "zap"
       environment:
         CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
-      with_items: "{{ lvm_volumes }}"
-      when:
-        - lvm_volumes | default([]) | length > 0
-        - ceph_volume_present.rc == 0
+      with_items: "{{ lvm_volumes | default([]) }}"
+      when: ceph_volume_present.rc == 0

     - name: zap and destroy osds created by ceph-volume with devices
       ceph_volume:
@@ -523,10 +521,11 @@
         action: "zap"
       environment:
         CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
-      with_items: "{{ devices | default([]) }}"
-      when:
-        - devices | default([]) | length > 0
-        - ceph_volume_present.rc == 0
+      with_items:
+        - "{{ devices | default([]) }}"
+        - "{{ dedicated_devices | default([]) }}"
+        - "{{ bluestore_wal_devices | default([]) }}"
+      when: ceph_volume_present.rc == 0

     - name: get ceph block partitions
       shell: |
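
Put together, the bare-metal zap task now reads roughly as below. The
`ceph_volume` module parameters are not visible in the truncated hunk, so
this is a sketch assuming each looped device is passed as `data`, not the
verbatim playbook:

    - name: zap and destroy osds created by ceph-volume with devices
      ceph_volume:
        data: "{{ item }}"
        action: "zap"
      environment:
        CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
      with_items:
        - "{{ devices | default([]) }}"
        - "{{ dedicated_devices | default([]) }}"
        - "{{ bluestore_wal_devices | default([]) }}"
      when: ceph_volume_present.rc == 0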


@@ -317,8 +317,10 @@
         CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
         CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
         CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-      with_items: "{{ devices | default([]) }}"
-      when: devices | default([]) | length > 0
+      with_items:
+        - "{{ devices | default([]) }}"
+        - "{{ dedicated_devices | default([]) }}"
+        - "{{ bluestore_wal_devices | default([]) }}"

     - name: remove ceph osd service
       file:
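
The containerized purge task follows the same loop pattern, with the
extra container environment shown in the hunk. The task name and module
parameters are assumed here from the bare-metal counterpart, since the
hunk starts mid-task:

    - name: zap and destroy osds created by ceph-volume with devices
      ceph_volume:
        data: "{{ item }}"
        action: "zap"
      environment:
        CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
      with_items:
        - "{{ devices | default([]) }}"
        - "{{ dedicated_devices | default([]) }}"
        - "{{ bluestore_wal_devices | default([]) }}"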