filestore-to-bluestore: fix osd_auto_discovery

When osd_auto_discovery is set then we need to refresh the
ansible_devices fact after the filestore OSD purge,
otherwise the devices fact won't be populated.
Also remove the gpt header on ceph_disk_osds_devices because
the devices list is empty at this point for osd_auto_discovery.
Adding the bool filter when needed.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1729267

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
pull/4959/head
Dimitri Savineau 2020-01-21 16:37:10 -05:00 committed by Guillaume Abrioux
parent 483adb5d79
commit bb3eae0c80
5 changed files with 19 additions and 10 deletions

View File

@ -100,7 +100,9 @@
with_together:
- "{{ simple_scan.results }}"
- "{{ partlabel.results }}"
when: item.1.stdout == 'ceph data'
when:
- item.1.stdout == 'ceph data'
- (item.0.stdout | from_json).encrypted | default(False) | bool
- name: ensure dmcrypt for data device is closed
command: cryptsetup close "{{ (item.0.stdout | from_json).data.uuid }}"
@ -111,7 +113,7 @@
changed_when: false
when:
- item.1.stdout == 'ceph data'
- (item.0.stdout | from_json).encrypted | default(False)
- (item.0.stdout | from_json).encrypted | default(False) | bool
- name: ensure dmcrypt for journal device is closed
command: cryptsetup close "{{ (item.0.stdout | from_json).journal.uuid }}"
@ -122,7 +124,7 @@
changed_when: false
when:
- item.1.stdout == 'ceph data'
- (item.0.stdout | from_json).encrypted | default(False)
- (item.0.stdout | from_json).encrypted | default(False) | bool
- name: zap data devices
command: "{{ container_run_cmd }} --cluster {{ cluster }} lvm zap --destroy {{ (item.0.stdout | from_json).data.path }}"
@ -235,12 +237,17 @@
- name: remove gpt header
command: parted -s "{{ item }}" mklabel msdos
with_items: "{{ devices + dedicated_devices | default([]) }}"
with_items: "{{ (devices + dedicated_devices + ceph_disk_osds_devices | default([])) | unique }}"
- name: refresh ansible devices fact
setup:
filter: ansible_devices
when: osd_auto_discovery | bool
- import_role:
name: ceph-facts
- import_role:
name: ceph-defaults
- import_role:
name: ceph-facts
- import_role:
name: ceph-handler
- import_role:

View File

@ -7,3 +7,4 @@ osd1 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/d
osd2 osd_objectstore=filestore devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" journal_size=1024
osd3 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]"
osd4 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]" dmcrypt=true
osd5 osd_objectstore=filestore osd_auto_discovery=true journal_size=1024

View File

@ -5,7 +5,7 @@ docker: true
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 5
osd_vms: 6
mds_vms: 0
rgw_vms: 0
nfs_vms: 0

View File

@ -7,3 +7,4 @@ osd1 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/d
osd2 osd_objectstore=filestore devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" journal_size=1024
osd3 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]"
osd4 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]" dmcrypt=true
osd5 osd_objectstore=filestore osd_auto_discovery=true journal_size=1024

View File

@ -5,7 +5,7 @@ docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 5
osd_vms: 6
mds_vms: 0
rgw_vms: 0
nfs_vms: 0