mirror of https://github.com/ceph/ceph-ansible.git
filestore-to-bluestore: fix osd_auto_discovery
When osd_auto_discovery is set, we need to refresh the
ansible_devices fact after the filestore OSD purge,
otherwise the devices fact won't be populated.
Also remove the gpt header on ceph_disk_osds_devices, because
the devices list is empty at this point for osd_auto_discovery.
Also add the bool filter where needed.
Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1729267
Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
(cherry picked from commit bb3eae0c80)
pull/4975/head
parent e4965e9ea9
commit 0abea70e29
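
For context, a minimal sketch of why the explicit `| bool` cast added below matters (hypothetical values, not taken from the playbook): `from_json` may hand back the encrypted flag as a string or 0/1 rather than a native boolean, and without the cast Ansible's `when` only tests the value's truthiness, so a non-empty string like "0" would count as true.

# Hypothetical standalone task demonstrating the cast; "scan_result"
# is a made-up stand-in for the ceph-disk simple scan output.
- name: demonstrate the bool cast
  debug:
    msg: "would close the dmcrypt device"
  vars:
    scan_result: '{"encrypted": "0"}'
  # "0" | bool evaluates to False; without | bool the non-empty
  # string would be truthy and the task would run by mistake.
  when: (scan_result | from_json).encrypted | default(False) | bool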
@@ -100,7 +100,9 @@
   with_together:
     - "{{ simple_scan.results }}"
     - "{{ partlabel.results }}"
-  when: item.1.stdout == 'ceph data'
+  when:
+    - item.1.stdout == 'ceph data'
+    - (item.0.stdout | from_json).encrypted | default(False) | bool
 
 - name: ensure dmcrypt for data device is closed
   command: cryptsetup close "{{ (item.0.stdout | from_json).data.uuid }}"
@@ -111,7 +113,7 @@
   changed_when: false
   when:
     - item.1.stdout == 'ceph data'
-    - (item.0.stdout | from_json).encrypted | default(False)
+    - (item.0.stdout | from_json).encrypted | default(False) | bool
 
 - name: ensure dmcrypt for journal device is closed
   command: cryptsetup close "{{ (item.0.stdout | from_json).journal.uuid }}"
@@ -122,7 +124,7 @@
   changed_when: false
   when:
     - item.1.stdout == 'ceph data'
-    - (item.0.stdout | from_json).encrypted | default(False)
+    - (item.0.stdout | from_json).encrypted | default(False) | bool
 
 - name: zap data devices
   command: "{{ container_run_cmd }} --cluster {{ cluster }} lvm zap --destroy {{ (item.0.stdout | from_json).data.path }}"
@@ -235,12 +237,17 @@
 
 - name: remove gpt header
   command: parted -s "{{ item }}" mklabel msdos
-  with_items: "{{ devices + dedicated_devices | default([]) }}"
+  with_items: "{{ (devices + dedicated_devices + ceph_disk_osds_devices | default([])) | unique }}"
 
+- name: refresh ansible devices fact
+  setup:
+    filter: ansible_devices
+  when: osd_auto_discovery | bool
+
-- import_role:
-    name: ceph-facts
 - import_role:
     name: ceph-defaults
+- import_role:
+    name: ceph-facts
 - import_role:
     name: ceph-handler
 - import_role:
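
The key addition above is the `setup` task filtered to `ansible_devices`: it re-runs fact gathering for just the device facts after the filestore OSDs are purged, so the ceph-facts role (now imported after ceph-defaults) can repopulate `devices` for auto-discovery from the disks that are now free. A minimal sketch of inspecting the refreshed fact (hypothetical debug task, not part of the playbook):

# Hypothetical check run right after the refresh: lists the block
# devices Ansible now sees, which auto-discovery draws from.
- name: show block devices visible after the fact refresh
  debug:
    msg: "{{ ansible_devices.keys() | list }}"
  when: osd_auto_discovery | bool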
@@ -7,3 +7,4 @@ osd1 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/d
 osd2 osd_objectstore=filestore devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" journal_size=1024
 osd3 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]"
 osd4 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]" dmcrypt=true
+osd5 osd_objectstore=filestore osd_auto_discovery=true journal_size=1024
@@ -5,7 +5,7 @@ docker: true
 
 # DEFINE THE NUMBER OF VMS TO RUN
 mon_vms: 1
-osd_vms: 5
+osd_vms: 6
 mds_vms: 0
 rgw_vms: 0
 nfs_vms: 0
@@ -7,3 +7,4 @@ osd1 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/d
 osd2 osd_objectstore=filestore devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" journal_size=1024
 osd3 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]"
 osd4 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]" dmcrypt=true
+osd5 osd_objectstore=filestore osd_auto_discovery=true journal_size=1024
@@ -5,7 +5,7 @@ docker: false
 
 # DEFINE THE NUMBER OF VMS TO RUN
 mon_vms: 1
-osd_vms: 5
+osd_vms: 6
 mds_vms: 0
 rgw_vms: 0
 nfs_vms: 0