mirror of https://github.com/ceph/ceph-ansible.git

filestore-to-bluestore: skip bluestore osd nodes

If the OSD node is already using bluestore OSDs then we should skip
all the remaining tasks to avoid purging OSDs for nothing.
Instead we warn the user.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1790472

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
(cherry picked from commit 83c5a1d7a8)

branch: pull/5038/head
parent: 460d3557d7
commit: 487be2675a
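Condensed, the change snapshots the configured objectstore before anything destructive runs: bluestore nodes only get a warning, and the whole migration is wrapped in a block gated on filestore. A minimal sketch of the pattern, with task bodies abbreviated from the diff below:

- name: set_fact current_objectstore
  set_fact:
    current_objectstore: '{{ osd_objectstore }}'

- name: warn user about osd already using bluestore
  debug:
    msg: 'WARNING: {{ inventory_hostname }} is already using bluestore. Skipping all tasks.'
  when: current_objectstore == 'bluestore'

- name: shrink and redeploy filestore osds
  when: current_objectstore == 'filestore'
  block:
    # every shrink/purge/redeploy task lives here and inherits the condition
    - import_role:
        name: ceph-facts
    # ...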
@@ -21,245 +21,257 @@
     - import_role:
         name: ceph-defaults

     - import_role:
         name: ceph-facts

-    - name: get ceph osd tree data
-      command: "{{ container_exec_cmd }} ceph osd tree -f json"
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-      register: osd_tree
-      run_once: true
-
-    - name: set_fact container_run_cmd
-      set_fact:
-        container_run_cmd: "{{ container_binary + ' run --rm --privileged=true --ulimit nofile=1024:4096 --net=host --pid=host --ipc=host -v /dev:/dev -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph -v /var/run:/var/run --entrypoint=' if containerized_deployment else '' }}ceph-volume {{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else '' }}"
-
-    - name: get ceph-volume lvm inventory data
-      command: "{{ container_run_cmd }} --cluster {{ cluster }} inventory --format json"
-      register: ceph_volume_inventory
-
-    - name: set_fact inventory
-      set_fact:
-        inventory: "{{ ceph_volume_inventory.stdout | from_json }}"
-
-    - name: set_fact ceph_disk_osds
-      set_fact:
-        ceph_disk_osds_devices: "{{ ceph_disk_osds_devices | default([]) + [item.path] }}"
-      with_items: "{{ inventory }}"
-      when:
-        - not item.available | bool
-        - "'Used by ceph-disk' in item.rejected_reasons"
-
-    - name: ceph-disk prepared OSDs related tasks
-      when: ceph_disk_osds_devices | default([]) | length > 0
-      block:
+    - name: set_fact current_objectstore
+      set_fact:
+        current_objectstore: '{{ osd_objectstore }}'
+
+    - name: warn user about osd already using bluestore
+      debug:
+        msg: 'WARNING: {{ inventory_hostname }} is already using bluestore. Skipping all tasks.'
+      when: current_objectstore == 'bluestore'
+
+    - name: shrink and redeploy filestore osds
+      when: current_objectstore == 'filestore'
+      block:
-        - name: get partlabel
-          command: blkid "{{ item + 'p' if item is match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$') else item }}"1 -s PARTLABEL -o value
-          register: partlabel
-          with_items: "{{ ceph_disk_osds_devices | default([]) }}"
-
-        - name: get simple scan data
-          command: "{{ container_run_cmd }} --cluster {{ cluster }} simple scan {{ item.item + 'p1' if item.item is match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$') else item.item + '1' }} --stdout"
-          register: simple_scan
-          with_items: "{{ partlabel.results | default([]) }}"
-          when: item.stdout == 'ceph data'
-          ignore_errors: true
+        - import_role:
+            name: ceph-facts
+
+        - name: get ceph osd tree data
+          command: "{{ container_exec_cmd }} ceph osd tree -f json"
+          delegate_to: "{{ groups[mon_group_name][0] }}"
+          register: osd_tree
+          run_once: true
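A note on the `blkid` and `simple scan` device expressions above (and repeated in the re-indented version further down): ceph-disk data lives on partition 1, and devices whose kernel names end in a digit (NVMe, cciss) need a `p` before the partition number. In the `blkid` case the trailing `1` sits outside the quotes so the shell concatenates it. Hypothetical expansions:

# Hypothetical expansions of the partition-name expressions:
#   blkid "/dev/sdb"1 ...          -> the shell sees /dev/sdb1
#   blkid "/dev/nvme0n1p"1 ...     -> /dev/nvme0n1p1
#   simple scan /dev/sdb1          (non-matching device: item.item + '1')
#   simple scan /dev/nvme0n1p1     (matching device: item.item + 'p1')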
-        - name: mark out osds
-          command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ (item.0.stdout | from_json).whoami }}"
-          with_together:
-            - "{{ simple_scan.results }}"
-            - "{{ partlabel.results }}"
-          delegate_to: "{{ groups[mon_group_name][0] }}"
-          run_once: true
-          when: item.1.stdout == 'ceph data'
-
-        - name: stop and disable old osd services
-          service:
-            name: "ceph-osd@{{ (item.0.stdout | from_json).whoami }}"
-            state: stopped
-            enabled: no
-          with_together:
-            - "{{ simple_scan.results }}"
-            - "{{ partlabel.results }}"
-          when: item.1.stdout == 'ceph data'
-
-        - name: umount osd data
-          mount:
-            path: "/var/lib/ceph/osd/{{ cluster }}-{{ (item.0.stdout | from_json).whoami }}"
-            state: unmounted
-          with_together:
-            - "{{ simple_scan.results }}"
-            - "{{ partlabel.results }}"
-          when: item.1.stdout == 'ceph data'
-
-        - name: umount osd lockbox
-          mount:
-            path: "/var/lib/ceph/osd-lockbox/{{ (item.0.stdout | from_json).data.uuid }}"
-            state: unmounted
-          with_together:
-            - "{{ simple_scan.results }}"
-            - "{{ partlabel.results }}"
-          when:
-            - item.1.stdout == 'ceph data'
-            - (item.0.stdout | from_json).encrypted | default(False) | bool
-
-        - name: ensure dmcrypt for data device is closed
-          command: cryptsetup close "{{ (item.0.stdout | from_json).data.uuid }}"
-          with_together:
-            - "{{ simple_scan.results }}"
-            - "{{ partlabel.results }}"
-          failed_when: false
-          changed_when: false
-          when:
-            - item.1.stdout == 'ceph data'
-            - (item.0.stdout | from_json).encrypted | default(False) | bool
-
-        - name: ensure dmcrypt for journal device is closed
-          command: cryptsetup close "{{ (item.0.stdout | from_json).journal.uuid }}"
-          with_together:
-            - "{{ simple_scan.results }}"
-            - "{{ partlabel.results }}"
-          failed_when: false
-          changed_when: false
-          when:
-            - item.1.stdout == 'ceph data'
-            - (item.0.stdout | from_json).encrypted | default(False) | bool
-
-        - name: zap data devices
-          command: "{{ container_run_cmd }} --cluster {{ cluster }} lvm zap --destroy {{ (item.0.stdout | from_json).data.path }}"
-          with_together:
-            - "{{ simple_scan.results }}"
-            - "{{ partlabel.results }}"
-          when: item.1.stdout == 'ceph data'
-
-        - name: zap journal devices
-          command: "{{ container_run_cmd }} --cluster {{ cluster }} lvm zap --destroy {{ (item.0.stdout | from_json).journal.path }}"
-          with_together:
-            - "{{ simple_scan.results }}"
-            - "{{ partlabel.results }}"
-          when:
-            - item.1.stdout == 'ceph data'
-            - (item.0.stdout | from_json).journal.path is defined
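For orientation, the fields those tasks dereference come from `ceph-volume simple scan`; a trimmed, hypothetical sample of its JSON output (only the keys the playbook actually uses):

# Trimmed, hypothetical `ceph-volume simple scan --stdout` output
whoami: '3'            # OSD id: `osd out`, ceph-osd@ service, mount path
encrypted: false       # gates the lockbox umount and cryptsetup tasks
data:
  path: /dev/sdb1      # zapped with `lvm zap --destroy`
  uuid: 39f052f8-...   # dmcrypt mapper / lockbox directory name
journal:
  path: /dev/sdc1      # zapped only when defined
  uuid: 7cf12d41-...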
-    - name: get ceph-volume lvm list data
-      command: "{{ container_run_cmd }} --cluster {{ cluster }} lvm list --format json"
-      register: ceph_volume_lvm_list
-
-    - name: set_fact _lvm_list
-      set_fact:
-        _lvm_list: "{{ _lvm_list | default([]) + item.value }}"
-      with_dict: "{{ (ceph_volume_lvm_list.stdout | from_json) }}"
-
-    - name: ceph-volume prepared OSDs related tasks
-      block:
-        - name: mark out osds
-          command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ item }}"
-          with_items: "{{ (ceph_volume_lvm_list.stdout | from_json).keys() | list }}"
-          delegate_to: "{{ groups[mon_group_name][0] }}"
-          run_once: true
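The `osd_tree` fact registered inside the new block is reused much further down (see `set_fact osd_ids` near the end) to find this host's OSD ids. A trimmed, hypothetical sample of `ceph osd tree -f json`:

# Trimmed, hypothetical `ceph osd tree -f json` output: the playbook later
# matches the host bucket by name and collects its children as OSD ids.
nodes:
  - {id: -3, name: osd-node-1, type: host, children: [0, 1]}
  - {id: 0, name: osd.0, type: osd, status: up}
  - {id: 1, name: osd.1, type: osd, status: up}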
-        - name: stop and disable old osd services
-          service:
-            name: "ceph-osd@{{ item }}"
-            state: stopped
-            enabled: no
-          with_items: "{{ (ceph_volume_lvm_list.stdout | from_json).keys() | list }}"
-
-        - name: mark down osds
-          command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd down {{ item }}"
-          with_items: "{{ (ceph_volume_lvm_list.stdout | from_json).keys() | list }}"
-          delegate_to: "{{ groups[mon_group_name][0] }}"
-          run_once: true
-
-        - name: ensure all dmcrypt for data and journal are closed
-          command: cryptsetup close "{{ item['lv_uuid'] }}"
-          with_items: "{{ _lvm_list }}"
-          changed_when: false
-          failed_when: false
-          when: item['tags'].get('ceph.encrypted', 0) | int == 1
-
-        - name: set_fact osd_fsid_list
-          set_fact:
-            osd_fsid_list: "{{ osd_fsid_list | default([]) + [{'osd_fsid': item.tags['ceph.osd_fsid'], 'destroy': (item.lv_name.startswith('osd-data-') and item.vg_name.startswith('ceph-')) | ternary(true, false)}] }}"
-          with_items: "{{ _lvm_list }}"
-          when: item.type == 'data'
+        - name: set_fact container_run_cmd
+          set_fact:
+            container_run_cmd: "{{ container_binary + ' run --rm --privileged=true --ulimit nofile=1024:4096 --net=host --pid=host --ipc=host -v /dev:/dev -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph -v /var/run:/var/run --entrypoint=' if containerized_deployment else '' }}ceph-volume {{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else '' }}"
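That single Jinja expression builds either a containerized ceph-volume invocation or the bare binary, depending on `containerized_deployment`. What it expands to, with hypothetical binary, image, and tag values:

# containerized_deployment=true:
#   podman run --rm --privileged=true --ulimit nofile=1024:4096 --net=host \
#     --pid=host --ipc=host -v /dev:/dev -v /etc/ceph:/etc/ceph \
#     -v /var/lib/ceph:/var/lib/ceph -v /var/run:/var/run \
#     --entrypoint=ceph-volume docker.io/ceph/daemon:latest-nautilus
# containerized_deployment=false:
#   ceph-volume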
-        - name: zap ceph-volume prepared OSDs
-          ceph_volume:
-            action: "zap"
-            osd_fsid: "{{ item.osd_fsid }}"
-            destroy: "{{ item.destroy }}"
-          environment:
-            CEPH_VOLUME_DEBUG: 1
-            CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
-            CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-          loop: "{{ osd_fsid_list }}"
-          when: osd_fsid_list is defined
-
-        - name: ensure all dm are closed
-          command: dmsetup remove "{{ item['lv_path'] }}"
-          with_items: "{{ _lvm_list }}"
-          changed_when: false
-          failed_when: false
-          when:
-            - item['lv_path'] is defined
-            # Do not close mappers for non 'lvm batch' devices
-            - devices | default([]) | length > 0
-
-        - name: ensure all pv are removed
-          command: "pvremove --yes {{ item.devices[0] }}"
-          with_items: "{{ _lvm_list }}"
-          failed_when: false
-          when:
-            - item.type == 'data'
-            - item.lv_name.startswith('osd-data-') | bool
-            - item.vg_name.startswith('ceph-') | bool
-      when: _lvm_list is defined
+        - name: get ceph-volume lvm inventory data
+          command: "{{ container_run_cmd }} --cluster {{ cluster }} inventory --format json"
+          register: ceph_volume_inventory
+
+        - name: set_fact inventory
+          set_fact:
+            inventory: "{{ ceph_volume_inventory.stdout | from_json }}"
+
+        - name: set_fact ceph_disk_osds
+          set_fact:
+            ceph_disk_osds_devices: "{{ ceph_disk_osds_devices | default([]) + [item.path] }}"
+          with_items: "{{ inventory }}"
+          when:
+            - not item.available | bool
+            - "'Used by ceph-disk' in item.rejected_reasons"
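`ceph-volume inventory` reports every disk with an availability verdict; devices still holding ceph-disk OSDs are the ones collected into `ceph_disk_osds_devices`. A trimmed, hypothetical sample of its JSON:

# Trimmed, hypothetical `ceph-volume inventory --format json` output: a device
# still owned by ceph-disk is unavailable with an explicit rejection reason,
# which is exactly what `set_fact ceph_disk_osds` keys on.
- path: /dev/sdb
  available: false
  rejected_reasons: ["Used by ceph-disk"]
- path: /dev/sdd
  available: true
  rejected_reasons: []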
-    - name: set_fact osd_ids
-      set_fact:
-        osd_ids: "{{ osd_ids | default([]) + [item] }}"
-      with_items:
-        - "{{ ((osd_tree.stdout | from_json).nodes | selectattr('name', 'match', inventory_hostname) | map(attribute='children') | list) }}"
+        - name: ceph-disk prepared OSDs related tasks
+          when: ceph_disk_osds_devices | default([]) | length > 0
+          block:
+            - name: get partlabel
+              command: blkid "{{ item + 'p' if item is match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$') else item }}"1 -s PARTLABEL -o value
+              register: partlabel
+              with_items: "{{ ceph_disk_osds_devices | default([]) }}"
+
+            - name: get simple scan data
+              command: "{{ container_run_cmd }} --cluster {{ cluster }} simple scan {{ item.item + 'p1' if item.item is match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$') else item.item + '1' }} --stdout"
+              register: simple_scan
+              with_items: "{{ partlabel.results | default([]) }}"
+              when: item.stdout == 'ceph data'
+              ignore_errors: true
+
+            - name: mark out osds
+              command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ (item.0.stdout | from_json).whoami }}"
+              with_together:
+                - "{{ simple_scan.results }}"
+                - "{{ partlabel.results }}"
+              delegate_to: "{{ groups[mon_group_name][0] }}"
+              run_once: true
+              when: item.1.stdout == 'ceph data'
+
+            - name: stop and disable old osd services
+              service:
+                name: "ceph-osd@{{ (item.0.stdout | from_json).whoami }}"
+                state: stopped
+                enabled: no
+              with_together:
+                - "{{ simple_scan.results }}"
+                - "{{ partlabel.results }}"
+              when: item.1.stdout == 'ceph data'
+
+            - name: umount osd data
+              mount:
+                path: "/var/lib/ceph/osd/{{ cluster }}-{{ (item.0.stdout | from_json).whoami }}"
+                state: unmounted
+              with_together:
+                - "{{ simple_scan.results }}"
+                - "{{ partlabel.results }}"
+              when: item.1.stdout == 'ceph data'
+
+            - name: umount osd lockbox
+              mount:
+                path: "/var/lib/ceph/osd-lockbox/{{ (item.0.stdout | from_json).data.uuid }}"
+                state: unmounted
+              with_together:
+                - "{{ simple_scan.results }}"
+                - "{{ partlabel.results }}"
+              when:
+                - item.1.stdout == 'ceph data'
+                - (item.0.stdout | from_json).encrypted | default(False) | bool
+
+            - name: ensure dmcrypt for data device is closed
+              command: cryptsetup close "{{ (item.0.stdout | from_json).data.uuid }}"
+              with_together:
+                - "{{ simple_scan.results }}"
+                - "{{ partlabel.results }}"
+              failed_when: false
+              changed_when: false
+              when:
+                - item.1.stdout == 'ceph data'
+                - (item.0.stdout | from_json).encrypted | default(False) | bool
+
+            - name: ensure dmcrypt for journal device is closed
+              command: cryptsetup close "{{ (item.0.stdout | from_json).journal.uuid }}"
+              with_together:
+                - "{{ simple_scan.results }}"
+                - "{{ partlabel.results }}"
+              failed_when: false
+              changed_when: false
+              when:
+                - item.1.stdout == 'ceph data'
+                - (item.0.stdout | from_json).encrypted | default(False) | bool
+
+            - name: zap data devices
+              command: "{{ container_run_cmd }} --cluster {{ cluster }} lvm zap --destroy {{ (item.0.stdout | from_json).data.path }}"
+              with_together:
+                - "{{ simple_scan.results }}"
+                - "{{ partlabel.results }}"
+              when: item.1.stdout == 'ceph data'
+
+            - name: zap journal devices
+              command: "{{ container_run_cmd }} --cluster {{ cluster }} lvm zap --destroy {{ (item.0.stdout | from_json).journal.path }}"
+              with_together:
+                - "{{ simple_scan.results }}"
+                - "{{ partlabel.results }}"
+              when:
+                - item.1.stdout == 'ceph data'
+                - (item.0.stdout | from_json).journal.path is defined
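The `with_together` loops above zip `simple_scan.results` with `partlabel.results` element-wise, so `item.0` is the scan result and `item.1` the matching partlabel result. A minimal illustration with hypothetical values:

# with_together zips the two registered lists element-wise:
#   item.0 -> simple_scan.results[i]   (JSON description of the OSD)
#   item.1 -> partlabel.results[i]     (stdout is 'ceph data' for data partitions)
- debug:
    msg: "{{ item.0 }} / {{ item.1 }}"
  with_together:
    - [scan_a, scan_b]
    - [label_a, label_b]
# -> iterations: (scan_a, label_a), (scan_b, label_b)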
+        - name: get ceph-volume lvm list data
+          command: "{{ container_run_cmd }} --cluster {{ cluster }} lvm list --format json"
+          register: ceph_volume_lvm_list
+
+        - name: set_fact _lvm_list
+          set_fact:
+            _lvm_list: "{{ _lvm_list | default([]) + item.value }}"
+          with_dict: "{{ (ceph_volume_lvm_list.stdout | from_json) }}"
+
+        - name: ceph-volume prepared OSDs related tasks
+          block:
+            - name: mark out osds
+              command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ item }}"
+              with_items: "{{ (ceph_volume_lvm_list.stdout | from_json).keys() | list }}"
+              delegate_to: "{{ groups[mon_group_name][0] }}"
+              run_once: true
+
+            - name: stop and disable old osd services
+              service:
+                name: "ceph-osd@{{ item }}"
+                state: stopped
+                enabled: no
+              with_items: "{{ (ceph_volume_lvm_list.stdout | from_json).keys() | list }}"
+
+            - name: mark down osds
+              command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd down {{ item }}"
+              with_items: "{{ (ceph_volume_lvm_list.stdout | from_json).keys() | list }}"
+              delegate_to: "{{ groups[mon_group_name][0] }}"
+              run_once: true
+
+            - name: ensure all dmcrypt for data and journal are closed
+              command: cryptsetup close "{{ item['lv_uuid'] }}"
+              with_items: "{{ _lvm_list }}"
+              changed_when: false
+              failed_when: false
+              when: item['tags'].get('ceph.encrypted', 0) | int == 1
+
+            - name: set_fact osd_fsid_list
+              set_fact:
+                osd_fsid_list: "{{ osd_fsid_list | default([]) + [{'osd_fsid': item.tags['ceph.osd_fsid'], 'destroy': (item.lv_name.startswith('osd-data-') and item.vg_name.startswith('ceph-')) | ternary(true, false)}] }}"
+              with_items: "{{ _lvm_list }}"
+              when: item.type == 'data'
+
+            - name: zap ceph-volume prepared OSDs
+              ceph_volume:
+                action: "zap"
+                osd_fsid: "{{ item.osd_fsid }}"
+                destroy: "{{ item.destroy }}"
+              environment:
+                CEPH_VOLUME_DEBUG: 1
+                CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
+                CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+              loop: "{{ osd_fsid_list }}"
+              when: osd_fsid_list is defined
+
+            - name: ensure all dm are closed
+              command: dmsetup remove "{{ item['lv_path'] }}"
+              with_items: "{{ _lvm_list }}"
+              changed_when: false
+              failed_when: false
+              when:
+                - item['lv_path'] is defined
+                # Do not close mappers for non 'lvm batch' devices
+                - devices | default([]) | length > 0
+
+            - name: ensure all pv are removed
+              command: "pvremove --yes {{ item.devices[0] }}"
+              with_items: "{{ _lvm_list }}"
+              failed_when: false
+              when:
+                - item.type == 'data'
+                - item.lv_name.startswith('osd-data-') | bool
+                - item.vg_name.startswith('ceph-') | bool
+          when: _lvm_list is defined
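`ceph-volume lvm list --format json` maps OSD ids to lists of LV records; `with_dict` concatenates those lists into `_lvm_list`, and the `destroy` flag marks only LVs that look created by ceph-ansible's `lvm batch` (vg `ceph-*`, lv `osd-data-*`) for destruction. A trimmed, hypothetical record showing only the fields the tasks above dereference:

# Trimmed, hypothetical `ceph-volume lvm list --format json` output: a dict
# keyed by OSD id (hence the `.keys() | list` loops).
'0':
  - type: data                    # item.type, filters osd_fsid_list to data LVs
    lv_name: osd-data-8f2e1c...   # startswith('osd-data-') -> destroy: true
    vg_name: ceph-b2c4d6...       # startswith('ceph-')     -> lvm batch LV
    lv_uuid: Yx1o2k...            # used by `cryptsetup close`
    lv_path: /dev/ceph-b2c4d6.../osd-data-8f2e1c...
    devices: [/dev/sdb]           # item.devices[0] -> pvremove target
    tags:
      ceph.osd_fsid: 6b2a8e...    # collected into osd_fsid_list
      ceph.encrypted: '0'         # '1' triggers the dmcrypt close task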
+        - name: set_fact osd_ids
+          set_fact:
+            osd_ids: "{{ osd_ids | default([]) + [item] }}"
+          with_items:
+            - "{{ ((osd_tree.stdout | from_json).nodes | selectattr('name', 'match', inventory_hostname) | map(attribute='children') | list) }}"

-    - name: purge osd(s) from the cluster
-      command: >
-        {{ container_exec_cmd }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it
-      run_once: true
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-      with_items: "{{ osd_ids }}"
+        - name: purge osd(s) from the cluster
+          command: >
+            {{ container_exec_cmd }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it
+          run_once: true
+          delegate_to: "{{ groups[mon_group_name][0] }}"
+          with_items: "{{ osd_ids }}"

-    - name: purge /var/lib/ceph/osd directories
-      file:
-        path: "/var/lib/ceph/osd/{{ cluster }}-{{ item }}"
-        state: absent
-      with_items: "{{ osd_ids }}"
+        - name: purge /var/lib/ceph/osd directories
+          file:
+            path: "/var/lib/ceph/osd/{{ cluster }}-{{ item }}"
+            state: absent
+          with_items: "{{ osd_ids }}"

-    - name: remove gpt header
-      command: parted -s "{{ item }}" mklabel msdos
-      with_items: "{{ (devices + dedicated_devices + ceph_disk_osds_devices | default([])) | unique }}"
+        - name: remove gpt header
+          command: parted -s "{{ item }}" mklabel msdos
+          with_items: "{{ (devices + dedicated_devices + ceph_disk_osds_devices | default([])) | unique }}"

-    - name: refresh ansible devices fact
-      setup:
-        filter: ansible_devices
-      when: osd_auto_discovery | bool
+        - name: refresh ansible devices fact
+          setup:
+            filter: ansible_devices
+          when: osd_auto_discovery | bool
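The `set_fact osd_ids` expression deserves a worked example; against the sample tree shown earlier (hypothetical values), it behaves roughly like this:

# Hypothetical evaluation, inventory_hostname == 'osd-node-1':
#   (osd_tree.stdout | from_json).nodes
#     | selectattr('name', 'match', 'osd-node-1')   -> the host bucket only
#     | map(attribute='children') | list            -> [[0, 1]]
# with_items flattens nested lists one level per loop, so the purge and
# directory-removal loops above end up iterating the individual ids 0 and 1.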
-    - import_role:
-        name: ceph-defaults
-
-    - import_role:
-        name: ceph-facts
-
-    - import_role:
-        name: ceph-handler
-
-    - import_role:
-        name: ceph-container-common
-      when: containerized_deployment | bool
-
-    - import_role:
-        name: ceph-config
-      vars:
-        osd_objectstore: bluestore
-
-    - import_role:
-        name: ceph-osd
-      vars:
-        osd_objectstore: bluestore
+        - name: force osd_objectstore to bluestore
+          set_fact:
+            osd_objectstore: bluestore
+
+        - import_role:
+            name: ceph-defaults
+
+        - import_role:
+            name: ceph-facts
+
+        - import_role:
+            name: ceph-handler
+
+        - import_role:
+            name: ceph-container-common
+          when: containerized_deployment | bool
+
+        - import_role:
+            name: ceph-config
+
+        - import_role:
+            name: ceph-osd
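Replacing the two per-import `vars:` with a single `set_fact` relies on set_fact's precedence: it overrides inventory and group_vars for the rest of the play, so every subsequent role import sees bluestore; and because the redeploy now sits inside the gated block, an already-bluestore node skips it entirely. Schematically:

# Before: forced per import, and only on ceph-config and ceph-osd
#   - import_role:
#       name: ceph-osd
#     vars:
#       osd_objectstore: bluestore
# After: one fact, inherited by everything that follows in the play
#   - name: force osd_objectstore to bluestore
#     set_fact:
#       osd_objectstore: bluestore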