mirror of https://github.com/ceph/ceph-ansible.git
ceph-osd: add support for bluestore
With Jewel comes a new store for Ceph objects: BlueStore. Adding an extra scenario might seem like useless duplication, however the ultimate goal is to remove the other roles later, so it is easier to add a new role than to modify an existing one. Once we drop support for releases older than Jewel we will simply remove all the previous scenario files.
Signed-off-by: Sébastien Han <seb@redhat.com>
pull/649/head
parent 9098eb0a8a
commit 225e066db2
@@ -118,6 +118,11 @@ dummy:
 # - /var/lib/ceph/osd/mydir2

+
+# V. Fifth scenario: this will partition disks for BlueStore
+# Use 'true' to enable this scenario
+#bluestore: false
+
 ##########
 # DOCKER #
 ##########
@@ -110,6 +110,11 @@ osd_directory: false
 # - /var/lib/ceph/osd/mydir2

+
+# V. Fifth scenario: this will partition disks for BlueStore
+# Use 'true' to enable this scenario
+bluestore: false
+
 ##########
 # DOCKER #
 ##########
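The role default stays false, so BlueStore must be switched on per cluster. A minimal sketch of a group_vars override for OSD hosts with an explicit device list (the file name and the device paths are illustrative, not part of this commit; bluestore, devices and osd_auto_discovery are existing role variables):

# group_vars/osds.yml -- illustrative sketch only
bluestore: true            # enable the new BlueStore scenario
osd_auto_discovery: false  # prepare only the devices listed below
devices:
  - /dev/sdb
  - /dev/sdc

With these values, the "manually prepare osd disk(s)" task added in the new scenario file runs ceph-disk prepare --bluestore against each listed device.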
@@ -11,5 +11,10 @@
 - include: ./scenarios/osd_directory.yml
   when: osd_directory and not osd_containerized_deployment

+- include: ./scenarios/bluestore.yml
+  when:
+    osd_objectstore == 'bluestore' and
+    not osd_containerized_deployment
+
 - include: ./docker/main.yml
   when: osd_containerized_deployment
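The multi-line and-condition above can also be written in Ansible's list form, where every entry must be true; a purely illustrative equivalent, not part of the commit:

- include: ./scenarios/bluestore.yml
  when:
    - osd_objectstore == 'bluestore'
    - not osd_containerized_deployment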
@@ -0,0 +1,40 @@
+---
+## SCENARIO 4: BLUESTORE
+
+- include: ../check_devices.yml
+
+# NOTE (leseb): the prepare process must be parallelized somehow...
+# if you have 64 disks with 4TB each, this will take a while
+# since Ansible will sequentially process the loop
+
+# NOTE (alahouze): if the device is a partition, the parted command below has
+# failed, this is why we check if the device is a partition too.
+- name: automatic prepare osd disk(s) without partitions
+  command: ceph-disk prepare --bluestore "/dev/{{ item.key }}"
+  ignore_errors: true
+  register: prepared_osds
+  with_dict: ansible_devices
+  when:
+    ansible_devices is defined and
+    item.value.removable == "0" and
+    item.value.partitions|count == 0 and
+    bluestore and
+    osd_auto_discovery
+
+- name: manually prepare osd disk(s)
+  command: ceph-disk prepare --bluestore "{{ item.2 }}"
+  ignore_errors: true
+  with_together:
+    - combined_parted_results.results
+    - combined_ispartition_results.results
+    - devices
+  when:
+    not item.0.get("skipped") and
+    not item.1.get("skipped") and
+    item.0.get("rc", 0) != 0 and
+    item.1.get("rc", 0) != 0 and
+    bluestore and not
+    osd_auto_discovery
+
+- include: ../activate_osds.yml
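For the auto-discovery path, a hedged sketch of the variables that make the first task above run (illustrative values, not part of this commit):

# group_vars/osds.yml -- illustrative sketch for auto-discovery
bluestore: true
osd_auto_discovery: true   # no manual devices list needed here

With these set, every non-removable disk that has no partitions is prepared with ceph-disk prepare --bluestore /dev/<disk>, and the "manually prepare osd disk(s)" task is skipped.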