Merge pull request #649 from ceph/bluestore-ceph-disk

ceph-osd: add support for bluestore
pull/653/head
Leseb 2016-03-25 17:00:41 +01:00
commit f27fada28a
5 changed files with 62 additions and 2 deletions

View File

@@ -118,6 +118,11 @@ dummy:
# - /var/lib/ceph/osd/mydir2
# V. Fifth scenario: this will partition disks for BlueStore
# Use 'true' to enable this scenario
#bluestore: false
##########
# DOCKER #
##########
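
To use the new scenario, a deployer uncomments the flag above and points the role at some disks. A minimal sketch, assuming the role's existing devices variable (disk paths are illustrative, not part of this commit):

# illustrative group_vars override, values are placeholders
bluestore: true
devices:
  - /dev/sdb
  - /dev/sdc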

View File

@@ -36,6 +36,7 @@
msg: "journal_size must be configured. See http://ceph.com/docs/master/rados/configuration/osd-config-ref/"
when:
journal_size|int == 0 and
osd_objectstore != 'bluestore' and
osd_group_name in group_names
- name: make sure monitor_interface or monitor_address is configured
@@ -68,7 +69,8 @@
osd_group_name in group_names and
not journal_collocation and
not raw_multi_journal and
not osd_directory
not osd_directory and
not bluestore
- name: verify only one osd scenario was chosen
fail:
@@ -78,7 +80,10 @@
osd_group_name in group_names and
((journal_collocation and raw_multi_journal) or
(journal_collocation and osd_directory) or
(raw_multi_journal and osd_directory))
(raw_multi_journal and osd_directory) or
(bluestore and journal_collocation) or
(bluestore and raw_multi_journal) or
(bluestore and osd_directory))
- name: verify devices have been provided
fail:
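
Net effect of the extended checks, as a hedged reading of this hunk: under BlueStore the journal_size requirement is waived (the first check is skipped when osd_objectstore == 'bluestore'), and bluestore must be the only scenario flag enabled. Illustrative configurations:

# passes the checks; journal_size may stay unset with bluestore
osd_objectstore: bluestore
bluestore: true

# would now trip "verify only one osd scenario was chosen"
#bluestore: true
#journal_collocation: true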

View File

@@ -110,6 +110,11 @@ osd_directory: false
# - /var/lib/ceph/osd/mydir2
# V. Fifth scenario: this will partition disks for BlueStore
# Use 'true' to enable this scenario
bluestore: false
##########
# DOCKER #
##########

View File

@@ -11,5 +11,10 @@
- include: ./scenarios/osd_directory.yml
when: osd_directory and not osd_containerized_deployment
- include: ./scenarios/bluestore.yml
when:
osd_objectstore == 'bluestore' and
not osd_containerized_deployment
- include: ./docker/main.yml
when: osd_containerized_deployment
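
Note that the include is keyed on osd_objectstore while the checks and the scenario tasks are keyed on the bluestore flag, so a working setup sets both. A hedged sketch of the minimal variables for this code path (names taken from the diff, values illustrative):

osd_objectstore: bluestore           # selects the include in main.yml
bluestore: true                      # selects the tasks in bluestore.yml
osd_containerized_deployment: false  # otherwise docker/main.yml runs instead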

View File

@@ -0,0 +1,40 @@
---
## SCENARIO 5: BLUESTORE
- include: ../check_devices.yml
# NOTE (leseb): the prepare process must be parallelized somehow...
# if you have 64 disks with 4TB each, this will take a while
# since Ansible processes the loop sequentially
# NOTE (alahouze): if the device is a partition, the parted command below
# will have failed; this is why we also check whether the device is a partition.
- name: automatic prepare osd disk(s) without partitions
command: ceph-disk prepare --bluestore "/dev/{{ item.key }}"
ignore_errors: true
register: prepared_osds
with_dict: ansible_devices
when:
ansible_devices is defined and
item.value.removable == "0" and
item.value.partitions|count == 0 and
bluestore and
osd_auto_discovery
- name: manually prepare osd disk(s)
command: ceph-disk prepare --bluestore "{{ item.2 }}"
ignore_errors: true
with_together:
- combined_parted_results.results
- combined_ispartition_results.results
- devices
when:
not item.0.get("skipped") and
not item.1.get("skipped") and
item.0.get("rc", 0) != 0 and
item.1.get("rc", 0) != 0 and
bluestore and
not osd_auto_discovery
- include: ../activate_osds.yml
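
For the auto-discovery path in the first task, a hedged sketch of the variables that drive it; every bare, non-removable disk reported in ansible_devices gets prepared with BlueStore (names from the diff, everything else left at defaults):

osd_objectstore: bluestore
bluestore: true
osd_auto_discovery: true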