diff --git a/group_vars/osds.sample b/group_vars/osds.sample
index 11f0caee1..3c34a60ac 100644
--- a/group_vars/osds.sample
+++ b/group_vars/osds.sample
@@ -118,6 +118,11 @@ dummy:
 #  - /var/lib/ceph/osd/mydir2
 
 
+# V. Fifth scenario: this will partition disks for BlueStore
+# Use 'true' to enable this scenario
+#bluestore: false
+
+
 ##########
 # DOCKER #
 ##########
diff --git a/roles/ceph-common/tasks/checks/check_mandatory_vars.yml b/roles/ceph-common/tasks/checks/check_mandatory_vars.yml
index cafd1fcbc..1926077b4 100644
--- a/roles/ceph-common/tasks/checks/check_mandatory_vars.yml
+++ b/roles/ceph-common/tasks/checks/check_mandatory_vars.yml
@@ -36,6 +36,7 @@
     msg: "journal_size must be configured. See http://ceph.com/docs/master/rados/configuration/osd-config-ref/"
   when:
     journal_size|int == 0 and
+    osd_objectstore != 'bluestore' and
     osd_group_name in group_names
 
 - name: make sure monitor_interface or monitor_address is configured
@@ -68,7 +69,8 @@
     osd_group_name in group_names and
     not journal_collocation and
     not raw_multi_journal and
-    not osd_directory
+    not osd_directory and
+    not bluestore
 
 - name: verify only one osd scenario was chosen
   fail:
@@ -78,7 +80,10 @@
     osd_group_name in group_names and
     ((journal_collocation and raw_multi_journal) or
      (journal_collocation and osd_directory) or
-     (raw_multi_journal and osd_directory))
+     (raw_multi_journal and osd_directory) or
+     (bluestore and journal_collocation) or
+     (bluestore and raw_multi_journal) or
+     (bluestore and osd_directory))
 
 - name: verify devices have been provided
   fail:
diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml
index 0b86c1fe3..30ffc5f3f 100644
--- a/roles/ceph-osd/defaults/main.yml
+++ b/roles/ceph-osd/defaults/main.yml
@@ -110,6 +110,11 @@ osd_directory: false
 #  - /var/lib/ceph/osd/mydir2
 
 
+# V. Fifth scenario: this will partition disks for BlueStore
+# Use 'true' to enable this scenario
+bluestore: false
+
+
 ##########
 # DOCKER #
 ##########
diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml
index fcda24dde..cfad81e96 100644
--- a/roles/ceph-osd/tasks/main.yml
+++ b/roles/ceph-osd/tasks/main.yml
@@ -11,5 +11,10 @@
 - include: ./scenarios/osd_directory.yml
   when: osd_directory and not osd_containerized_deployment
 
+- include: ./scenarios/bluestore.yml
+  when:
+    osd_objectstore == 'bluestore' and
+    not osd_containerized_deployment
+
 - include: ./docker/main.yml
   when: osd_containerized_deployment
diff --git a/roles/ceph-osd/tasks/scenarios/bluestore.yml b/roles/ceph-osd/tasks/scenarios/bluestore.yml
new file mode 100644
index 000000000..0ef9c95f6
--- /dev/null
+++ b/roles/ceph-osd/tasks/scenarios/bluestore.yml
@@ -0,0 +1,40 @@
+
+---
+## SCENARIO 5: BLUESTORE
+
+- include: ../check_devices.yml
+
+# NOTE (leseb): the prepare process must be parallelized somehow...
+# if you have 64 disks with 4TB each, this will take a while
+# since Ansible will process the loop sequentially
+
+# NOTE (alahouze): if the device is a partition, the parted command below will
+# have failed, which is why we also check whether the device is a partition.
+- name: automatic prepare osd disk(s) without partitions
+  command: ceph-disk prepare --bluestore "/dev/{{ item.key }}"
+  ignore_errors: true
+  register: prepared_osds
+  with_dict: ansible_devices
+  when:
+    ansible_devices is defined and
+    item.value.removable == "0" and
+    item.value.partitions|count == 0 and
+    bluestore and
+    osd_auto_discovery
+
+- name: manually prepare osd disk(s)
+  command: ceph-disk prepare --bluestore "{{ item.2 }}"
+  ignore_errors: true
+  with_together:
+    - combined_parted_results.results
+    - combined_ispartition_results.results
+    - devices
+  when:
+    not item.0.get("skipped") and
+    not item.1.get("skipped") and
+    item.0.get("rc", 0) != 0 and
+    item.1.get("rc", 0) != 0 and
+    bluestore and not
+    osd_auto_discovery
+
+- include: ../activate_osds.yml
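
For anyone trying the change out, here is a minimal, hypothetical sketch of an osds group_vars file that would exercise the new scenario; the file name and device paths are illustrative only and are not part of this diff. Note that the include in roles/ceph-osd/tasks/main.yml is gated on osd_objectstore == 'bluestore', while the tasks in bluestore.yml and the sanity checks in check_mandatory_vars.yml are gated on the bluestore boolean, so the sketch sets both.

# group_vars/osds.yml -- hypothetical example, not part of this change
osd_objectstore: bluestore   # selects scenarios/bluestore.yml in roles/ceph-osd/tasks/main.yml
bluestore: true              # enables the fifth scenario in the task conditions and sanity checks
osd_auto_discovery: false    # with false, only the disks listed under 'devices' are prepared
devices:                     # disks that "ceph-disk prepare --bluestore" will partition
  - /dev/sdb
  - /dev/sdc

With osd_auto_discovery set to true, the first task in bluestore.yml instead prepares every non-removable disk that has no partitions, based on the ansible_devices facts.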