From 225e066db26089d64ea5910f1498062f1b4d7381 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Han?=
Date: Fri, 25 Mar 2016 14:15:29 +0100
Subject: [PATCH] ceph-osd: add support for bluestore
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

With Jewel comes a new object store for Ceph: BlueStore.

Adding an extra scenario might seem like useless duplication; however,
the ultimate goal is to remove the other scenarios later. Thus it is
easier to add a new scenario now than to modify the existing ones.
Once we drop support for releases older than Jewel, we will simply
remove all the previous scenario files.

Signed-off-by: Sébastien Han
---
 group_vars/osds.sample                       |  5 +++
 roles/ceph-osd/defaults/main.yml             |  5 +++
 roles/ceph-osd/tasks/main.yml                |  5 +++
 roles/ceph-osd/tasks/scenarios/bluestore.yml | 44 ++++++++++++++++++++
 4 files changed, 59 insertions(+)
 create mode 100644 roles/ceph-osd/tasks/scenarios/bluestore.yml

diff --git a/group_vars/osds.sample b/group_vars/osds.sample
index 11f0caee1..3c34a60ac 100644
--- a/group_vars/osds.sample
+++ b/group_vars/osds.sample
@@ -118,6 +118,11 @@ dummy:
 #   - /var/lib/ceph/osd/mydir2
 
 
+# V. Fifth scenario: this will partition disks for BlueStore
+# Use 'true' to enable this scenario
+#bluestore: false
+
+
 ##########
 # DOCKER #
 ##########
diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml
index 0b86c1fe3..30ffc5f3f 100644
--- a/roles/ceph-osd/defaults/main.yml
+++ b/roles/ceph-osd/defaults/main.yml
@@ -110,6 +110,11 @@ osd_directory: false
 #   - /var/lib/ceph/osd/mydir2
 
 
+# V. Fifth scenario: this will partition disks for BlueStore
+# Use 'true' to enable this scenario
+bluestore: false
+
+
 ##########
 # DOCKER #
 ##########
diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml
index fcda24dde..cfad81e96 100644
--- a/roles/ceph-osd/tasks/main.yml
+++ b/roles/ceph-osd/tasks/main.yml
@@ -11,5 +11,10 @@
 - include: ./scenarios/osd_directory.yml
   when: osd_directory and not osd_containerized_deployment
 
+- include: ./scenarios/bluestore.yml
+  when:
+    bluestore and
+    not osd_containerized_deployment
+
 - include: ./docker/main.yml
   when: osd_containerized_deployment
diff --git a/roles/ceph-osd/tasks/scenarios/bluestore.yml b/roles/ceph-osd/tasks/scenarios/bluestore.yml
new file mode 100644
index 000000000..0ef9c95f6
--- /dev/null
+++ b/roles/ceph-osd/tasks/scenarios/bluestore.yml
@@ -0,0 +1,44 @@
+---
+## SCENARIO 5: BLUESTORE
+
+- include: ../check_devices.yml
+
+# NOTE (leseb): the prepare process must be parallelized somehow...
+# if you have 64 disks of 4TB each, this will take a while
+# since Ansible will process the loop sequentially
+
+# NOTE (alahouze): if the device is a partition, the parted command below
+# will have failed, which is why we also check whether it is a partition.
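+
+# A possible (untested) approach to the parallelization NOTE above: launch
+# each ceph-disk call with "async: 300" and "poll: 0", register the jobs,
+# then wait for them in a follow-up task using the async_status module.
+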
+- name: automatic prepare osd disk(s) without partitions
+  command: ceph-disk prepare --bluestore "/dev/{{ item.key }}"
+  ignore_errors: true
+  register: prepared_osds
+  with_dict: ansible_devices
+  when:
+    ansible_devices is defined and
+    item.value.removable == "0" and
+    item.value.partitions|count == 0 and
+    bluestore and
+    osd_auto_discovery
+
+- name: manually prepare osd disk(s)
+  command: ceph-disk prepare --bluestore "{{ item.2 }}"
+  ignore_errors: true
+  with_together:
+    - combined_parted_results.results
+    - combined_ispartition_results.results
+    - devices
+  when:
+    not item.0.get("skipped") and
+    not item.1.get("skipped") and
+    item.0.get("rc", 0) != 0 and
+    item.1.get("rc", 0) != 0 and
+    bluestore and not
+    osd_auto_discovery
+
+- include: ../activate_osds.yml
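
To exercise the new scenario, a minimal sketch (the device paths below are
placeholders, and site.yml is assumed to have been created from
site.yml.sample) would be to set the following in the group_vars osds file:

    bluestore: true
    devices:
      - /dev/sdb
      - /dev/sdc

and then run the playbook against the OSD hosts:

    ansible-playbook site.yml --limit osds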