diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml
index 1070e46f7..fe4283450 100644
--- a/roles/ceph-osd/defaults/main.yml
+++ b/roles/ceph-osd/defaults/main.yml
@@ -187,6 +187,19 @@ dedicated_devices: []
 # /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
 bluestore_wal_devices: "{{ dedicated_devices }}"
 
+# VII. Use ceph-volume to create OSDs from logical volumes.
+# Set 'lvm_osds: true' to enable this scenario. Currently we only support dedicated journals
+# when using lvm, not collocated journals.
+# lvm_volumes is a dictionary whose key/value pairs represent a data lv and its journal.
+# Journals can be a lv, a device or a partition. You cannot use the same journal for multiple data lvs.
+# For example:
+# lvm_volumes:
+#   data-lv1: journal-lv1
+#   data-lv2: /dev/sda
+#   data-lv3: /dev/sdb1
+lvm_osds: false
+lvm_volumes: {}
+
 
 ##########
 # DOCKER #
diff --git a/roles/ceph-osd/tasks/check_mandatory_vars.yml b/roles/ceph-osd/tasks/check_mandatory_vars.yml
index c3ea103b0..353be99ee 100644
--- a/roles/ceph-osd/tasks/check_mandatory_vars.yml
+++ b/roles/ceph-osd/tasks/check_mandatory_vars.yml
@@ -46,6 +46,26 @@
     - not osd_auto_discovery
     - devices|length == 0
 
+- name: verify lvm_volumes have been provided
+  fail:
+    msg: "please provide lvm_volumes to your osd scenario"
+  when:
+    - osd_group_name is defined
+    - osd_group_name in group_names
+    - lvm_osds
+    - not osd_auto_discovery
+    - lvm_volumes|length == 0
+
+- name: make sure the lvm_volumes variable is a dictionary
+  fail:
+    msg: "lvm_volumes: must be a dictionary"
+  when:
+    - osd_group_name is defined
+    - osd_group_name in group_names
+    - not osd_auto_discovery
+    - lvm_osds
+    - lvm_volumes is not mapping
+
 - name: make sure the devices variable is a list
   fail:
     msg: "devices: must be a list, not a string, i.e. [ \"/dev/sda\" ]"
@@ -87,3 +107,13 @@
     - not containerized_deployment
     - osd_objectstore == 'bluestore'
     - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
+
+- name: check if lvm_osds is supported by the selected ceph version
+  fail:
+    msg: "lvm_osds is not supported by the selected Ceph version, use Luminous or above."
+  when:
+    - osd_group_name is defined
+    - osd_group_name in group_names
+    - not containerized_deployment
+    - lvm_osds
+    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml
index 5fd6185e0..299d7f933 100644
--- a/roles/ceph-osd/tasks/main.yml
+++ b/roles/ceph-osd/tasks/main.yml
@@ -20,6 +20,14 @@
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
 
+- name: create lvm OSDs with ceph-volume
+  include: ./scenarios/lvm.yml
+  when:
+    - lvm_osds
+    - not containerized_deployment
+  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
+  static: False
+
 - include: ./docker/main.yml
   when: containerized_deployment
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
diff --git a/roles/ceph-osd/tasks/scenarios/lvm.yml b/roles/ceph-osd/tasks/scenarios/lvm.yml
new file mode 100644
index 000000000..7d439aa33
--- /dev/null
+++ b/roles/ceph-osd/tasks/scenarios/lvm.yml
@@ -0,0 +1,5 @@
+---
+
+- name: use ceph-volume to create filestore OSDs with dedicated journals
+  command: "ceph-volume lvm create --filestore --data {{ item.key }} --journal {{ item.value }}"
+  with_dict: "{{ lvm_volumes }}"
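
Example usage (a minimal sketch, not part of the patch): with the defaults added above, the new scenario would be enabled from the osds group_vars, e.g. a file such as group_vars/osds.yml. The lv and device names below are hypothetical placeholders and only illustrate the three journal forms documented in defaults/main.yml (another lv, a whole device, a partition).

# group_vars/osds.yml -- illustrative only, names are made up
lvm_osds: true
lvm_volumes:
  data-lv1: journal-lv1   # journal on another logical volume
  data-lv2: /dev/sda      # journal on a whole device
  data-lv3: /dev/sdb1     # journal on a partition

For each key/value pair in lvm_volumes, scenarios/lvm.yml then runs "ceph-volume lvm create --filestore --data <data lv> --journal <journal>" via with_dict.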