From e597628be9edbc6ba058c38448ac0171fb4bb324 Mon Sep 17 00:00:00 2001
From: Andrew Schoen
Date: Thu, 3 Aug 2017 10:08:34 -0500
Subject: [PATCH] lvm: update scenario for new osd_scenario variable

Signed-off-by: Andrew Schoen
---
 docs/source/osds/scenarios.rst                | 19 +++++++++++--------
 group_vars/osds.yml.sample                    |  6 +++---
 roles/ceph-osd/defaults/main.yml              |  7 ++++---
 roles/ceph-osd/tasks/check_mandatory_vars.yml | 16 ++++++++--------
 roles/ceph-osd/tasks/main.yml                 |  2 +-
 roles/ceph-osd/tasks/start_osds.yml           |  2 +-
 .../centos/7/lvm-osds/group_vars/all          |  2 +-
 7 files changed, 29 insertions(+), 25 deletions(-)

diff --git a/docs/source/osds/scenarios.rst b/docs/source/osds/scenarios.rst
index 03c46a2d1..6ba834c1d 100644
--- a/docs/source/osds/scenarios.rst
+++ b/docs/source/osds/scenarios.rst
@@ -1,26 +1,29 @@
 OSD Scenarios
 =============
 
-lvm_osds
---------
+lvm
+---
 
 This OSD scenario uses ``ceph-volume`` to create OSDs from logical volumes and
-is only available when the ceph release is Luminous or greater.
+is only available when the Ceph release is Luminous or newer.
 
 .. note::
-   The creation of the logical volumes is not supported by ceph-ansible, ceph-volume
+   The creation of the logical volumes is not supported by ``ceph-ansible``; ``ceph-volume``
    only creates OSDs from existing logical volumes.
 
-Use ``lvm_osds:true`` to enable this scenario. Currently we only support dedicated journals
+Use ``osd_scenario: lvm`` to enable this scenario. Currently we only support dedicated journals
 when using lvm, not collocated journals.
 
 To configure this scenario use the ``lvm_volumes`` config option. ``lvm_volumes`` is a dictionary whose
 key/value pairs represent a data lv and a journal pair. Journals can be either a lv, device or partition.
 You can not use the same journal for many data lvs.
 
-For example, a configuration to use ``lvm_osds`` would look like::
-
-    lvm_osds: true
+.. note::
+   Any logical volume or volume group used in ``lvm_volumes`` must be a name and not a path.
+
+For example, a configuration to use the ``lvm`` osd scenario would look like::
+
+    osd_scenario: lvm
     lvm_volumes:
      data-lv1: journal-lv1
      data-lv2: /dev/sda
diff --git a/group_vars/osds.yml.sample b/group_vars/osds.yml.sample
index 67bff173a..2cf8e1292 100644
--- a/group_vars/osds.yml.sample
+++ b/group_vars/osds.yml.sample
@@ -114,6 +114,7 @@ dummy:
 #valid_osd_scenarios:
 #  - collocated
 #  - non-collocated
+#  - lvm
 
 
 # II. Second scenario: non-collocated
@@ -195,8 +196,8 @@ dummy:
 #    /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
 #bluestore_wal_devices: "{{ dedicated_devices }}"
 
-# VII. Use ceph-volume to create OSDs from logical volumes.
-# Use 'lvm_osds:true' to enable this scenario. Currently we only support dedicated journals
+# III. Use ceph-volume to create OSDs from logical volumes.
+# Use 'osd_scenario: lvm' to enable this scenario. Currently we only support dedicated journals
 # when using lvm, not collocated journals.
 # lvm_volumes is a dictionary whose key/value pair represent a data lv and a journal pair.
 # Journals can be either a lv, device or partition. You can not use the same journal for many data lvs.
@@ -205,7 +206,6 @@ dummy:
 #    data-lv1: journal-lv1
 #    data-lv2: /dev/sda
 #    data:lv3: /dev/sdb1
-#lvm_osds: false
 #lvm_volumes: {}
 
 
diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml
index fe4283450..30a087cf6 100644
--- a/roles/ceph-osd/defaults/main.yml
+++ b/roles/ceph-osd/defaults/main.yml
@@ -106,6 +106,7 @@ osd_scenario: dummy
 valid_osd_scenarios:
   - collocated
   - non-collocated
+  - lvm
 
 
 # II. Second scenario: non-collocated
@@ -187,17 +188,17 @@ dedicated_devices: []
 #    /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
 bluestore_wal_devices: "{{ dedicated_devices }}"
 
-# VII. Use ceph-volume to create OSDs from logical volumes.
-# Use 'lvm_osds:true' to enable this scenario. Currently we only support dedicated journals
+# III. Use ceph-volume to create OSDs from logical volumes.
+# Use 'osd_scenario: lvm' to enable this scenario. Currently we only support dedicated journals
 # when using lvm, not collocated journals.
 # lvm_volumes is a dictionary whose key/value pair represent a data lv and a journal pair.
+# Any logical volume or volume group used must be a name and not a path.
 # Journals can be either a lv, device or partition. You can not use the same journal for many data lvs.
 # For example:
 # lvm_volumes:
 #   data-lv1: journal-lv1
 #   data-lv2: /dev/sda
 #   data:lv3: /dev/sdb1
-lvm_osds: false
 lvm_volumes: {}
 
 
diff --git a/roles/ceph-osd/tasks/check_mandatory_vars.yml b/roles/ceph-osd/tasks/check_mandatory_vars.yml
index 0cd1ddbf3..516542c0a 100644
--- a/roles/ceph-osd/tasks/check_mandatory_vars.yml
+++ b/roles/ceph-osd/tasks/check_mandatory_vars.yml
@@ -46,23 +46,23 @@
     - not osd_auto_discovery
     - devices|length == 0
 
-- name: check if lvm_osds is supported by the selected ceph version
+- name: check if osd_scenario lvm is supported by the selected ceph version
   fail:
-    msg: "lvm_osds is not supported by the selected Ceph version, use Luminous or above."
+    msg: "osd_scenario lvm is not supported by the selected Ceph version, use Luminous or newer."
   when:
     - osd_group_name is defined
     - osd_group_name in group_names
     - not containerized_deployment
-    - lvm_osds
+    - osd_scenario == "lvm"
     - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
 
-- name: verify osd_objectstore is 'filestore' when using lvm_osds
+- name: verify osd_objectstore is 'filestore' when using the lvm osd_scenario
   fail:
-    msg: "the lvm_osds scenario currently only works for filestore, not bluestore"
+    msg: "the lvm osd_scenario currently only works for filestore, not bluestore"
   when:
     - osd_group_name is defined
     - osd_group_name in group_names
-    - lvm_osds
+    - osd_scenario == "lvm"
     - not osd_auto_discovery
     - osd_objectstore != 'filestore'
 
@@ -72,7 +72,7 @@
   when:
     - osd_group_name is defined
     - osd_group_name in group_names
-    - lvm_osds
+    - osd_scenario == "lvm"
     - not osd_auto_discovery
     - lvm_volumes|length == 0
 
@@ -83,7 +83,7 @@
     - osd_group_name is defined
     - osd_group_name in group_names
     - not osd_auto_discovery
-    - lvm_osds
+    - osd_scenario == "lvm"
     - lvm_volumes is not mapping
 
 - name: make sure the devices variable is a list
diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml
index 2dc9d5e84..1784e27fc 100644
--- a/roles/ceph-osd/tasks/main.yml
+++ b/roles/ceph-osd/tasks/main.yml
@@ -23,7 +23,7 @@
 - name: create lvm osds with ceph-volume
   include: ./scenarios/lvm.yml
   when:
-    - lvm_osds
+    - osd_scenario == 'lvm'
     - not containerized_deployment
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
diff --git a/roles/ceph-osd/tasks/start_osds.yml b/roles/ceph-osd/tasks/start_osds.yml
index 4b2e1fcad..b0978fc81 100644
--- a/roles/ceph-osd/tasks/start_osds.yml
+++ b/roles/ceph-osd/tasks/start_osds.yml
@@ -10,7 +10,7 @@
   always_run: true
   register: osd_id
 
-- name: start and add that the osd service(s) to the init sequence
+- name: ensure osd daemons are started
   service:
     name: ceph-osd@{{ item }}
     state: started
diff --git a/tests/functional/centos/7/lvm-osds/group_vars/all b/tests/functional/centos/7/lvm-osds/group_vars/all
index d3fe977ee..36d6bbf7b 100644
--- a/tests/functional/centos/7/lvm-osds/group_vars/all
+++ b/tests/functional/centos/7/lvm-osds/group_vars/all
@@ -7,7 +7,7 @@ cluster_network: "192.168.4.0/24"
 monitor_interface: eth1
 journal_size: 100
 osd_objectstore: "filestore"
-lvm_osds: true
+osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sda
 lvm_volumes:
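
---

For reviewers, a complete ``group_vars/osds.yml`` for this scenario might look like
the following. This is a minimal sketch, not taken verbatim from the repo: the lv and
journal names are illustrative and must already exist, since ``ceph-volume`` does not
create them, and per the note above they must be names, not paths::

    osd_scenario: lvm
    osd_objectstore: filestore
    journal_size: 100
    lvm_volumes:
      data-lv1: journal-lv1
      data-lv2: /dev/sda
      data-lv3: /dev/sdb1

Note that the checks added in ``check_mandatory_vars.yml`` will fail this configuration
unless ``osd_objectstore`` is ``filestore``, ``lvm_volumes`` is a non-empty mapping, and
the selected Ceph release is Luminous or newer.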
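Since ``ceph-ansible`` will not create the logical volumes itself (the functional tests
do this in ``tests/functional/lvm_setup.yml``), operators need to provision them before
running the playbook. One possible pre-flight play, using the standard Ansible ``lvg``
and ``lvol`` modules; the ``osds`` host group, the ``data`` volume group name, and all
sizes are illustrative assumptions, not values from this patch::

    - hosts: osds
      become: true
      tasks:
        # Build a volume group on the raw device that will back the OSD.
        - name: create a volume group for osd lvs
          lvg:
            vg: data
            pvs: /dev/sda

        # A dedicated journal lv; collocated journals are not supported by
        # this scenario, so data and journal are separate lvs.
        - name: create the journal lv
          lvol:
            vg: data
            lv: journal-lv1
            size: 5g

        - name: create the data lv
          lvol:
            vg: data
            lv: data-lv1
            size: 50g

With those lvs in place, ``lvm_volumes`` can reference them by name, e.g.
``data-lv1: journal-lv1``.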