lvm: update scenario for new osd_scenario variable

Signed-off-by: Andrew Schoen <aschoen@redhat.com>
pull/1716/head
Andrew Schoen 2017-08-03 10:08:34 -05:00
parent 3b5a06bb3c
commit e597628be9
7 changed files with 28 additions and 25 deletions


@@ -1,26 +1,28 @@
OSD Scenarios
=============
lvm_osds
--------
lvm
---
This OSD scenario uses ``ceph-volume`` to create OSDs from logical volumes and
is only available when the ceph release is Luminous or greater.
is only available when the ceph release is Luminous or newer.
.. note::
The creation of the logical volumes is not supported by ceph-ansible, ceph-volume
The creation of the logical volumes is not supported by ``ceph-ansible``; ``ceph-volume``
only creates OSDs from existing logical volumes.
Use ``lvm_osds:true`` to enable this scenario. Currently we only support dedicated journals
Use ``osd_scenario: lvm`` to enable this scenario. Currently we only support dedicated journals
when using lvm, not collocated journals.
To configure this scenario use the ``lvm_volumes`` config option. ``lvm_volumes`` is a dictionary whose
key/value pairs represent a data lv and a journal pair. Journals can be either an lv, a device, or a partition.
You cannot use the same journal for multiple data lvs.
For example, a configuration to use ``lvm_osds`` would look like::
lvm_osds: true
.. note::
Any logical volume or volume group used in ``lvm_volumes`` must be a name and not a path.
For example, a configuration to use the ``lvm`` osd scenario would look like::
osd_scenario: lvm
lvm_volumes:
data-lv1: journal-lv1
data-lv2: /dev/sda
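
Since ``ceph-ansible`` will not create the logical volumes for you, they must already exist
on the OSD nodes before the playbook runs. A minimal sketch of preparing them with the stock
Ansible ``lvg`` and ``lvol`` modules is shown below; the volume group name, backing device and
sizes are illustrative assumptions, not something shipped by this change::

    # hypothetical prep playbook; adjust the device, vg name and sizes to your hardware
    - hosts: osds
      become: true
      tasks:
        - name: create a volume group on the spare disk
          lvg:
            vg: data-vg           # assumed name, not defined anywhere in ceph-ansible
            pvs: /dev/sda

        - name: create the data logical volume
          lvol:
            vg: data-vg
            lv: data-lv1
            size: 10g

        - name: create the journal logical volume
          lvol:
            vg: data-vg
            lv: journal-lv1
            size: 2g

The resulting ``data-lv1`` and ``journal-lv1`` names are what ``lvm_volumes`` then refers to.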


@@ -114,6 +114,7 @@ dummy:
#valid_osd_scenarios:
# - collocated
# - non-collocated
# - lvm
# II. Second scenario: non-collocated
@@ -195,8 +196,8 @@ dummy:
# /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
#bluestore_wal_devices: "{{ dedicated_devices }}"
# VII. Use ceph-volume to create OSDs from logical volumes.
# Use 'lvm_osds:true' to enable this scenario. Currently we only support dedicated journals
# III. Use ceph-volume to create OSDs from logical volumes.
# Use 'osd_scenario: lvm' to enable this scenario. Currently we only support dedicated journals
# when using lvm, not collocated journals.
# lvm_volumes is a dictionary whose key/value pairs represent a data lv and a journal pair.
# Journals can be either an lv, a device, or a partition. You cannot use the same journal for multiple data lvs.
@@ -205,7 +206,6 @@ dummy:
# data-lv1: journal-lv1
# data-lv2: /dev/sda
# data-lv3: /dev/sdb1
#lvm_osds: false
#lvm_volumes: {}


@@ -106,6 +106,7 @@ osd_scenario: dummy
valid_osd_scenarios:
- collocated
- non-collocated
- lvm
# II. Second scenario: non-collocated
@@ -187,17 +188,17 @@ dedicated_devices: []
# /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
bluestore_wal_devices: "{{ dedicated_devices }}"
# VII. Use ceph-volume to create OSDs from logical volumes.
# Use 'lvm_osds:true' to enable this scenario. Currently we only support dedicated journals
# III. Use ceph-volume to create OSDs from logical volumes.
# Use 'osd_scenario: lvm' to enable this scenario. Currently we only support dedicated journals
# when using lvm, not collocated journals.
# lvm_volumes is a dictionary whose key/value pairs represent a data lv and a journal pair.
# Any logical volume or volume group used must be a name and not a path.
# Journals can be either an lv, a device, or a partition. You cannot use the same journal for multiple data lvs.
# For example:
# lvm_volumes:
# data-lv1: journal-lv1
# data-lv2: /dev/sda
# data-lv3: /dev/sdb1
lvm_osds: false
lvm_volumes: {}
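
Putting these defaults together, a hedged example of what a real ``group_vars/osds.yml`` could
contain once the lvm scenario is enabled is shown below; the lv and device names are placeholders,
and ``filestore`` is set because the scenario does not support bluestore yet::

    osd_scenario: lvm
    osd_objectstore: filestore    # the lvm scenario currently only works with filestore
    lvm_volumes:
      data-lv1: journal-lv1       # journal on another logical volume
      data-lv2: /dev/sdb1         # journal on a raw partition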


@@ -46,23 +46,23 @@
- not osd_auto_discovery
- devices|length == 0
- name: check if lvm_osds is supported by the selected ceph version
- name: check if osd_scenario lvm is supported by the selected ceph version
fail:
msg: "lvm_osds is not supported by the selected Ceph version, use Luminous or above."
msg: "osd_scenario lvm is not supported by the selected Ceph version, use Luminous or newer."
when:
- osd_group_name is defined
- osd_group_name in group_names
- not containerized_deployment
- lvm_osds
- osd_scenario == "lvm"
- ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
- name: verify osd_objectstore is 'filestore' when using lvm_osds
- name: verify osd_objectstore is 'filestore' when using the lvm osd_scenario
fail:
msg: "the lvm_osds scenario currently only works for filestore, not bluestore"
msg: "the lvm osd_scenario currently only works for filestore, not bluestore"
when:
- osd_group_name is defined
- osd_group_name in group_names
- lvm_osds
- osd_scenario == "lvm"
- not osd_auto_discovery
- osd_objectstore != 'filestore'
@@ -72,7 +72,7 @@
when:
- osd_group_name is defined
- osd_group_name in group_names
- lvm_osds
- osd_scenario == "lvm"
- not osd_auto_discovery
- lvm_volumes|length == 0
@@ -83,7 +83,7 @@
- osd_group_name is defined
- osd_group_name in group_names
- not osd_auto_discovery
- lvm_osds
- osd_scenario == "lvm"
- lvm_volumes is not mapping
- name: make sure the devices variable is a list
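
The new ``lvm_volumes is not mapping`` condition above catches a configuration that is valid YAML
but the wrong shape: ``lvm_volumes`` has to be a dictionary of data lv to journal, not a list.
A quick illustration with example names::

    # accepted: a mapping of data lv -> journal
    lvm_volumes:
      data-lv1: journal-lv1

    # rejected by the check: a list instead of a mapping
    lvm_volumes:
      - data-lv1
      - data-lv2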


@@ -23,7 +23,7 @@
- name: create lvm osds with ceph-volume
include: ./scenarios/lvm.yml
when:
- lvm_osds
- osd_scenario == 'lvm'
- not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (the default isn't consistent)
static: False
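
When that condition matches, ``scenarios/lvm.yml`` is pulled in; its core is essentially a loop
over ``lvm_volumes`` that shells out to ``ceph-volume``. The task below is only an approximation
for orientation, not the verbatim upstream file, and the exact ``ceph-volume`` flags should be
checked against the installed release::

    - name: use ceph-volume to create filestore osds with dedicated journals
      command: "ceph-volume lvm create --filestore --data {{ item.key }} --journal {{ item.value }}"
      with_dict: "{{ lvm_volumes }}"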


@@ -10,7 +10,7 @@
always_run: true
register: osd_id
- name: start and add that the osd service(s) to the init sequence
- name: ensure osd daemons are started
service:
name: ceph-osd@{{ item }}
state: started
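
The renamed task iterates over the OSD ids captured in ``osd_id`` just above. A hedged
reconstruction of the complete task, with the loop and the ``enabled`` flag assumed from the
surrounding unchanged lines, would look like::

    - name: ensure osd daemons are started
      service:
        name: ceph-osd@{{ item }}          # one systemd instance per OSD id
        state: started
        enabled: yes                       # assumed; not shown in this hunk
      with_items: "{{ osd_id.stdout_lines }}"   # assumed loop source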


@@ -7,7 +7,7 @@ cluster_network: "192.168.4.0/24"
monitor_interface: eth1
journal_size: 100
osd_objectstore: "filestore"
lvm_osds: true
osd_scenario: lvm
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sda
lvm_volumes: