adds a new 'lvm_osds' osd scenario

This scenario will create OSDs using ceph-volume and is only available
in Ceph releases Luminous and above.

Signed-off-by: Andrew Schoen <aschoen@redhat.com>
pull/1716/head
Andrew Schoen 2017-07-19 10:05:42 -05:00
parent ff8bb3d1d7
commit b93794bed4
4 changed files with 56 additions and 0 deletions

View File

@ -187,6 +187,19 @@ dedicated_devices: []
# /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3" # /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
bluestore_wal_devices: "{{ dedicated_devices }}" bluestore_wal_devices: "{{ dedicated_devices }}"
# VII. Use ceph-volume to create OSDs from logical volumes.
# Set 'lvm_osds: true' to enable this scenario. Currently we only support dedicated journals
# when using lvm, not collocated journals.
# lvm_volumes is a dictionary whose key/value pairs represent a data lv and a journal pair.
# Journals can be either a lv, device or partition. You cannot use the same journal for multiple data lvs.
# For example:
# lvm_volumes:
#   data-lv1: journal-lv1
#   data-lv2: /dev/sda
#   data-lv3: /dev/sdb1
lvm_osds: false
lvm_volumes: {}
########## ##########
# DOCKER # # DOCKER #

View File

@ -46,6 +46,26 @@
- not osd_auto_discovery - not osd_auto_discovery
- devices|length == 0 - devices|length == 0
# Fail fast when the lvm scenario is enabled but no data-lv/journal pairs
# were supplied — ceph-volume would have nothing to create.
- name: verify lvm_volumes have been provided
  fail:
    msg: "please provide lvm_volumes to your osd scenario"
  when:
    - osd_group_name is defined
    - osd_group_name in group_names
    - lvm_osds
    - not osd_auto_discovery
    - lvm_volumes|length == 0
# The lvm scenario iterates lvm_volumes with with_dict, so it must be a
# mapping of data lv -> journal (not a list or a plain string).
- name: make sure the lvm_volumes variable is a dictionary
  fail:
    msg: "lvm_volumes: must be a dictionary"
  when:
    - osd_group_name is defined
    - osd_group_name in group_names
    - not osd_auto_discovery
    - lvm_osds
    - lvm_volumes is not mapping
- name: make sure the devices variable is a list - name: make sure the devices variable is a list
fail: fail:
msg: "devices: must be a list, not a string, i.e. [ \"/dev/sda\" ]" msg: "devices: must be a list, not a string, i.e. [ \"/dev/sda\" ]"
@ -87,3 +107,13 @@
- not containerized_deployment - not containerized_deployment
- osd_objectstore == 'bluestore' - osd_objectstore == 'bluestore'
- ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
# Abort early when lvm_osds is requested on a pre-Luminous release:
# ceph-volume is only shipped with Luminous (12.x) and later.
- name: check if lvm_osds is supported by the selected ceph version
  fail:
    msg: "lvm_osds is not supported by the selected Ceph version, use Luminous or above."
  when:
    - osd_group_name is defined
    - osd_group_name in group_names
    - not containerized_deployment
    - lvm_osds
    # Bracket lookup instead of '{{ }}': when-clauses are already Jinja
    # expressions, and embedding templating delimiters in them is
    # deprecated by Ansible (same result, correct idiom).
    - ceph_release_num[ceph_release] < ceph_release_num.luminous

View File

@ -20,6 +20,14 @@
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False static: False
# Dispatch to the ceph-volume (lvm) scenario; only runs for bare-metal
# deployments with lvm_osds enabled.
- name: create lvm OSDs with ceph-volume
  include: ./scenarios/lvm.yml
  when:
    - lvm_osds
    - not containerized_deployment
  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
  static: False
- include: ./docker/main.yml - include: ./docker/main.yml
when: containerized_deployment when: containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)

View File

@ -0,0 +1,5 @@
---
# Create filestore OSDs with ceph-volume from the lvm_volumes mapping:
# each key is a data logical volume, each value its dedicated journal
# (an lv, a raw device, or a partition).
- name: use ceph-volume to create filestore OSDs with dedicated journals
  command: "ceph-volume lvm create --filestore --data {{ item.key }} --journal {{ item.value }}"
  with_dict: "{{ lvm_volumes }}"
  # NOTE(review): 'ceph-volume lvm create' is presumably not idempotent —
  # re-running against already-prepared volumes may fail; confirm intended
  # behavior on repeated playbook runs.