osd: simplify scenarios

There are only two main scenarios now:

* collocated: everything remains on the same device:
  - data, db, wal for bluestore
  - data and journal for filestore
* non-collocated: dedicated devices for some of the components
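
For example, a minimal group_vars sketch (device names are illustrative):

    osd_scenario: collocated
    devices:
      - /dev/sda
      - /dev/sdb

    # or, with journals (filestore) / db and wal (bluestore) on dedicated disks:
    osd_scenario: non-collocated
    devices:
      - /dev/sda
      - /dev/sdb
    dedicated_devices:
      - /dev/sdf
      - /dev/sdf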

Signed-off-by: Sébastien Han <seb@redhat.com>
pull/1725/head
Sébastien Han 2017-07-27 17:05:59 +02:00
parent 6e379157ec
commit 30991b1c0a
32 changed files with 253 additions and 272 deletions

Vagrantfile

@@ -86,7 +86,7 @@ ansible_provision = proc do |ansible|
else
ansible.extra_vars = ansible.extra_vars.merge({
devices: settings['disks'],
journal_collocation: 'true',
osd_scenario: 'collocated',
monitor_interface: ETH,
os_tuning_params: settings['os_tuning_params'],
pool_default_size: '2',
@@ -100,7 +100,7 @@ ansible_provision = proc do |ansible|
ansible.extra_vars = ansible.extra_vars.merge({
cluster_network: "#{CLUSTER_SUBNET}.0/16",
devices: ['/dev/sdc'], # hardcode leftover disk
journal_collocation: 'true',
osd_scenario: 'collocated',
monitor_address_block: "#{PUBLIC_SUBNET}.0/16",
public_network: "#{PUBLIC_SUBNET}.0/16",
})


@@ -118,7 +118,7 @@ function cp_var {
function populate_vars {
sed -i "s/[#]*osd_auto_discovery: .*/osd_auto_discovery: true/" group_vars/osds.yml
sed -i "s/[#]*journal_collocation: .*/journal_collocation: true/" group_vars/osds.yml
sed -i "s/[#]*osd_scenario: .*/osd_scenario: collocated/" group_vars/osds.yml
sed -i "s/[#]*monitor_address: .*/monitor_address: ${IP}/" group_vars/all.yml
sed -i "s/[#]*journal_size: .*/journal_size: 100/" group_vars/all.yml
sed -i "s|[#]*public_network: .*|public_network: ${SUBNET}|" group_vars/all.yml


@@ -85,51 +85,68 @@ dummy:
#
#osd_auto_discovery: false
# Encrypt your OSD device using dmcrypt
# If set to True, no matter which osd_objectstore and osd_scenario you use, the data will be encrypted
#dmcrypt: false
# !! WARNING !!
# #
# # /!\ ENABLE ONLY ONE SCENARIO AT A TIME /!\
# #
# # !! WARNING !!
#
# I. First scenario: journal and osd_data on the same device
# Use 'true' to enable this scenario
# This will collocate both journal and data on the same disk
# creating a partition at the beginning of the device
# List devices under 'devices' variable above or choose 'osd_auto_discovery'
# I. First scenario: collocated
#
# To enable this scenario do: osd_scenario: collocated
#
#
# If osd_objectstore: bluestore is enabled both rocksdb DB and WAL will be stored
# on the device. So the device will get 2 partitions:
# - One for 'data', also called 'block'
# - One for block, db, and wal data
# If osd_objectstore: filestore is enabled both 'ceph data' and 'ceph journal' partitions
# will be stored on the same device.
#
# If osd_objectstore: bluestore is enabled 'ceph data', 'ceph block', 'ceph block.db', 'ceph block.wal' will be stored
# on the same device. The device will get 2 partitions:
# - One for 'data', called 'ceph data'
# - One for 'ceph block', 'ceph block.db', 'ceph block.wal' called 'ceph block'
#
# Example of what you will get:
# [root@ceph-osd0 ~]# blkid /dev/sda*
# /dev/sda: PTTYPE="gpt"
# /dev/sda1: UUID="9c43e346-dd6e-431f-92d8-cbed4ccb25f6" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="749c71c9-ed8f-4930-82a7-a48a3bcdb1c7"
# /dev/sda2: PARTLABEL="ceph block" PARTUUID="e6ca3e1d-4702-4569-abfa-e285de328e9d"
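#
# A minimal sketch of enabling this scenario (device names are illustrative):
#
# osd_scenario: collocated
# devices:
#   - /dev/sda
#   - /dev/sdb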
#journal_collocation: false
#
#osd_scenario: dummy
#valid_osd_scenarios:
# - collocated
# - non-collocated
# II. Second scenario: N journal devices for N OSDs
# Use 'true' for 'raw_multi_journal' to enable this scenario
# List devices under 'devices' variable above and
# write journal devices for those under 'raw_journal_devices'
# In the following example:
# * sdb and sdc will get sdf as a journal
# * sdd and sde will get sdg as a journal
# While starting you have 2 options:
# 1. Pre-allocate all the devices
# 2. Progressively add new devices
#raw_multi_journal: false
#raw_journal_devices:
# - /dev/sdf
# - /dev/sdf
# - /dev/sdg
# - /dev/sdg
# II. Second scenario: non-collocated
#
# To enable this scenario do: osd_scenario: non-collocated
#
# If osd_objectstore: filestore is enabled 'ceph data' and 'ceph journal' partitions
# will be stored on different devices:
# - 'ceph data' will be stored on the device listed in 'devices'
# - 'ceph journal' will be stored on the device listed in 'dedicated_devices'
#
# Let's take an example, imagine 'devices' was declared like this:
#
# devices:
# - /dev/sda
# - /dev/sdb
# - /dev/sdc
# - /dev/sdd
#
# And 'dedicated_devices' was declared like this:
#
# dedicated_devices:
# - /dev/sdf
# - /dev/sdf
# - /dev/sdg
# - /dev/sdg
#
# This will result in the following mapping:
# - /dev/sda will have /dev/sdf1 as journal
# - /dev/sdb will have /dev/sdf2 as a journal
# - /dev/sdc will have /dev/sdg1 as a journal
# - /dev/sdd will have /dev/sdg2 as a journal
#
#
# NOTE(leseb):
# In a containerized scenario we only support A SINGLE journal
@@ -137,15 +154,17 @@ dummy:
# This is a limitation we plan to fix at some point.
#
#
# If osd_objectstore: bluestore is enabled, both rocksdb DB and WAL will be stored
# on a dedicated device. So the following will happen:
# If osd_objectstore: bluestore is enabled, both 'ceph block.db' and 'ceph block.wal' partitions will be stored
# on a dedicated device.
#
# So the following will happen:
# - The devices listed in 'devices' will get 2 partitions, one for 'block' and one for 'data'.
# 'data' is only 100MB and does not store any of your data, it's just a bunch of Ceph metadata.
# 'block' will store all your data.
# - The devices in 'raw_journal_devices' will get 1 partition for RocksDB DB, called 'block.db'
# 'block' will store all your actual data.
# - The devices in 'dedicated_devices' will get 1 partition for RocksDB DB, called 'block.db'
# and one for RocksDB WAL, called 'block.wal'
#
# By default raw_journal_devices will represent block.db
# By default dedicated_devices will represent block.db
#
# Example of what you will get:
# [root@ceph-osd0 ~]# blkid /dev/sd*
@@ -155,17 +174,15 @@ dummy:
# /dev/sdb: PTTYPE="gpt"
# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="af5b2d74-4c08-42cf-be57-7248c739e217"
# /dev/sdb2: PARTLABEL="ceph block.wal" PARTUUID="af3f8327-9aa9-4c2b-a497-cf0fe96d126a"
#raw_journal_devices: []
#dedicated_devices: []
# IV. This will partition disks for BlueStore
# To enable bluestore just set:
# osd_objectstore: bluestore
# More device granularity for Bluestore
#
# If osd_objectstore: bluestore is enabled.
# By default, if bluestore_wal_devices is empty, it will get the content of raw_journal_devices.
# If set, then you will have a dedicated partition on a specific device (bluestore_wal_devices)
# for block.wal
# ONLY if osd_objectstore: bluestore is enabled.
#
# By default, if 'bluestore_wal_devices' is empty, it will get the content of 'dedicated_devices'.
# If set, then you will have a dedicated partition on a specific device for block.wal.
#
# Example of what you will get:
# [root@ceph-osd0 ~]# blkid /dev/sd*
@@ -176,22 +193,7 @@ dummy:
# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="0734f6b6-cc94-49e9-93de-ba7e1d5b79e3"
# /dev/sdc: PTTYPE="gpt"
# /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
#bluestore_wal_devices: "{{ raw_journal_devices }}"
# V. Encrypt osd data and/or journal devices with dm-crypt.
# Keys are stored into the monitors k/v store
# Use 'true' to enable this scenario
# Both journal and data are stored on the same dm-crypt encrypted device
#dmcrypt_journal_collocation: false
# VI. Encrypt osd data and/or journal devices with dm-crypt.
# Keys are stored into the monitors k/v store
# Use 'true' to enable this scenario
# Journal and osd data are separated, each with their own dm-crypt device
# You must use raw_journal_devices and set your journal devices
#dmcrypt_dedicated_journal: false
#bluestore_wal_devices: "{{ dedicated_devices }}"
##########
@@ -201,22 +203,22 @@ dummy:
#ceph_config_keys: [] # DON'T TOUCH ME
# PREPARE DEVICE
# Make sure you only pass a single device to raw_journal_devices, otherwise this will fail horribly.
# Make sure you only pass a single device to dedicated_devices, otherwise this will fail horribly.
# This is why we use [0] in the example.
#
# WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
#
# Examples:
# Journal collocated on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_FILESTORE=1
# Dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1
# Dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
# Encrypted OSD on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
# Encrypted OSD with dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1
# Encrypted OSD with dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
#
# Bluestore OSD collocated: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1
# Bluestore OSD with dedicated db: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
# Bluestore OSD with dedicated db and wal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_BLUESTORE_BLOCK_WAL={{ bluestore_wal_devices[0] }}
# Bluestore OSD with dedicated db: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }}
# Bluestore OSD with dedicated db and wal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_BLUESTORE_BLOCK_WAL={{ bluestore_wal_devices[0] }}
# Encrypted OSD: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1
# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
#
#
#ceph_osd_docker_devices: "{{ devices }}"


@@ -343,7 +343,7 @@
with_items: "{{ ceph_journal_partition_to_erase_path.stdout_lines | default([]) }}"
when:
- ceph_journal_partlabels.rc == 0
- (raw_multi_journal is defined and raw_multi_journal) or (dmcrypt_dedicated_journal is defined and dmcrypt_dedicated_journal)
- osd_scenario == 'non-collocated'
- name: purge ceph mon cluster


@@ -270,7 +270,7 @@
zap_device
with_items:
- "{{ ceph_osd_docker_devices }}"
- "{{ raw_journal_devices|default([]) }}"
- "{{ dedicated_devices|default([]) }}"
- name: wait until the zap containers die
shell: |
@@ -288,7 +288,7 @@
state: absent
with_items:
- "{{ ceph_osd_docker_devices }}"
- "{{ raw_journal_devices|default([]) }}"
- "{{ dedicated_devices|default([]) }}"
- name: remove ceph osd service
file:


@@ -77,51 +77,68 @@ devices: []
#
osd_auto_discovery: false
# Encrypt your OSD device using dmcrypt
# If set to True, no matter which osd_objectstore and osd_scenario you use, the data will be encrypted
dmcrypt: false
# !! WARNING !!
# #
# # /!\ ENABLE ONLY ONE SCENARIO AT A TIME /!\
# #
# # !! WARNING !!
#
# I. First scenario: journal and osd_data on the same device
# Use 'true' to enable this scenario
# This will collocate both journal and data on the same disk
# creating a partition at the beginning of the device
# List devices under 'devices' variable above or choose 'osd_auto_discovery'
# I. First scenario: collocated
#
# To enable this scenario do: osd_scenario: collocated
#
#
# If osd_objectstore: bluestore is enabled both rocksdb DB and WAL will be stored
# on the device. So the device will get 2 partitions:
# - One for 'data', also called 'block'
# - One for block, db, and wal data
# If osd_objectstore: filestore is enabled both 'ceph data' and 'ceph journal' partitions
# will be stored on the same device.
#
# If osd_objectstore: bluestore is enabled 'ceph data', 'ceph block', 'ceph block.db', 'ceph block.wal' will be stored
# on the same device. The device will get 2 partitions:
# - One for 'data', called 'ceph data'
# - One for 'ceph block', 'ceph block.db', 'ceph block.wal' called 'ceph block'
#
# Example of what you will get:
# [root@ceph-osd0 ~]# blkid /dev/sda*
# /dev/sda: PTTYPE="gpt"
# /dev/sda1: UUID="9c43e346-dd6e-431f-92d8-cbed4ccb25f6" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="749c71c9-ed8f-4930-82a7-a48a3bcdb1c7"
# /dev/sda2: PARTLABEL="ceph block" PARTUUID="e6ca3e1d-4702-4569-abfa-e285de328e9d"
journal_collocation: false
#
osd_scenario: dummy
valid_osd_scenarios:
- collocated
- non-collocated
# II. Second scenario: N journal devices for N OSDs
# Use 'true' for 'raw_multi_journal' to enable this scenario
# List devices under 'devices' variable above and
# write journal devices for those under 'raw_journal_devices'
# In the following example:
# * sdb and sdc will get sdf as a journal
# * sdd and sde will get sdg as a journal
# While starting you have 2 options:
# 1. Pre-allocate all the devices
# 2. Progressively add new devices
raw_multi_journal: false
#raw_journal_devices:
# - /dev/sdf
# - /dev/sdf
# - /dev/sdg
# - /dev/sdg
# II. Second scenario: non-collocated
#
# To enable this scenario do: osd_scenario: non-collocated
#
# If osd_objectstore: filestore is enabled 'ceph data' and 'ceph journal' partitions
# will be stored on different devices:
# - 'ceph data' will be stored on the device listed in 'devices'
# - 'ceph journal' will be stored on the device listed in 'dedicated_devices'
#
# Let's take an example, imagine 'devices' was declared like this:
#
# devices:
# - /dev/sda
# - /dev/sdb
# - /dev/sdc
# - /dev/sdd
#
# And 'dedicated_devices' was declared like this:
#
# dedicated_devices:
# - /dev/sdf
# - /dev/sdf
# - /dev/sdg
# - /dev/sdg
#
# This will result in the following mapping:
# - /dev/sda will have /dev/sdf1 as journal
# - /dev/sdb will have /dev/sdf2 as a journal
# - /dev/sdc will have /dev/sdg1 as a journal
# - /dev/sdd will have /dev/sdg2 as a journal
#
#
# NOTE(leseb):
# In a containerized scenario we only support A SINGLE journal
@@ -129,15 +146,17 @@ raw_multi_journal: false
# This is a limitation we plan to fix at some point.
#
#
# If osd_objectstore: bluestore is enabled, both rocksdb DB and WAL will be stored
# on a dedicated device. So the following will happen:
# If osd_objectstore: bluestore is enabled, both 'ceph block.db' and 'ceph block.wal' partitions will be stored
# on a dedicated device.
#
# So the following will happen:
# - The devices listed in 'devices' will get 2 partitions, one for 'block' and one for 'data'.
# 'data' is only 100MB and does not store any of your data, it's just a bunch of Ceph metadata.
# 'block' will store all your data.
# - The devices in 'raw_journal_devices' will get 1 partition for RocksDB DB, called 'block.db'
# 'block' will store all your actual data.
# - The devices in 'dedicated_devices' will get 1 partition for RocksDB DB, called 'block.db'
# and one for RocksDB WAL, called 'block.wal'
#
# By default raw_journal_devices will represent block.db
# By default dedicated_devices will represent block.db
#
# Example of what you will get:
# [root@ceph-osd0 ~]# blkid /dev/sd*
@@ -147,17 +166,15 @@ raw_multi_journal: false
# /dev/sdb: PTTYPE="gpt"
# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="af5b2d74-4c08-42cf-be57-7248c739e217"
# /dev/sdb2: PARTLABEL="ceph block.wal" PARTUUID="af3f8327-9aa9-4c2b-a497-cf0fe96d126a"
raw_journal_devices: []
dedicated_devices: []
# IV. This will partition disks for BlueStore
# To enable bluestore just set:
# osd_objectstore: bluestore
# More device granularity for Bluestore
#
# If osd_objectstore: bluestore is enabled.
# By default, if bluestore_wal_devices is empty, it will get the content of raw_journal_devices.
# If set, then you will have a dedicated partition on a specific device (bluestore_wal_devices)
# for block.wal
# ONLY if osd_objectstore: bluestore is enabled.
#
# By default, if 'bluestore_wal_devices' is empty, it will get the content of 'dedicated_devices'.
# If set, then you will have a dedicated partition on a specific device for block.wal.
#
# Example of what you will get:
# [root@ceph-osd0 ~]# blkid /dev/sd*
@@ -168,22 +185,7 @@ raw_journal_devices: []
# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="0734f6b6-cc94-49e9-93de-ba7e1d5b79e3"
# /dev/sdc: PTTYPE="gpt"
# /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
bluestore_wal_devices: "{{ raw_journal_devices }}"
# V. Encrypt osd data and/or journal devices with dm-crypt.
# Keys are stored into the monitors k/v store
# Use 'true' to enable this scenario
# Both journal and data are stored on the same dm-crypt encrypted device
dmcrypt_journal_collocation: false
# VI. Encrypt osd data and/or journal devices with dm-crypt.
# Keys are stored into the monitors k/v store
# Use 'true' to enable this scenario
# Journal and osd data are separated, each with their own dm-crypt device
# You must use raw_journal_devices and set your journal devices
dmcrypt_dedicated_journal: false
bluestore_wal_devices: "{{ dedicated_devices }}"
##########
@@ -193,22 +195,22 @@ dmcrypt_dedicated_journal: false
ceph_config_keys: [] # DON'T TOUCH ME
# PREPARE DEVICE
# Make sure you only pass a single device to raw_journal_devices, otherwise this will fail horribly.
# Make sure you only pass a single device to dedicated_devices, otherwise this will fail horribly.
# This is why we use [0] in the example.
#
# WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
#
# Examples:
# Journal collocated on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_FILESTORE=1
# Dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1
# Dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
# Encrypted OSD on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
# Encrypted OSD with dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1
# Encrypted OSD with dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
#
# Bluestore OSD collocated: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1
# Bluestore OSD with dedicated db: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
# Bluestore OSD with dedicated db and wal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_BLUESTORE_BLOCK_WAL={{ bluestore_wal_devices[0] }}
# Bluestore OSD with dedicated db: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }}
# Bluestore OSD with dedicated db and wal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_BLUESTORE_BLOCK_WAL={{ bluestore_wal_devices[0] }}
# Encrypted OSD: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1
# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
#
#
ceph_osd_docker_devices: "{{ devices }}"


@@ -11,7 +11,7 @@
- item.value.removable == "0"
- item.value.partitions|count == 0
- item.value.holders|count == 0
- journal_collocation
- osd_scenario == 'collocated'
- osd_auto_discovery
- name: activate osd(s) when device is a disk
@@ -26,7 +26,7 @@
- not item.0.get("skipped")
- item.0.get("rc", 0) != 0
- not osd_auto_discovery
- raw_multi_journal
- osd_scenario == 'non-collocated'
- name: automatically activate osd disk(s) without partitions (dmcrypt)
command: ceph-disk activate --dmcrypt "/dev/{{ item.key }}"
@@ -38,7 +38,8 @@
- item.value.partitions|count == 0
- item.value.holders|count == 0
- osd_auto_discovery
- dmcrypt_journal_collocation
- dmcrypt
- osd_scenario == 'collocated'
- name: activate osd(s) when device is a disk (dmcrypt)
command: ceph-disk activate --dmcrypt {{ item.1 | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\\1p') }}1
@@ -52,13 +53,14 @@
- not item.0.get("skipped")
- item.0.get("rc", 0) != 0
- not osd_auto_discovery
- dmcrypt_dedicated_journal
- dmcrypt
- osd_scenario == 'non-collocated'
# NOTE (leseb): we must do this because of
# https://github.com/ansible/ansible/issues/4297
- name: combine ispartition results
set_fact:
combined_activate_osd_disk_results: "{{ activate_osd_disk if not dmcrypt_journal_collocation else activate_osd_disk_dmcrypt }}"
combined_activate_osd_disk_results: "{{ activate_osd_disk if not (dmcrypt and osd_scenario == 'collocated') else activate_osd_disk_dmcrypt }}"
- name: fail if ceph-disk cannot create an OSD
fail:


@@ -24,7 +24,7 @@
- name: check the journal device is a partition
shell: "readlink -f {{ item }} | egrep '/dev/([hsv]d[a-z]{1,2}|cciss/c[0-9]d[0-9]p|nvme[0-9]n[0-9]p)[0-9]{1,2}|fio[a-z]{1,2}[0-9]{1,2}$'"
with_items: "{{ raw_journal_devices }}"
with_items: "{{ dedicated_devices }}"
changed_when: false
failed_when: false
always_run: true
@@ -34,22 +34,22 @@
shell: "parted --script {{ item.1 }} print > /dev/null 2>&1"
with_together:
- "{{ journal_ispartition_results.results }}"
- "{{ raw_journal_devices|unique }}"
- "{{ dedicated_devices|unique }}"
changed_when: false
failed_when: false
always_run: true
register: journal_partition_status
when:
- (raw_multi_journal or dmcrypt_dedicated_journal)
- osd_scenario == 'non-collocated'
- item.0.rc != 0
- name: fix partitions gpt header or labels of the journal devices
shell: "sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }} || sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }}"
with_together:
- "{{ journal_partition_status.results }}"
- "{{ raw_journal_devices|unique }}"
- "{{ dedicated_devices|unique }}"
changed_when: false
when:
- (raw_multi_journal or dmcrypt_dedicated_journal)
- osd_scenario == 'non-collocated'
- not item.0.get("skipped")
- item.0.get("rc", 0) != 0


@@ -26,24 +26,16 @@
- osd_group_name is defined
- osd_group_name in group_names
- not containerized_deployment
- not journal_collocation
- not raw_multi_journal
- not dmcrypt_journal_collocation
- not dmcrypt_dedicated_journal
- osd_scenario == 'dummy'
- name: verify only one osd scenario was chosen
- name: make sure a valid osd scenario was chosen
fail:
msg: "please select only one osd scenario"
msg: "please choose an osd scenario, valid scenarios are {{ valid_osd_scenarios }}"
when:
- osd_group_name is defined
- osd_group_name in group_names
- not containerized_deployment
- (journal_collocation and raw_multi_journal)
or (dmcrypt_journal_collocation and journal_collocation)
or (dmcrypt_journal_collocation and raw_multi_journal)
or (dmcrypt_dedicated_journal and journal_collocation)
or (dmcrypt_dedicated_journal and raw_multi_journal)
or (dmcrypt_dedicated_journal and dmcrypt_journal_collocation)
- not osd_scenario in valid_osd_scenarios
- name: verify devices have been provided
fail:
@@ -51,7 +43,6 @@
when:
- osd_group_name is defined
- osd_group_name in group_names
- (journal_collocation or containerized_deployment)
- not osd_auto_discovery
- devices|length == 0
@@ -71,20 +62,20 @@
- osd_group_name is defined
- osd_group_name in group_names
- not containerized_deployment
- raw_multi_journal
- raw_journal_devices|length == 0
- osd_scenario == 'non-collocated'
- dedicated_devices|length == 0
or devices|length == 0
- name: make sure the raw_journal_devices variable is a list
- name: make sure the dedicated_devices variable is a list
fail:
msg: "raw_journal_devices: must be a list, not a string, i.e. [ \"/dev/sda\" ]"
msg: "dedicated_devices: must be a list, not a string, i.e. [ \"/dev/sda\" ]"
when:
- osd_group_name is defined
- osd_group_name in group_names
- not containerized_deployment
- raw_multi_journal
- raw_journal_devices is string
- raw_journal_devices|length == 0
- osd_scenario == 'non-collocated'
- dedicated_devices is string
- dedicated_devices|length == 0
or devices|length == 0
- name: check if bluestore is supported by the selected ceph version


@@ -6,23 +6,16 @@
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
- include: ./scenarios/journal_collocation.yml
- include: ./scenarios/collocated.yml
when:
- (journal_collocation or dmcrypt_journal_collocation)
- osd_scenario == 'collocated'
- not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
- include: ./scenarios/raw_multi_journal.yml
- include: ./scenarios/non-collocated.yml
when:
- raw_multi_journal
- not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
- include: ./scenarios/dmcrypt-dedicated-journal.yml
when:
- dmcrypt_dedicated_journal
- osd_scenario == 'non-collocated'
- not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False


@@ -30,7 +30,7 @@
set_fact:
copy_admin_key: true
when:
- dmcrypt_journal_collocation or dmcrypt_dedicated_journal
- dmcrypt
- name: copy osd bootstrap key
copy:


@@ -11,42 +11,48 @@
ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore"
when:
- osd_objectstore == 'bluestore'
- journal_collocation
- osd_scenario == 'collocated'
- not dmcrypt
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
- set_fact:
ceph_disk_cli_options: "--cluster {{ cluster }} --filestore"
when:
- osd_objectstore == 'filestore'
- journal_collocation
- osd_scenario == 'collocated'
- not dmcrypt
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
- set_fact:
ceph_disk_cli_options: "--cluster {{ cluster }}"
when:
- osd_objectstore == 'filestore'
- journal_collocation
- osd_scenario == 'collocated'
- not dmcrypt
- ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
- set_fact:
ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"
when:
- osd_objectstore == 'bluestore'
- dmcrypt_journal_collocation
- osd_scenario == 'collocated'
- dmcrypt
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
- set_fact:
ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt"
when:
- osd_objectstore == 'filestore'
- dmcrypt_journal_collocation
- osd_scenario == 'collocated'
- dmcrypt
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
- set_fact:
ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
when:
- osd_objectstore == 'filestore'
- dmcrypt_journal_collocation
- osd_scenario == 'collocated'
- dmcrypt
- ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
# NOTE (alahouze): if the device is a partition, the parted command below has


@@ -1,45 +0,0 @@
---
## SCENARIO 6: DMCRYPT N JOURNAL DEVICES FOR N OSDS
- include: ../check_devices.yml
- set_fact:
ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"
when:
- osd_objectstore == 'bluestore'
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
- set_fact:
ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt"
when:
- osd_objectstore == 'filestore'
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
- set_fact:
ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
when:
- osd_objectstore == 'filestore'
- ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
# NOTE (leseb): the prepare process must be parallelized somehow...
# if you have 64 disks with 4TB each, this will take a while
# since Ansible will sequential process the loop
# NOTE (alahouze): if the device is a partition, the parted command below has
# failed, this is why we check if the device is a partition too.
- name: prepare dmcrypt osd disk(s) with a dedicated journal device on "{{ osd_objectstore }}"
command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.2 }} {{ item.3 }}"
with_together:
- "{{ parted_results.results }}"
- "{{ ispartition_results.results }}"
- "{{ devices }}"
- "{{ raw_journal_devices }}"
changed_when: false
when:
- not item.0.get("skipped")
- not item.1.get("skipped")
- item.0.get("rc", 0) != 0
- item.1.get("rc", 0) != 0
- not osd_auto_discovery
- include: ../activate_osds.yml


@@ -1,6 +1,4 @@
---
## SCENARIO 3: N JOURNAL DEVICES FOR N OSDS
- include: ../check_devices.yml
# NOTE (leseb): the prepare process must be parallelized somehow...
@@ -11,37 +9,64 @@
ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore"
when:
- osd_objectstore == 'bluestore'
- not dmcrypt
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
- set_fact:
ceph_disk_cli_options: "--cluster {{ cluster }} --filestore"
when:
- osd_objectstore == 'filestore'
- not dmcrypt
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
- set_fact:
ceph_disk_cli_options: "--cluster {{ cluster }}"
when:
- osd_objectstore == 'filestore'
- not dmcrypt
- ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
- name: prepare filestore osd disk(s) with a dedicated journal device
command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.1 }} {{ item.2 }}"
- set_fact:
ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"
when:
- osd_objectstore == 'bluestore'
- dmcrypt
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
- set_fact:
ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt"
when:
- osd_objectstore == 'filestore'
- dmcrypt
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
- set_fact:
ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
when:
- osd_objectstore == 'filestore'
- dmcrypt
- ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
- name: prepare filestore osd disk(s) non-collocated
command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.2 }} {{ item.3 }}"
with_together:
- "{{ parted_results.results }}"
- "{{ ispartition_results.results }}"
- "{{ devices }}"
- "{{ raw_journal_devices }}"
- "{{ dedicated_devices }}"
changed_when: false
when:
- item.0.get("skipped") or item.0.get("rc", 0) != 0
- not item.1.get("skipped")
- item.1.get("rc", 0) != 0
- osd_objectstore == 'filestore'
- not osd_auto_discovery
- name: manually prepare bluestore osd disk(s) with a dedicated device for db and wal
- name: manually prepare "{{ osd_objectstore }}" osd disk(s) with a dedicated device for db and wal
command: "ceph-disk prepare {{ ceph_disk_cli_options }} --block.db {{ item.1 }} --block.wal {{ item.2 }} {{ item.3 }}"
with_together:
- "{{ parted_results.results }}"
- "{{ raw_journal_devices }}"
- "{{ dedicated_devices }}"
- "{{ bluestore_wal_devices }}"
- "{{ devices }}"
when:


@@ -44,8 +44,8 @@ create_dev_list $1
-v /etc/localtime:/etc/localtime:ro \
--device=/dev/${1} \
--device=/dev/${1}1 \
{% if raw_journal_devices|length > 0 -%}
-e OSD_JOURNAL={{ raw_journal_devices[0] }} \
{% if dedicated_devices|length > 0 -%}
-e OSD_JOURNAL={{ dedicated_devices[0] }} \
{% else -%}
--device=/dev/${1}2 \
{% endif -%}


@@ -31,7 +31,7 @@ def node(Ansible, Interface, Command, request):
if node_type == "mgrs" and ceph_stable_release == "jewel":
pytest.skip("mgr nodes can not be tested with ceph release jewel")
journal_collocation_test = ansible_vars.get("journal_collocation") or ansible_vars.get("dmcrypt_journal_collocation")
journal_collocation_test = ansible_vars.get("osd_scenario") == "collocated"
if request.node.get_marker("journal_collocation") and not journal_collocation_test:
pytest.skip("Scenario is not using journal collocation")


@@ -9,9 +9,9 @@ journal_size: 100
osd_objectstore: "bluestore"
devices:
- '/dev/sda'
raw_journal_devices:
dedicated_devices:
- '/dev/sdb'
raw_multi_journal: True
osd_scenario: non-collocated
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }


@@ -5,11 +5,12 @@ public_network: "192.168.11.0/24"
cluster_network: "192.168.12.0/24"
journal_size: 100
monitor_interface: eth1
dmcrypt_dedicated_journal: true
osd_scenario: non-collocated
dmcrypt: true
osd_objectstore: "bluestore"
devices:
- '/dev/sda'
raw_journal_devices:
dedicated_devices:
- '/dev/sdb'
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }


@@ -5,7 +5,8 @@ public_network: "192.168.13.0/24"
cluster_network: "192.168.14.0/24"
journal_size: 100
monitor_interface: eth1
dmcrypt_journal_collocation: true
osd_scenario: collocated
dmcrypt: true
osd_objectstore: "bluestore"
devices:
- '/dev/sda'


@@ -7,7 +7,8 @@ public_network: "192.168.23.0/24"
cluster_network: "192.168.24.0/24"
journal_size: 100
monitor_interface: eth1
dmcrypt_journal_collocation: true
osd_scenario: collocated
dmcrypt: true
osd_objectstore: "bluestore"
devices:
- '/dev/sda'


@@ -16,8 +16,8 @@ ceph_rgw_civetweb_port: 8080
ceph_osd_docker_devices: "{{ devices }}"
devices:
- /dev/sda
raw_journal_devices:
dedicated_devices:
- /dev/sdb
ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_BLUESTORE=1
ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_BLUESTORE=1
ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
ceph_osd_docker_run_script_path: /var/tmp


@@ -12,7 +12,7 @@ journal_size: 100
ceph_docker_on_openstack: False
public_network: "192.168.15.0/24"
cluster_network: "192.168.16.0/24"
journal_collocation: true
osd_scenario: collocated
ceph_rgw_civetweb_port: 8080
ceph_osd_docker_devices: "{{ devices }}"
ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_FORCE_ZAP=1 -e OSD_BLUESTORE=1


@@ -9,7 +9,7 @@ journal_size: 100
devices:
- '/dev/sda'
- '/dev/sdb'
journal_collocation: True
osd_scenario: collocated
osd_objectstore: "bluestore"
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }


@@ -8,9 +8,9 @@ journal_size: 100
osd_objectstore: "filestore"
devices:
- '/dev/sda'
raw_journal_devices:
dedicated_devices:
- '/dev/sdb'
raw_multi_journal: True
osd_scenario: non-collocated
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }


@@ -5,11 +5,11 @@ public_network: "192.168.11.0/24"
cluster_network: "192.168.12.0/24"
journal_size: 100
monitor_interface: eth1
dmcrypt_dedicated_journal: true
osd_scenario: non-collocated
osd_objectstore: "filestore"
devices:
- '/dev/sda'
raw_journal_devices:
dedicated_devices:
- '/dev/sdb'
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }


@@ -5,7 +5,7 @@ public_network: "192.168.13.0/24"
cluster_network: "192.168.14.0/24"
journal_size: 100
monitor_interface: eth1
dmcrypt_journal_collocation: true
osd_scenario: collocated
osd_objectstore: "filestore"
devices:
- '/dev/sda'


@@ -12,7 +12,8 @@ journal_size: 100
ceph_docker_on_openstack: False
public_network: "192.168.15.0/24"
cluster_network: "192.168.16.0/24"
dmcrypt_journal_collocation: true
osd_scenario: collocated
dmcrypt: true
ceph_rgw_civetweb_port: 8080
ceph_osd_docker_devices: "{{ devices }}"
devices:


@@ -14,9 +14,10 @@ public_network: "192.168.15.0/24"
cluster_network: "192.168.16.0/24"
ceph_rgw_civetweb_port: 8080
ceph_osd_docker_devices: "{{ devices }}"
osd_scenario: non-collocated
devices:
- /dev/sda
raw_journal_devices:
dedicated_devices:
- /dev/sdb
ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }}
ceph_osd_docker_run_script_path: /var/tmp


@@ -12,7 +12,7 @@ journal_size: 100
ceph_docker_on_openstack: False
public_network: "192.168.15.0/24"
cluster_network: "192.168.16.0/24"
journal_collocation: true
osd_scenario: collocated
ceph_rgw_civetweb_port: 8080
ceph_osd_docker_devices: "{{ devices }}"
ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1


@@ -10,7 +10,7 @@ osd_objectstore: "filestore"
devices:
- '/dev/sda'
- '/dev/sdb'
journal_collocation: True
osd_scenario: collocated
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }


@@ -8,9 +8,9 @@ monitor_interface: eth1
journal_size: 100
devices:
- '/dev/sdb'
raw_journal_devices:
dedicated_devices:
- '/dev/sdc'
raw_multi_journal: True
osd_scenario: non-collocated
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }

tox.ini

@@ -88,15 +88,15 @@ deps=
ansible2.2: ansible==2.2.3
-r{toxinidir}/tests/requirements.txt
changedir=
# tests a 1 mon, 1 osd, 1 mds and 1 rgw xenial cluster using raw_multi_journal OSD scenario
# tests a 1 mon, 1 osd, 1 mds and 1 rgw xenial cluster using non-collocated OSD scenario
xenial_cluster: {toxinidir}/tests/functional/ubuntu/16.04/cluster
# tests a 1 mon 1 osd centos7 cluster using journal_collocation OSD scenario
# tests a 1 mon 1 osd centos7 cluster using collocated OSD scenario
journal_collocation: {toxinidir}/tests/functional/centos/7/jrn-col
# tests a 1 mon 1 osd centos7 cluster using dmcrypt_dedicated_journal OSD scenario
# tests a 1 mon 1 osd centos7 cluster using dmcrypt non-collocated OSD scenario
dmcrypt_journal: {toxinidir}/tests/functional/centos/7/crypt-ded-jrn
# tests a 1 mon 1 osd centos7 cluster using dmcrypt_journal_collocation OSD scenario
# tests a 1 mon 1 osd centos7 cluster using dmcrypt collocated OSD scenario
dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/crypt-jrn-col
# tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using raw_multi_journal OSD scenario
# tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using non-collocated OSD scenario
centos7_cluster: {toxinidir}/tests/functional/centos/7/cluster
# an alias for centos7_cluster, this makes the name better suited for rhcs testing
cluster: {toxinidir}/tests/functional/centos/7/cluster