From 30991b1c0ab5ba64abd2e3b820ed9fb5606b5fc5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Han?=
Date: Thu, 27 Jul 2017 17:05:59 +0200
Subject: [PATCH] osd: simplify scenarios
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

There are only two main scenarios now:

* collocated: everything remains on the same device:
  - data, db, wal for bluestore
  - data and journal for filestore
* non-collocated: dedicated devices for some of the components

Signed-off-by: Sébastien Han
---
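Reviewer note (this section sits before the diffstat, so git-am drops it): a
minimal sketch of what an OSD host's group_vars could look like under each
scenario after this change. The device names below are illustrative only:

    # collocated: data and journal/db/wal share each listed device
    osd_scenario: collocated
    osd_objectstore: bluestore
    dmcrypt: false
    devices:
      - /dev/sda

    # non-collocated: journals (or block.db/block.wal) on dedicated devices
    osd_scenario: non-collocated
    osd_objectstore: filestore
    devices:
      - /dev/sda
      - /dev/sdb
    dedicated_devices:
      - /dev/sdf
      - /dev/sdf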
 Vagrantfile                                   |   4 +-
 ceph-aio-no-vagrant.sh                        |   2 +-
 group_vars/osds.yml.sample                    | 138 +++++++++---
 infrastructure-playbooks/purge-cluster.yml    |   2 +-
 .../purge-docker-cluster.yml                  |   4 +-
 roles/ceph-osd/defaults/main.yml              | 138 +++++++++---
 roles/ceph-osd/tasks/activate_osds.yml        |  12 +-
 roles/ceph-osd/tasks/check_devices.yml        |  10 +-
 roles/ceph-osd/tasks/check_mandatory_vars.yml |  31 ++--
 roles/ceph-osd/tasks/main.yml                 |  15 +-
 roles/ceph-osd/tasks/pre_requisite.yml        |   2 +-
 ...journal_collocation.yml => collocated.yml} |  18 ++-
 .../scenarios/dmcrypt-dedicated-journal.yml   |  45 ------
 ...w_multi_journal.yml => non-collocated.yml} |  39 ++++-
 roles/ceph-osd/templates/ceph-osd-run.sh.j2   |   4 +-
 tests/conftest.py                             |   2 +-
 .../centos/7/bluestore/group_vars/all         |   4 +-
 .../centos/7/bs-crypt-ded-jrn/group_vars/all  |   5 +-
 .../centos/7/bs-crypt-jrn-col/group_vars/all  |   3 +-
 .../7/bs-dock-crypt-jrn-col/group_vars/all    |   3 +-
 .../centos/7/bs-dock-ded-jrn/group_vars/all   |   4 +-
 .../centos/7/bs-docker/group_vars/all         |   2 +-
 .../centos/7/bs-jrn-col/group_vars/all        |   2 +-
 .../centos/7/cluster/group_vars/all           |   4 +-
 .../centos/7/crypt-ded-jrn/group_vars/all     |   5 +-
 .../centos/7/crypt-jrn-col/group_vars/all     |   3 +-
 .../7/docker-crypt-jrn-col/group_vars/all     |   3 +-
 .../centos/7/docker-ded-jrn/group_vars/all    |   5 +-
 .../functional/centos/7/docker/group_vars/all |   2 +-
 .../centos/7/jrn-col/group_vars/all           |   2 +-
 .../ubuntu/16.04/cluster/group_vars/all       |   4 +-
 tox.ini                                       |  10 +-
 32 files changed, 255 insertions(+), 272 deletions(-)
 rename roles/ceph-osd/tasks/scenarios/{journal_collocation.yml => collocated.yml} (89%)
 delete mode 100644 roles/ceph-osd/tasks/scenarios/dmcrypt-dedicated-journal.yml
 rename roles/ceph-osd/tasks/scenarios/{raw_multi_journal.yml => non-collocated.yml} (58%)

diff --git a/Vagrantfile b/Vagrantfile
index 3a48c1c3c..5fb08b5b0 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -86,7 +86,7 @@ ansible_provision = proc do |ansible|
   else
     ansible.extra_vars = ansible.extra_vars.merge({
       devices: settings['disks'],
-      journal_collocation: 'true',
+      osd_scenario: 'collocated',
       monitor_interface: ETH,
       os_tuning_params: settings['os_tuning_params'],
       pool_default_size: '2',
@@ -100,7 +100,7 @@ ansible_provision = proc do |ansible|
     ansible.extra_vars = ansible.extra_vars.merge({
       cluster_network: "#{CLUSTER_SUBNET}.0/16",
       devices: ['/dev/sdc'], # hardcode leftover disk
-      journal_collocation: 'true',
+      osd_scenario: 'collocated',
       monitor_address_block: "#{PUBLIC_SUBNET}.0/16",
       public_network: "#{PUBLIC_SUBNET}.0/16",
     })
diff --git a/ceph-aio-no-vagrant.sh b/ceph-aio-no-vagrant.sh
index 602588c3e..c116b5b5d 100755
--- a/ceph-aio-no-vagrant.sh
+++ b/ceph-aio-no-vagrant.sh
@@ -118,7 +118,7 @@ function cp_var {
 function populate_vars {
   sed -i "s/[#]*osd_auto_discovery: .*/osd_auto_discovery: true/" group_vars/osds.yml
-  sed -i "s/[#]*journal_collocation: .*/journal_collocation: true/" group_vars/osds.yml
+  sed -i "s/[#]*osd_scenario: .*/osd_scenario: collocated/" group_vars/osds.yml
   sed -i "s/[#]*monitor_address: .*/monitor_address: ${IP}/" group_vars/all.yml
   sed -i "s/[#]*journal_size: .*/journal_size: 100/" group_vars/all.yml
   sed -i "s|[#]*public_network: .*|public_network: ${SUBNET}|" group_vars/all.yml
diff --git a/group_vars/osds.yml.sample b/group_vars/osds.yml.sample
index 0a270399c..ae06510ff 100644
--- a/group_vars/osds.yml.sample
+++ b/group_vars/osds.yml.sample
@@ -85,51 +85,68 @@ dummy:
 #
 #osd_auto_discovery: false
 
+# Encrypt your OSD device using dmcrypt
+# If set to true, the data will be encrypted no matter which osd_objectstore and osd_scenario you use
+#dmcrypt: false
 
-# !! WARNING !!
-# #
-# # /!\ ENABLE ONLY ONE SCENARIO AT A TIME /!\
-# #
-# # !! WARNING !!
-#
-# I. First scenario: journal and osd_data on the same device
-# Use 'true' to enable this scenario
-# This will collocate both journal and data on the same disk
-# creating a partition at the beginning of the device
-# List devices under 'devices' variable above or choose 'osd_auto_discovery'
+# I. First scenario: collocated
+#
+# To enable this scenario, set: osd_scenario: collocated
 #
 #
-# If osd_objectstore: bluestore is enabled both rocksdb DB and WAL will be stored
-# on the device. So the device will get 2 partitions:
-# - One for 'data', also called 'block'
-# - One for block, db, and wal data
+# If osd_objectstore: filestore is enabled, both 'ceph data' and 'ceph journal' partitions
+# will be stored on the same device.
+#
+# If osd_objectstore: bluestore is enabled, 'ceph data', 'ceph block', 'ceph block.db' and 'ceph block.wal' will be stored
+# on the same device. The device will get 2 partitions:
+# - One for 'data', called 'ceph data'
+# - One for 'ceph block', 'ceph block.db' and 'ceph block.wal', called 'ceph block'
 #
 # Example of what you will get:
 # [root@ceph-osd0 ~]# blkid /dev/sda*
 # /dev/sda: PTTYPE="gpt"
 # /dev/sda1: UUID="9c43e346-dd6e-431f-92d8-cbed4ccb25f6" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="749c71c9-ed8f-4930-82a7-a48a3bcdb1c7"
 # /dev/sda2: PARTLABEL="ceph block" PARTUUID="e6ca3e1d-4702-4569-abfa-e285de328e9d"
-#journal_collocation: false
+#
+
+#osd_scenario: dummy
+#valid_osd_scenarios:
+# - collocated
+# - non-collocated
 
-# II. Second scenario: N journal devices for N OSDs
-# Use 'true' for 'raw_multi_journal' to enable this scenario
-# List devices under 'devices' variable above and
-# write journal devices for those under 'raw_journal_devices'
-# In the following example:
-# * sdb and sdc will get sdf as a journal
-# * sdd and sde will get sdg as a journal
-
-# While starting you have 2 options:
-# 1. Pre-allocate all the devices
-# 2. Progressively add new devices
-#raw_multi_journal: false
-#raw_journal_devices:
-# - /dev/sdf
-# - /dev/sdf
-# - /dev/sdg
-# - /dev/sdg
+# II. Second scenario: non-collocated
+#
+# To enable this scenario, set: osd_scenario: non-collocated
+#
+# If osd_objectstore: filestore is enabled, 'ceph data' and 'ceph journal' partitions
+# will be stored on different devices:
+# - 'ceph data' will be stored on the device listed in 'devices'
+# - 'ceph journal' will be stored on the device listed in 'dedicated_devices'
+#
+# Let's take an example: imagine 'devices' was declared like this:
+#
+# devices:
+# - /dev/sda
+# - /dev/sdb
+# - /dev/sdc
+# - /dev/sdd
+#
+# And 'dedicated_devices' was declared like this:
+#
+# dedicated_devices:
+# - /dev/sdf
+# - /dev/sdf
+# - /dev/sdg
+# - /dev/sdg
+#
+# This will result in the following mapping:
+# - /dev/sda will have /dev/sdf1 as a journal
+# - /dev/sdb will have /dev/sdf2 as a journal
+# - /dev/sdc will have /dev/sdg1 as a journal
+# - /dev/sdd will have /dev/sdg2 as a journal
+#
 #
 # NOTE(leseb):
 # On a containerized scenario we only support A SINGLE journal
@@ -137,15 +154,17 @@ dummy:
 # This is a limitation we plan to fix at some point.
 #
 #
-# If osd_objectstore: bluestore is enabled, both rocksdb DB and WAL will be stored
-# on a dedicated device. So the following will happen:
+# If osd_objectstore: bluestore is enabled, both 'ceph block.db' and 'ceph block.wal' partitions will be stored
+# on a dedicated device.
+#
+# So the following will happen:
 # - The devices listed in 'devices' will get 2 partitions, one for 'block' and one for 'data'.
 # 'data' is only 100MB big and do not store any of your data, it's just a bunch of Ceph metadata.
-# 'block' will store all your data.
-# - The devices in 'raw_journal_devices' will get 1 partition for RocksDB DB, called 'block.db'
+# 'block' will store all your actual data.
+# - The devices in 'dedicated_devices' will get 1 partition for RocksDB DB, called 'block.db'
 # and one for RocksDB WAL, called 'block.wal'
 #
-# By default raw_journal_devices will represent block.db
+# By default, dedicated_devices will represent block.db
 #
 # Example of what you will get:
 # [root@ceph-osd0 ~]# blkid /dev/sd*
@@ -155,17 +174,15 @@
 # /dev/sdb: PTTYPE="gpt"
 # /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="af5b2d74-4c08-42cf-be57-7248c739e217"
 # /dev/sdb2: PARTLABEL="ceph block.wal" PARTUUID="af3f8327-9aa9-4c2b-a497-cf0fe96d126a"
-#raw_journal_devices: []
+#dedicated_devices: []
 
 
-# IV. This will partition disks for BlueStore
-# To enable bluestore just set:
-# osd_objectstore: bluestore
+# More device granularity for Bluestore
 #
-# If osd_objectstore: bluestore is enabled.
-# By default, if bluestore_wal_devices is empty, it will get the content of raw_journal_devices.
-# If set, then you will have a dedicated partition on a specific device (bluestore_wal_devices)
-# for block.wal
+# ONLY if osd_objectstore: bluestore is enabled.
+#
+# By default, if 'bluestore_wal_devices' is empty, it will get the content of 'dedicated_devices'.
+# If set, then you will have a dedicated partition on a specific device for block.wal.
 #
 # Example of what you will get:
 # [root@ceph-osd0 ~]# blkid /dev/sd*
@@ -176,22 +193,7 @@ dummy:
 # /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="0734f6b6-cc94-49e9-93de-ba7e1d5b79e3"
 # /dev/sdc: PTTYPE="gpt"
 # /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
-#bluestore_wal_devices: "{{ raw_journal_devices }}"
-
-
-# V. Encrypt osd data and/or journal devices with dm-crypt.
-# Keys are stored into the monitors k/v store -# Use 'true' to enable this scenario -# Both journal and data are stored on the same dm-crypt encrypted device -#dmcrypt_journal_collocation: false - - -# VI. Encrypt osd data and/or journal devices with dm-crypt. -# Keys are stored into the monitors k/v store -# Use 'true' to enable this scenario -# Journal and osd data are separated, each with their own dm-crypt device -# You must use raw_journal_devices and set your journal devices -#dmcrypt_dedicated_journal: false +#bluestore_wal_devices: "{{ dedicated_devices }}" ########## @@ -201,22 +203,22 @@ dummy: #ceph_config_keys: [] # DON'T TOUCH ME # PREPARE DEVICE -# Make sure you only pass a single device to raw_journal_devices, otherwise this will fail horribly. +# Make sure you only pass a single device to dedicated_devices, otherwise this will fail horribly. # This is why we use [0] in the example. # # WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above # # Examples: # Journal collocated on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_FILESTORE=1 -# Dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1 +# Dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1 # Encrypted OSD on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1 -# Encrypted OSD with dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1 +# Encrypted OSD with dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1 # # Bluestore OSD collocated: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -# Bluestore OSD with dedicated db: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -# Bluestore OSD with dedicated db and wal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_BLUESTORE_BLOCK_WAL={{ bluestore_wal_devices[0] }} +# Bluestore OSD with dedicated db: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} +# Bluestore OSD with dedicated db and wal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_BLUESTORE_BLOCK_WAL={{ bluestore_wal_devices[0] }} # Encrypted OSD: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1 -# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1 +# Encrypted OSD with dedicated journal: 
ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
 #
 #
 #ceph_osd_docker_devices: "{{ devices }}"
diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml
index d1196cfa7..1bdd86ad0 100644
--- a/infrastructure-playbooks/purge-cluster.yml
+++ b/infrastructure-playbooks/purge-cluster.yml
@@ -343,7 +343,7 @@
     with_items: "{{ ceph_journal_partition_to_erase_path.stdout_lines | default([]) }}"
     when:
       - ceph_journal_partlabels.rc == 0
-      - (raw_multi_journal is defined and raw_multi_journal) or (dmcrypt_dedicated_journal is defined and dmcrypt_dedicated_journal)
+      - osd_scenario == 'non-collocated'
 
 
 - name: purge ceph mon cluster
diff --git a/infrastructure-playbooks/purge-docker-cluster.yml b/infrastructure-playbooks/purge-docker-cluster.yml
index 03c623f61..4558fc30f 100644
--- a/infrastructure-playbooks/purge-docker-cluster.yml
+++ b/infrastructure-playbooks/purge-docker-cluster.yml
@@ -270,7 +270,7 @@
         zap_device
       with_items:
         - "{{ ceph_osd_docker_devices }}"
-        - "{{ raw_journal_devices|default([]) }}"
+        - "{{ dedicated_devices|default([]) }}"
 
     - name: wait until the zap containers die
       shell: |
@@ -288,7 +288,7 @@
         state: absent
       with_items:
         - "{{ ceph_osd_docker_devices }}"
-        - "{{ raw_journal_devices|default([]) }}"
+        - "{{ dedicated_devices|default([]) }}"
 
     - name: remove ceph osd service
       file:
diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml
index 6b7067c1a..1070e46f7 100644
--- a/roles/ceph-osd/defaults/main.yml
+++ b/roles/ceph-osd/defaults/main.yml
@@ -77,51 +77,68 @@ devices: []
 #
 osd_auto_discovery: false
 
+# Encrypt your OSD device using dmcrypt
+# If set to true, the data will be encrypted no matter which osd_objectstore and osd_scenario you use
+dmcrypt: false
 
-# !! WARNING !!
-# #
-# # /!\ ENABLE ONLY ONE SCENARIO AT A TIME /!\
-# #
-# # !! WARNING !!
-#
-# I. First scenario: journal and osd_data on the same device
-# Use 'true' to enable this scenario
-# This will collocate both journal and data on the same disk
-# creating a partition at the beginning of the device
-# List devices under 'devices' variable above or choose 'osd_auto_discovery'
+# I. First scenario: collocated
+#
+# To enable this scenario, set: osd_scenario: collocated
 #
 #
-# If osd_objectstore: bluestore is enabled both rocksdb DB and WAL will be stored
-# on the device. So the device will get 2 partitions:
-# - One for 'data', also called 'block'
-# - One for block, db, and wal data
+# If osd_objectstore: filestore is enabled, both 'ceph data' and 'ceph journal' partitions
+# will be stored on the same device.
+#
+# If osd_objectstore: bluestore is enabled, 'ceph data', 'ceph block', 'ceph block.db' and 'ceph block.wal' will be stored
+# on the same device. The device will get 2 partitions:
+# - One for 'data', called 'ceph data'
+# - One for 'ceph block', 'ceph block.db' and 'ceph block.wal', called 'ceph block'
 #
 # Example of what you will get:
 # [root@ceph-osd0 ~]# blkid /dev/sda*
 # /dev/sda: PTTYPE="gpt"
 # /dev/sda1: UUID="9c43e346-dd6e-431f-92d8-cbed4ccb25f6" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="749c71c9-ed8f-4930-82a7-a48a3bcdb1c7"
 # /dev/sda2: PARTLABEL="ceph block" PARTUUID="e6ca3e1d-4702-4569-abfa-e285de328e9d"
-journal_collocation: false
+#
+
+osd_scenario: dummy
+valid_osd_scenarios:
+  - collocated
+  - non-collocated
 
-# II. Second scenario: N journal devices for N OSDs
-# Use 'true' for 'raw_multi_journal' to enable this scenario
-# List devices under 'devices' variable above and
-# write journal devices for those under 'raw_journal_devices'
-# In the following example:
-# * sdb and sdc will get sdf as a journal
-# * sdd and sde will get sdg as a journal
-
-# While starting you have 2 options:
-# 1. Pre-allocate all the devices
-# 2. Progressively add new devices
-raw_multi_journal: false
-#raw_journal_devices:
-# - /dev/sdf
-# - /dev/sdf
-# - /dev/sdg
-# - /dev/sdg
+# II. Second scenario: non-collocated
+#
+# To enable this scenario, set: osd_scenario: non-collocated
+#
+# If osd_objectstore: filestore is enabled, 'ceph data' and 'ceph journal' partitions
+# will be stored on different devices:
+# - 'ceph data' will be stored on the device listed in 'devices'
+# - 'ceph journal' will be stored on the device listed in 'dedicated_devices'
+#
+# Let's take an example: imagine 'devices' was declared like this:
+#
+# devices:
+# - /dev/sda
+# - /dev/sdb
+# - /dev/sdc
+# - /dev/sdd
+#
+# And 'dedicated_devices' was declared like this:
+#
+# dedicated_devices:
+# - /dev/sdf
+# - /dev/sdf
+# - /dev/sdg
+# - /dev/sdg
+#
+# This will result in the following mapping:
+# - /dev/sda will have /dev/sdf1 as a journal
+# - /dev/sdb will have /dev/sdf2 as a journal
+# - /dev/sdc will have /dev/sdg1 as a journal
+# - /dev/sdd will have /dev/sdg2 as a journal
+#
 #
 # NOTE(leseb):
 # On a containerized scenario we only support A SINGLE journal
@@ -129,15 +146,17 @@ raw_multi_journal: false
 # This is a limitation we plan to fix at some point.
 #
 #
-# If osd_objectstore: bluestore is enabled, both rocksdb DB and WAL will be stored
-# on a dedicated device. So the following will happen:
+# If osd_objectstore: bluestore is enabled, both 'ceph block.db' and 'ceph block.wal' partitions will be stored
+# on a dedicated device.
+#
+# So the following will happen:
 # - The devices listed in 'devices' will get 2 partitions, one for 'block' and one for 'data'.
 # 'data' is only 100MB big and do not store any of your data, it's just a bunch of Ceph metadata.
-# 'block' will store all your data.
-# - The devices in 'raw_journal_devices' will get 1 partition for RocksDB DB, called 'block.db'
+# 'block' will store all your actual data.
+# - The devices in 'dedicated_devices' will get 1 partition for RocksDB DB, called 'block.db'
 # and one for RocksDB WAL, called 'block.wal'
 #
-# By default raw_journal_devices will represent block.db
+# By default, dedicated_devices will represent block.db
 #
 # Example of what you will get:
 # [root@ceph-osd0 ~]# blkid /dev/sd*
@@ -147,17 +166,15 @@ raw_multi_journal: false
 # /dev/sdb: PTTYPE="gpt"
 # /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="af5b2d74-4c08-42cf-be57-7248c739e217"
 # /dev/sdb2: PARTLABEL="ceph block.wal" PARTUUID="af3f8327-9aa9-4c2b-a497-cf0fe96d126a"
-raw_journal_devices: []
+dedicated_devices: []
 
 
-# IV. This will partition disks for BlueStore
-# To enable bluestore just set:
-# osd_objectstore: bluestore
+# More device granularity for Bluestore
 #
-# If osd_objectstore: bluestore is enabled.
-# By default, if bluestore_wal_devices is empty, it will get the content of raw_journal_devices.
-# If set, then you will have a dedicated partition on a specific device (bluestore_wal_devices)
-# for block.wal
+# ONLY if osd_objectstore: bluestore is enabled.
+#
+# By default, if 'bluestore_wal_devices' is empty, it will get the content of 'dedicated_devices'.
+# If set, then you will have a dedicated partition on a specific device for block.wal. # # Example of what you will get: # [root@ceph-osd0 ~]# blkid /dev/sd* @@ -168,22 +185,7 @@ raw_journal_devices: [] # /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="0734f6b6-cc94-49e9-93de-ba7e1d5b79e3" # /dev/sdc: PTTYPE="gpt" # /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3" -bluestore_wal_devices: "{{ raw_journal_devices }}" - - -# V. Encrypt osd data and/or journal devices with dm-crypt. -# Keys are stored into the monitors k/v store -# Use 'true' to enable this scenario -# Both journal and data are stored on the same dm-crypt encrypted device -dmcrypt_journal_collocation: false - - -# VI. Encrypt osd data and/or journal devices with dm-crypt. -# Keys are stored into the monitors k/v store -# Use 'true' to enable this scenario -# Journal and osd data are separated, each with their own dm-crypt device -# You must use raw_journal_devices and set your journal devices -dmcrypt_dedicated_journal: false +bluestore_wal_devices: "{{ dedicated_devices }}" ########## @@ -193,22 +195,22 @@ dmcrypt_dedicated_journal: false ceph_config_keys: [] # DON'T TOUCH ME # PREPARE DEVICE -# Make sure you only pass a single device to raw_journal_devices, otherwise this will fail horribly. +# Make sure you only pass a single device to dedicated_devices, otherwise this will fail horribly. # This is why we use [0] in the example. # # WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above # # Examples: # Journal collocated on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_FILESTORE=1 -# Dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1 +# Dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1 # Encrypted OSD on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1 -# Encrypted OSD with dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1 +# Encrypted OSD with dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1 # # Bluestore OSD collocated: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -# Bluestore OSD with dedicated db: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -# Bluestore OSD with dedicated db and wal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_BLUESTORE_BLOCK_WAL={{ bluestore_wal_devices[0] }} +# Bluestore OSD with dedicated db: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} +# Bluestore OSD with dedicated db and wal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e 
OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_BLUESTORE_BLOCK_WAL={{ bluestore_wal_devices[0] }}
 # Encrypted OSD: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1
+# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
 #
 #
 ceph_osd_docker_devices: "{{ devices }}"
diff --git a/roles/ceph-osd/tasks/activate_osds.yml b/roles/ceph-osd/tasks/activate_osds.yml
index 4c1a1bd61..79b99c139 100644
--- a/roles/ceph-osd/tasks/activate_osds.yml
+++ b/roles/ceph-osd/tasks/activate_osds.yml
@@ -11,7 +11,7 @@
   - item.value.removable == "0"
   - item.value.partitions|count == 0
   - item.value.holders|count == 0
-  - journal_collocation
+  - osd_scenario == 'collocated'
   - osd_auto_discovery
 
 - name: activate osd(s) when device is a disk
@@ -26,7 +26,7 @@
   - not item.0.get("skipped")
   - item.0.get("rc", 0) != 0
   - not osd_auto_discovery
-  - raw_multi_journal
+  - osd_scenario == 'non-collocated'
 
 - name: automatically activate osd disk(s) without partitions (dmcrypt)
   command: ceph-disk activate --dmcrypt "/dev/{{ item.key }}"
@@ -38,7 +38,8 @@
   - item.value.partitions|count == 0
   - item.value.holders|count == 0
   - osd_auto_discovery
-  - dmcrypt_journal_collocation
+  - dmcrypt
+  - osd_scenario == 'collocated'
 
 - name: activate osd(s) when device is a disk (dmcrypt)
   command: ceph-disk activate --dmcrypt {{ item.1 | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\\1p') }}1
@@ -52,13 +53,14 @@
   - not item.0.get("skipped")
   - item.0.get("rc", 0) != 0
   - not osd_auto_discovery
-  - dmcrypt_dedicated_journal
+  - dmcrypt
+  - osd_scenario == 'non-collocated'
 
 # NOTE (leseb): we must do this because of
 # https://github.com/ansible/ansible/issues/4297
 - name: combine ispartition results
   set_fact:
-    combined_activate_osd_disk_results: "{{ activate_osd_disk if not dmcrypt_journal_collocation else activate_osd_disk_dmcrypt }}"
+    combined_activate_osd_disk_results: "{{ activate_osd_disk if not dmcrypt else activate_osd_disk_dmcrypt }}"
 
 - name: fail if ceph-disk cannot create an OSD
   fail:
diff --git a/roles/ceph-osd/tasks/check_devices.yml b/roles/ceph-osd/tasks/check_devices.yml
index 4351d12e7..784bc3c84 100644
--- a/roles/ceph-osd/tasks/check_devices.yml
+++ b/roles/ceph-osd/tasks/check_devices.yml
@@ -24,7 +24,7 @@
 - name: check the journal device is partition
   shell: "readlink -f {{ item }} | egrep '/dev/([hsv]d[a-z]{1,2}|cciss/c[0-9]d[0-9]p|nvme[0-9]n[0-9]p)[0-9]{1,2}|fio[a-z]{1,2}[0-9]{1,2}$'"
-  with_items: "{{ raw_journal_devices }}"
+  with_items: "{{ dedicated_devices }}"
   changed_when: false
   failed_when: false
   always_run: true
@@ -34,22 +34,22 @@
   shell: "parted --script {{ item.1 }} print > /dev/null 2>&1"
   with_together:
     - "{{ journal_ispartition_results.results }}"
-    - "{{ raw_journal_devices|unique }}"
+    - "{{ dedicated_devices|unique }}"
   changed_when: false
   failed_when: false
   always_run: true
   register: journal_partition_status
   when:
-    - (raw_multi_journal or dmcrypt_dedicated_journal)
+    - osd_scenario == 'non-collocated'
    - item.0.rc != 0
 
 - name: fix partitions gpt header or labels of the journal devices
  shell: 
"sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }} || sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }}" with_together: - "{{ journal_partition_status.results }}" - - "{{ raw_journal_devices|unique }}" + - "{{ dedicated_devices|unique }}" changed_when: false when: - - (raw_multi_journal or dmcrypt_dedicated_journal) + - osd_scenario == 'non-collocated' - not item.0.get("skipped") - item.0.get("rc", 0) != 0 diff --git a/roles/ceph-osd/tasks/check_mandatory_vars.yml b/roles/ceph-osd/tasks/check_mandatory_vars.yml index 652cacbaa..c3ea103b0 100644 --- a/roles/ceph-osd/tasks/check_mandatory_vars.yml +++ b/roles/ceph-osd/tasks/check_mandatory_vars.yml @@ -26,24 +26,16 @@ - osd_group_name is defined - osd_group_name in group_names - not containerized_deployment - - not journal_collocation - - not raw_multi_journal - - not dmcrypt_journal_collocation - - not dmcrypt_dedicated_journal + - osd_scenario == 'dummy' -- name: verify only one osd scenario was chosen +- name: make sure a valid osd scenario was chosen fail: - msg: "please select only one osd scenario" + msg: "please choose an osd scenario, valid scenarios are {{ valid_osd_scenarios }}" when: - osd_group_name is defined - osd_group_name in group_names - not containerized_deployment - - (journal_collocation and raw_multi_journal) - or (dmcrypt_journal_collocation and journal_collocation) - or (dmcrypt_journal_collocation and raw_multi_journal) - or (dmcrypt_dedicated_journal and journal_collocation) - or (dmcrypt_dedicated_journal and raw_multi_journal) - or (dmcrypt_dedicated_journal and dmcrypt_journal_collocation) + - not osd_scenario in valid_osd_scenarios - name: verify devices have been provided fail: @@ -51,7 +43,6 @@ when: - osd_group_name is defined - osd_group_name in group_names - - (journal_collocation or containerized_deployment) - not osd_auto_discovery - devices|length == 0 @@ -71,20 +62,20 @@ - osd_group_name is defined - osd_group_name in group_names - not containerized_deployment - - raw_multi_journal - - raw_journal_devices|length == 0 + - osd_scenario == 'non-collocated' + - dedicated_devices|length == 0 or devices|length == 0 -- name: make sure the raw_journal_devices variable is a list +- name: make sure the dedicated_devices variable is a list fail: - msg: "raw_journal_devices: must be a list, not a string, i.e. [ \"/dev/sda\" ]" + msg: "dedicated_devices: must be a list, not a string, i.e. 
[ \"/dev/sda\" ]" when: - osd_group_name is defined - osd_group_name in group_names - not containerized_deployment - - raw_multi_journal - - raw_journal_devices is string - - raw_journal_devices|length == 0 + - osd_scenario == 'non-collocated' + - dedicated_devices is string + - dedicated_devices|length == 0 or devices|length == 0 - name: check if bluestore is supported by the selected ceph version diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml index 11a7f0d7b..5fd6185e0 100644 --- a/roles/ceph-osd/tasks/main.yml +++ b/roles/ceph-osd/tasks/main.yml @@ -6,23 +6,16 @@ # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) static: False -- include: ./scenarios/journal_collocation.yml +- include: ./scenarios/collocated.yml when: - - (journal_collocation or dmcrypt_journal_collocation) + - osd_scenario == 'collocated' - not containerized_deployment # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) static: False -- include: ./scenarios/raw_multi_journal.yml +- include: ./scenarios/non-collocated.yml when: - - raw_multi_journal - - not containerized_deployment - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False - -- include: ./scenarios/dmcrypt-dedicated-journal.yml - when: - - dmcrypt_dedicated_journal + - osd_scenario == 'non-collocated' - not containerized_deployment # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) static: False diff --git a/roles/ceph-osd/tasks/pre_requisite.yml b/roles/ceph-osd/tasks/pre_requisite.yml index 518c421b2..59b419243 100644 --- a/roles/ceph-osd/tasks/pre_requisite.yml +++ b/roles/ceph-osd/tasks/pre_requisite.yml @@ -30,7 +30,7 @@ set_fact: copy_admin_key: true when: - - dmcrypt_journal_collocation or dmcrypt_dedicated_journal + - osd_scenario == 'collocated' - name: copy osd bootstrap key copy: diff --git a/roles/ceph-osd/tasks/scenarios/journal_collocation.yml b/roles/ceph-osd/tasks/scenarios/collocated.yml similarity index 89% rename from roles/ceph-osd/tasks/scenarios/journal_collocation.yml rename to roles/ceph-osd/tasks/scenarios/collocated.yml index 05c37e6b6..b4119abeb 100644 --- a/roles/ceph-osd/tasks/scenarios/journal_collocation.yml +++ b/roles/ceph-osd/tasks/scenarios/collocated.yml @@ -11,42 +11,48 @@ ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore" when: - osd_objectstore == 'bluestore' - - journal_collocation + - osd_scenario == 'collocated' + - not dmcrypt - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous - set_fact: ceph_disk_cli_options: "--cluster {{ cluster }} --filestore" when: - osd_objectstore == 'filestore' - - journal_collocation + - osd_scenario == 'collocated' + - not dmcrypt - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous - set_fact: ceph_disk_cli_options: "--cluster {{ cluster }}" when: - osd_objectstore == 'filestore' - - journal_collocation + - osd_scenario == 'collocated' + - not dmcrypt - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous - set_fact: ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt" when: - osd_objectstore == 'bluestore' - - dmcrypt_journal_collocation + - osd_scenario == 'collocated' + - dmcrypt - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous - set_fact: ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt" when: - osd_objectstore == 'filestore' - 
- dmcrypt_journal_collocation + - osd_scenario == 'collocated' + - dmcrypt - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous - set_fact: ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt" when: - osd_objectstore == 'filestore' - - dmcrypt_journal_collocation + - osd_scenario == 'collocated' + - dmcrypt - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous # NOTE (alahouze): if the device is a partition, the parted command below has diff --git a/roles/ceph-osd/tasks/scenarios/dmcrypt-dedicated-journal.yml b/roles/ceph-osd/tasks/scenarios/dmcrypt-dedicated-journal.yml deleted file mode 100644 index cb58e0a1b..000000000 --- a/roles/ceph-osd/tasks/scenarios/dmcrypt-dedicated-journal.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- -## SCENARIO 6: DMCRYPT N JOURNAL DEVICES FOR N OSDS - -- include: ../check_devices.yml - -- set_fact: - ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt" - when: - - osd_objectstore == 'bluestore' - - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous - -- set_fact: - ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt" - when: - - osd_objectstore == 'filestore' - - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous - -- set_fact: - ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt" - when: - - osd_objectstore == 'filestore' - - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous - -# NOTE (leseb): the prepare process must be parallelized somehow... -# if you have 64 disks with 4TB each, this will take a while -# since Ansible will sequential process the loop - -# NOTE (alahouze): if the device is a partition, the parted command below has -# failed, this is why we check if the device is a partition too. -- name: prepare dmcrypt osd disk(s) with a dedicated journal device on "{{ osd_objectstore }}" - command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.2 }} {{ item.3 }}" - with_together: - - "{{ parted_results.results }}" - - "{{ ispartition_results.results }}" - - "{{ devices }}" - - "{{ raw_journal_devices }}" - changed_when: false - when: - - not item.0.get("skipped") - - not item.1.get("skipped") - - item.0.get("rc", 0) != 0 - - item.1.get("rc", 0) != 0 - - not osd_auto_discovery - -- include: ../activate_osds.yml diff --git a/roles/ceph-osd/tasks/scenarios/raw_multi_journal.yml b/roles/ceph-osd/tasks/scenarios/non-collocated.yml similarity index 58% rename from roles/ceph-osd/tasks/scenarios/raw_multi_journal.yml rename to roles/ceph-osd/tasks/scenarios/non-collocated.yml index 862af10af..5a181c7fa 100644 --- a/roles/ceph-osd/tasks/scenarios/raw_multi_journal.yml +++ b/roles/ceph-osd/tasks/scenarios/non-collocated.yml @@ -1,6 +1,4 @@ --- -## SCENARIO 3: N JOURNAL DEVICES FOR N OSDS - - include: ../check_devices.yml # NOTE (leseb): the prepare process must be parallelized somehow... 
@@ -11,37 +9,64 @@ ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore" when: - osd_objectstore == 'bluestore' + - not dmcrypt - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous - set_fact: ceph_disk_cli_options: "--cluster {{ cluster }} --filestore" when: - osd_objectstore == 'filestore' + - not dmcrypt - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous - set_fact: ceph_disk_cli_options: "--cluster {{ cluster }}" when: - osd_objectstore == 'filestore' + - not dmcrypt - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous -- name: prepare filestore osd disk(s) with a dedicated journal device - command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.1 }} {{ item.2 }}" +- set_fact: + ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt" + when: + - osd_objectstore == 'bluestore' + - dmcrypt + - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous + +- set_fact: + ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt" + when: + - osd_objectstore == 'filestore' + - dmcrypt + - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous + +- set_fact: + ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt" + when: + - osd_objectstore == 'filestore' + - dmcrypt + - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous + +- name: prepare filestore osd disk(s) non-collocated + command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.2 }} {{ item.3 }}" with_together: - "{{ parted_results.results }}" + - "{{ ispartition_results.results }}" - "{{ devices }}" - - "{{ raw_journal_devices }}" + - "{{ dedicated_devices }}" changed_when: false when: - item.0.get("skipped") or item.0.get("rc", 0) != 0 + - not item.1.get("skipped") + - item.1.get("rc", 0) != 0 - osd_objectstore == 'filestore' - not osd_auto_discovery -- name: manually prepare bluestore osd disk(s) with a dedicated device for db and wal +- name: manually prepare "{{ osd_objectstore }}" osd disk(s) with a dedicated device for db and wal command: "ceph-disk prepare {{ ceph_disk_cli_options }} --block.db {{ item.1 }} --block.wal {{ item.2 }} {{ item.3 }}" with_together: - "{{ parted_results.results }}" - - "{{ raw_journal_devices }}" + - "{{ dedicated_devices }}" - "{{ bluestore_wal_devices }}" - "{{ devices }}" when: diff --git a/roles/ceph-osd/templates/ceph-osd-run.sh.j2 b/roles/ceph-osd/templates/ceph-osd-run.sh.j2 index bd07772ad..fd00f69f3 100644 --- a/roles/ceph-osd/templates/ceph-osd-run.sh.j2 +++ b/roles/ceph-osd/templates/ceph-osd-run.sh.j2 @@ -44,8 +44,8 @@ create_dev_list $1 -v /etc/localtime:/etc/localtime:ro \ --device=/dev/${1} \ --device=/dev/${1}1 \ - {% if raw_journal_devices|length > 0 -%} - -e OSD_JOURNAL={{ raw_journal_devices[0] }} \ + {% if dedicated_devices|length > 0 -%} + -e OSD_JOURNAL={{ dedicated_devices[0] }} \ {% else -%} --device=/dev/${1}2 \ {% endif -%} diff --git a/tests/conftest.py b/tests/conftest.py index 0dd6c5b31..0bcc99238 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -31,7 +31,7 @@ def node(Ansible, Interface, Command, request): if node_type == "mgrs" and ceph_stable_release == "jewel": pytest.skip("mgr nodes can not be tested with ceph release jewel") - journal_collocation_test = ansible_vars.get("journal_collocation") or ansible_vars.get("dmcrypt_journal_collocation") + journal_collocation_test = ansible_vars.get("osd_scenario") == "collocated" if request.node.get_marker("journal_collocation") and not journal_collocation_test: pytest.skip("Scenario is not using 
journal collocation") diff --git a/tests/functional/centos/7/bluestore/group_vars/all b/tests/functional/centos/7/bluestore/group_vars/all index 222fa7aa2..716fd8121 100644 --- a/tests/functional/centos/7/bluestore/group_vars/all +++ b/tests/functional/centos/7/bluestore/group_vars/all @@ -9,9 +9,9 @@ journal_size: 100 osd_objectstore: "bluestore" devices: - '/dev/sda' -raw_journal_devices: +dedicated_devices: - '/dev/sdb' -raw_multi_journal: True +osd_scenario: non-collocated os_tuning_params: - { name: kernel.pid_max, value: 4194303 } - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/centos/7/bs-crypt-ded-jrn/group_vars/all b/tests/functional/centos/7/bs-crypt-ded-jrn/group_vars/all index 21ac1f7e9..067146318 100644 --- a/tests/functional/centos/7/bs-crypt-ded-jrn/group_vars/all +++ b/tests/functional/centos/7/bs-crypt-ded-jrn/group_vars/all @@ -5,11 +5,12 @@ public_network: "192.168.11.0/24" cluster_network: "192.168.12.0/24" journal_size: 100 monitor_interface: eth1 -dmcrypt_dedicated_journal: true +osd_scenario: non-collocated +dmcrypt: true osd_objectstore: "bluestore" devices: - '/dev/sda' -raw_journal_devices: +dedicated_devices: - '/dev/sdb' os_tuning_params: - { name: kernel.pid_max, value: 4194303 } diff --git a/tests/functional/centos/7/bs-crypt-jrn-col/group_vars/all b/tests/functional/centos/7/bs-crypt-jrn-col/group_vars/all index 437108972..53e2c2194 100644 --- a/tests/functional/centos/7/bs-crypt-jrn-col/group_vars/all +++ b/tests/functional/centos/7/bs-crypt-jrn-col/group_vars/all @@ -5,7 +5,8 @@ public_network: "192.168.13.0/24" cluster_network: "192.168.14.0/24" journal_size: 100 monitor_interface: eth1 -dmcrypt_journal_collocation: true +osd_scenario: collocated +dmcrypt: true osd_objectstore: "bluestore" devices: - '/dev/sda' diff --git a/tests/functional/centos/7/bs-dock-crypt-jrn-col/group_vars/all b/tests/functional/centos/7/bs-dock-crypt-jrn-col/group_vars/all index 6a2a585e1..ffb7de1e2 100644 --- a/tests/functional/centos/7/bs-dock-crypt-jrn-col/group_vars/all +++ b/tests/functional/centos/7/bs-dock-crypt-jrn-col/group_vars/all @@ -7,7 +7,8 @@ public_network: "192.168.23.0/24" cluster_network: "192.168.24.0/24" journal_size: 100 monitor_interface: eth1 -dmcrypt_journal_collocation: true +osd_scenario: collocated +dmcrypt: true osd_objectstore: "bluestore" devices: - '/dev/sda' diff --git a/tests/functional/centos/7/bs-dock-ded-jrn/group_vars/all b/tests/functional/centos/7/bs-dock-ded-jrn/group_vars/all index 09176dc58..3e1ebdc3e 100644 --- a/tests/functional/centos/7/bs-dock-ded-jrn/group_vars/all +++ b/tests/functional/centos/7/bs-dock-ded-jrn/group_vars/all @@ -16,8 +16,8 @@ ceph_rgw_civetweb_port: 8080 ceph_osd_docker_devices: "{{ devices }}" devices: - /dev/sda -raw_journal_devices: +dedicated_devices: - /dev/sdb -ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_BLUESTORE=1 +ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_BLUESTORE=1 ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1 ceph_osd_docker_run_script_path: /var/tmp diff --git a/tests/functional/centos/7/bs-docker/group_vars/all b/tests/functional/centos/7/bs-docker/group_vars/all index bc10cd192..b6f6dfb76 100644 --- a/tests/functional/centos/7/bs-docker/group_vars/all +++ b/tests/functional/centos/7/bs-docker/group_vars/all @@ -12,7 +12,7 @@ journal_size: 100 
 ceph_docker_on_openstack: False
 public_network: "192.168.15.0/24"
 cluster_network: "192.168.16.0/24"
-journal_collocation: true
+osd_scenario: collocated
 ceph_rgw_civetweb_port: 8080
 ceph_osd_docker_devices: "{{ devices }}"
 ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_FORCE_ZAP=1 -e OSD_BLUESTORE=1
diff --git a/tests/functional/centos/7/bs-jrn-col/group_vars/all b/tests/functional/centos/7/bs-jrn-col/group_vars/all
index a977c9dbf..d1d299489 100644
--- a/tests/functional/centos/7/bs-jrn-col/group_vars/all
+++ b/tests/functional/centos/7/bs-jrn-col/group_vars/all
@@ -9,7 +9,7 @@ journal_size: 100
 devices:
   - '/dev/sda'
   - '/dev/sdb'
-journal_collocation: True
+osd_scenario: collocated
 osd_objectstore: "bluestore"
 os_tuning_params:
   - { name: kernel.pid_max, value: 4194303 }
diff --git a/tests/functional/centos/7/cluster/group_vars/all b/tests/functional/centos/7/cluster/group_vars/all
index d3b914530..4ffcbb634 100644
--- a/tests/functional/centos/7/cluster/group_vars/all
+++ b/tests/functional/centos/7/cluster/group_vars/all
@@ -8,9 +8,9 @@ journal_size: 100
 osd_objectstore: "filestore"
 devices:
   - '/dev/sda'
-raw_journal_devices:
+dedicated_devices:
   - '/dev/sdb'
-raw_multi_journal: True
+osd_scenario: non-collocated
 os_tuning_params:
   - { name: kernel.pid_max, value: 4194303 }
   - { name: fs.file-max, value: 26234859 }
diff --git a/tests/functional/centos/7/crypt-ded-jrn/group_vars/all b/tests/functional/centos/7/crypt-ded-jrn/group_vars/all
index 6313dcac6..18bd190fd 100644
--- a/tests/functional/centos/7/crypt-ded-jrn/group_vars/all
+++ b/tests/functional/centos/7/crypt-ded-jrn/group_vars/all
@@ -5,11 +5,12 @@ public_network: "192.168.11.0/24"
 cluster_network: "192.168.12.0/24"
 journal_size: 100
 monitor_interface: eth1
-dmcrypt_dedicated_journal: true
+osd_scenario: non-collocated
+dmcrypt: true
 osd_objectstore: "filestore"
 devices:
   - '/dev/sda'
-raw_journal_devices:
+dedicated_devices:
   - '/dev/sdb'
 os_tuning_params:
   - { name: kernel.pid_max, value: 4194303 }
diff --git a/tests/functional/centos/7/crypt-jrn-col/group_vars/all b/tests/functional/centos/7/crypt-jrn-col/group_vars/all
index a4d740c39..b9b8ee38e 100644
--- a/tests/functional/centos/7/crypt-jrn-col/group_vars/all
+++ b/tests/functional/centos/7/crypt-jrn-col/group_vars/all
@@ -5,7 +5,8 @@ public_network: "192.168.13.0/24"
 cluster_network: "192.168.14.0/24"
 journal_size: 100
 monitor_interface: eth1
-dmcrypt_journal_collocation: true
+osd_scenario: collocated
+dmcrypt: true
 osd_objectstore: "filestore"
 devices:
   - '/dev/sda'
diff --git a/tests/functional/centos/7/docker-crypt-jrn-col/group_vars/all b/tests/functional/centos/7/docker-crypt-jrn-col/group_vars/all
index 2af26d451..962da29a7 100644
--- a/tests/functional/centos/7/docker-crypt-jrn-col/group_vars/all
+++ b/tests/functional/centos/7/docker-crypt-jrn-col/group_vars/all
@@ -12,7 +12,8 @@ journal_size: 100
 ceph_docker_on_openstack: False
 public_network: "192.168.15.0/24"
 cluster_network: "192.168.16.0/24"
-dmcrypt_journal_collocation: true
+osd_scenario: collocated
+dmcrypt: true
 ceph_rgw_civetweb_port: 8080
 ceph_osd_docker_devices: "{{ devices }}"
 devices:
diff --git a/tests/functional/centos/7/docker-ded-jrn/group_vars/all b/tests/functional/centos/7/docker-ded-jrn/group_vars/all
index 8bb92b26b..0d865fefb 100644
--- a/tests/functional/centos/7/docker-ded-jrn/group_vars/all
+++ b/tests/functional/centos/7/docker-ded-jrn/group_vars/all
@@ -14,9 +14,10 @@ public_network: "192.168.15.0/24"
 cluster_network: "192.168.16.0/24"
 ceph_rgw_civetweb_port: 8080
 ceph_osd_docker_devices: "{{ devices }}"
+osd_scenario: non-collocated devices: - /dev/sda -raw_journal_devices: +dedicated_devices: - /dev/sdb -ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} +ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} ceph_osd_docker_run_script_path: /var/tmp diff --git a/tests/functional/centos/7/docker/group_vars/all b/tests/functional/centos/7/docker/group_vars/all index 663b7dc24..6201198d9 100644 --- a/tests/functional/centos/7/docker/group_vars/all +++ b/tests/functional/centos/7/docker/group_vars/all @@ -12,7 +12,7 @@ journal_size: 100 ceph_docker_on_openstack: False public_network: "192.168.15.0/24" cluster_network: "192.168.16.0/24" -journal_collocation: true +osd_scenario: collocated ceph_rgw_civetweb_port: 8080 ceph_osd_docker_devices: "{{ devices }}" ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 diff --git a/tests/functional/centos/7/jrn-col/group_vars/all b/tests/functional/centos/7/jrn-col/group_vars/all index 21e121489..7219ea2bf 100644 --- a/tests/functional/centos/7/jrn-col/group_vars/all +++ b/tests/functional/centos/7/jrn-col/group_vars/all @@ -10,7 +10,7 @@ osd_objectstore: "filestore" devices: - '/dev/sda' - '/dev/sdb' -journal_collocation: True +osd_scenario: collocated os_tuning_params: - { name: kernel.pid_max, value: 4194303 } - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/ubuntu/16.04/cluster/group_vars/all b/tests/functional/ubuntu/16.04/cluster/group_vars/all index a90958343..8122d46c1 100644 --- a/tests/functional/ubuntu/16.04/cluster/group_vars/all +++ b/tests/functional/ubuntu/16.04/cluster/group_vars/all @@ -8,9 +8,9 @@ monitor_interface: eth1 journal_size: 100 devices: - '/dev/sdb' -raw_journal_devices: +dedicated_devices: - '/dev/sdc' -raw_multi_journal: True +osd_scenario: non-collocated os_tuning_params: - { name: kernel.pid_max, value: 4194303 } - { name: fs.file-max, value: 26234859 } diff --git a/tox.ini b/tox.ini index ece92ddcf..15758c546 100644 --- a/tox.ini +++ b/tox.ini @@ -88,15 +88,15 @@ deps= ansible2.2: ansible==2.2.3 -r{toxinidir}/tests/requirements.txt changedir= - # tests a 1 mon, 1 osd, 1 mds and 1 rgw xenial cluster using raw_multi_journal OSD scenario + # tests a 1 mon, 1 osd, 1 mds and 1 rgw xenial cluster using non-collocated OSD scenario xenial_cluster: {toxinidir}/tests/functional/ubuntu/16.04/cluster - # tests a 1 mon 1 osd centos7 cluster using journal_collocation OSD scenario + # tests a 1 mon 1 osd centos7 cluster using collocated OSD scenario journal_collocation: {toxinidir}/tests/functional/centos/7/jrn-col - # tests a 1 mon 1 osd centos7 cluster using dmcrypt_dedicated_journal OSD scenario + # tests a 1 mon 1 osd centos7 cluster using dmcrypt non-collocated OSD scenario dmcrypt_journal: {toxinidir}/tests/functional/centos/7/crypt-ded-jrn - # tests a 1 mon 1 osd centos7 cluster using dmcrypt_journal_collocation OSD scenario + # tests a 1 mon 1 osd centos7 cluster using dmcrypt collocated OSD scenario dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/crypt-jrn-col - # tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using raw_multi_journal OSD scenario + # tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using non-collocated OSD scenario centos7_cluster: {toxinidir}/tests/functional/centos/7/cluster # an alias for 
centos7_cluster, this makes the name better suited for rhcs testing cluster: {toxinidir}/tests/functional/centos/7/cluster
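
A quick way to sanity-check which scenario and devices a host resolved to once
an inventory is in place (illustrative ad-hoc commands; the inventory file
name 'hosts' is an example, not part of this patch):

    ansible osds -i hosts -m debug -a "var=osd_scenario"
    ansible osds -i hosts -m debug -a "var=dedicated_devices"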