From 2fa8099fa7127c0df9a987ed9f6b73e1b62f1b56 Mon Sep 17 00:00:00 2001 From: Dimitri Savineau Date: Wed, 24 Apr 2019 12:21:38 -0400 Subject: [PATCH] osd: set default bluestore_wal_devices empty We only need to set the wal dedicated device when there's three tiers of storage used. Currently the block.wal partition will also be created on the same device as block.db. Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1685253 Signed-off-by: Dimitri Savineau --- docs/source/osds/scenarios.rst | 3 +- group_vars/osds.yml.sample | 5 ++-- roles/ceph-osd/defaults/main.yml | 5 ++-- .../tasks/scenarios/non-collocated.yml | 29 +++++++++++++++++++ 4 files changed, 34 insertions(+), 8 deletions(-) diff --git a/docs/source/osds/scenarios.rst b/docs/source/osds/scenarios.rst index 87d17fae4..19cf4cb14 100644 --- a/docs/source/osds/scenarios.rst +++ b/docs/source/osds/scenarios.rst @@ -372,12 +372,11 @@ Example of what you will get: /dev/sda2: PARTLABEL="ceph block" PARTUUID="ea454807-983a-4cf2-899e-b2680643bc1c" /dev/sdb: PTTYPE="gpt" /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="af5b2d74-4c08-42cf-be57-7248c739e217" - /dev/sdb2: PARTLABEL="ceph block.wal" PARTUUID="af3f8327-9aa9-4c2b-a497-cf0fe96d126a" There is more device granularity for Bluestore ONLY if ``osd_objectstore: bluestore`` is enabled by setting the ``bluestore_wal_devices`` config option. -By default, if ``bluestore_wal_devices`` is empty, it will get the content of ``dedicated_devices``. +By default, ``bluestore_wal_devices`` is empty. If set, then you will have a dedicated partition on a specific device for block.wal. 
Example of what you will get: diff --git a/group_vars/osds.yml.sample b/group_vars/osds.yml.sample index 937046793..15410d380 100644 --- a/group_vars/osds.yml.sample +++ b/group_vars/osds.yml.sample @@ -141,7 +141,6 @@ dummy: # /dev/sda2: PARTLABEL="ceph block" PARTUUID="ea454807-983a-4cf2-899e-b2680643bc1c" # /dev/sdb: PTTYPE="gpt" # /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="af5b2d74-4c08-42cf-be57-7248c739e217" -# /dev/sdb2: PARTLABEL="ceph block.wal" PARTUUID="af3f8327-9aa9-4c2b-a497-cf0fe96d126a" # # Note: This scenario uses the ceph-disk tool to provision OSDs #dedicated_devices: [] @@ -151,7 +150,7 @@ dummy: # # ONLY if osd_objectstore: bluestore is enabled. # -# By default, if 'bluestore_wal_devices' is empty, it will get the content of 'dedicated_devices'. +# By default, 'bluestore_wal_devices' is empty. # If set, then you will have a dedicated partition on a specific device for block.wal. # # Set bluestore_wal_devices: [] to use the same partition for RocksDB and WAL. @@ -166,7 +165,7 @@ dummy: # /dev/sdc: PTTYPE="gpt" # /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3" # Note: This option uses the ceph-disk tool -#bluestore_wal_devices: "{{ dedicated_devices }}" +#bluestore_wal_devices: [] # III. Use ceph-volume to create OSDs from logical volumes. # Use 'osd_scenario: lvm' to enable this scenario. 
diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml index e1bf915e3..7267b09b2 100644 --- a/roles/ceph-osd/defaults/main.yml +++ b/roles/ceph-osd/defaults/main.yml @@ -133,7 +133,6 @@ valid_osd_scenarios: # /dev/sda2: PARTLABEL="ceph block" PARTUUID="ea454807-983a-4cf2-899e-b2680643bc1c" # /dev/sdb: PTTYPE="gpt" # /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="af5b2d74-4c08-42cf-be57-7248c739e217" -# /dev/sdb2: PARTLABEL="ceph block.wal" PARTUUID="af3f8327-9aa9-4c2b-a497-cf0fe96d126a" # # Note: This scenario uses the ceph-disk tool to provision OSDs dedicated_devices: [] @@ -143,7 +142,7 @@ dedicated_devices: [] # # ONLY if osd_objectstore: bluestore is enabled. # -# By default, if 'bluestore_wal_devices' is empty, it will get the content of 'dedicated_devices'. +# By default, 'bluestore_wal_devices' is empty. # If set, then you will have a dedicated partition on a specific device for block.wal. # # Set bluestore_wal_devices: [] to use the same partition for RocksDB and WAL. @@ -158,7 +157,7 @@ dedicated_devices: [] # /dev/sdc: PTTYPE="gpt" # /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3" # Note: This option uses the ceph-disk tool -bluestore_wal_devices: "{{ dedicated_devices }}" +bluestore_wal_devices: [] # III. Use ceph-volume to create OSDs from logical volumes. # Use 'osd_scenario: lvm' to enable this scenario. 
diff --git a/roles/ceph-osd/tasks/scenarios/non-collocated.yml b/roles/ceph-osd/tasks/scenarios/non-collocated.yml index 26df149a1..49e7bf8d6 100644 --- a/roles/ceph-osd/tasks/scenarios/non-collocated.yml +++ b/roles/ceph-osd/tasks/scenarios/non-collocated.yml @@ -29,6 +29,34 @@ - osd_objectstore == 'filestore' - item.0.partitions|length == 0 +- name: prepare ceph "{{ osd_objectstore }}" containerized osd disk(s) non-collocated with a dedicated device for db + shell: | + docker run --net=host \ + --pid=host \ + --privileged=true \ + --name=ceph-osd-prepare-{{ ansible_hostname }}-{{ item.1 | regex_replace('/dev/', '') }} \ + -v /etc/ceph:/etc/ceph:z \ + -v /var/lib/ceph/:/var/lib/ceph/:z \ + -v /dev:/dev \ + -v /etc/localtime:/etc/localtime:ro \ + -e DEBUG=verbose \ + -e CLUSTER={{ cluster }} \ + -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \ + -e OSD_DEVICE={{ item.1 }} \ + -e OSD_BLUESTORE_BLOCK_DB={{ item.2 }} \ + {{ docker_env_args }} \ + {{ ceph_osd_docker_prepare_env }} \ + {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} + with_together: + - "{{ parted_results.results | default([]) }}" + - "{{ devices }}" + - "{{ dedicated_devices }}" + when: + - containerized_deployment + - osd_objectstore == 'bluestore' + - item.0.partitions|length == 0 + - bluestore_wal_devices|length == 0 + - name: prepare ceph "{{ osd_objectstore }}" containerized osd disk(s) non-collocated with a dedicated device for db and wal shell: | docker run --net=host \ @@ -57,6 +85,7 @@ - containerized_deployment - osd_objectstore == 'bluestore' - item.0.partitions|length == 0 + - bluestore_wal_devices|length > 0 - name: prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) non-collocated command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.1 }} {{ item.2 }}"