mirror of https://github.com/ceph/ceph-ansible.git

add support for rocksdb and wal on the same partition in non-collocated

Signed-off-by: Kai Wembacher <kai@ktwe.de>
(cherry picked from commit a273ed7f60)

pull/3457/merge
parent 3ed5de5cd1
commit e2852eb40e
@@ -128,8 +128,9 @@ dummy:
 # - The devices listed in 'devices' will get 2 partitions, one for 'block' and one for 'data'.
 #   'data' is only 100MB big and does not store any of your data, it's just a bunch of Ceph metadata.
 #   'block' will store all your actual data.
-# - The devices in 'dedicated_devices' will get 1 partition for RocksDB DB, called 'block.db'
-#   and one for RocksDB WAL, called 'block.wal'
+# - The devices in 'dedicated_devices' will get one partition for RocksDB DB, called 'block.db',
+#   and one for RocksDB WAL, called 'block.wal'. To use a single partition for RocksDB and WAL together,
+#   set bluestore_wal_devices to [].
 #
 # By default dedicated_devices will represent block.db
 #
@@ -153,6 +154,8 @@ dummy:
 # By default, if 'bluestore_wal_devices' is empty, it will get the content of 'dedicated_devices'.
 # If set, then you will have a dedicated partition on a specific device for block.wal.
 #
+# Set bluestore_wal_devices: [] to use the same partition for RocksDB and WAL.
+#
 # Example of what you will get:
 # [root@ceph-osd0 ~]# blkid /dev/sd*
 # /dev/sda: PTTYPE="gpt"
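As a reading aid, here is a minimal sketch of host/group vars for the non-collocated BlueStore scenario these comments describe. The device paths and list lengths are placeholder assumptions for illustration, not values taken from this commit:

    # Hypothetical vars for the non-collocated scenario (illustrative devices).
    osd_scenario: non-collocated
    osd_objectstore: bluestore

    # Each of these gets a small 'data' partition and a 'block' partition.
    devices:
      - /dev/sda
      - /dev/sdb

    # One 'block.db' partition per OSD is created on this device.
    dedicated_devices:
      - /dev/nvme0n1
      - /dev/nvme0n1

    # New with this change: an explicit empty list makes RocksDB DB and WAL
    # share the 'block.db' partition instead of creating a 'block.wal'.
    bluestore_wal_devices: []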
@@ -120,8 +120,9 @@ valid_osd_scenarios:
 # - The devices listed in 'devices' will get 2 partitions, one for 'block' and one for 'data'.
 #   'data' is only 100MB big and does not store any of your data, it's just a bunch of Ceph metadata.
 #   'block' will store all your actual data.
-# - The devices in 'dedicated_devices' will get 1 partition for RocksDB DB, called 'block.db'
-#   and one for RocksDB WAL, called 'block.wal'
+# - The devices in 'dedicated_devices' will get one partition for RocksDB DB, called 'block.db',
+#   and one for RocksDB WAL, called 'block.wal'. To use a single partition for RocksDB and WAL together,
+#   set bluestore_wal_devices to [].
 #
 # By default dedicated_devices will represent block.db
 #
@@ -145,6 +146,8 @@ dedicated_devices: []
 # By default, if 'bluestore_wal_devices' is empty, it will get the content of 'dedicated_devices'.
 # If set, then you will have a dedicated partition on a specific device for block.wal.
 #
+# Set bluestore_wal_devices: [] to use the same partition for RocksDB and WAL.
+#
 # Example of what you will get:
 # [root@ceph-osd0 ~]# blkid /dev/sd*
 # /dev/sda: PTTYPE="gpt"
@@ -70,6 +70,18 @@
     - not containerized_deployment
     - item.0.partitions|length == 0
 
+- name: manually prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) with a dedicated device for db
+  command: "ceph-disk prepare {{ ceph_disk_cli_options }} --block.db {{ item.1 }} {{ item.2 }}"
+  with_together:
+    - "{{ parted_results.results | default([]) }}"
+    - "{{ dedicated_devices }}"
+    - "{{ devices | unique }}"
+  when:
+    - osd_objectstore == 'bluestore'
+    - not containerized_deployment
+    - item.0.partitions|length == 0
+    - bluestore_wal_devices|length == 0
+
 - name: manually prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) with a dedicated device for db and wal
   command: "ceph-disk prepare {{ ceph_disk_cli_options }} --block.db {{ item.1 }} --block.wal {{ item.2 }} {{ item.3 }}"
   with_together:
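For intuition on the new task: with_together zips the three lists, so for each index i the command receives item.1 = dedicated_devices[i] and item.2 = devices[i], while item.0 is the matching parted result used only in the when: guard. Assuming the placeholder lists sketched above, and assuming ceph_disk_cli_options expands to flags such as '--cluster ceph --bluestore' (neither is shown in this diff), the rendered commands would look roughly like:

    # Illustrative expansion only; actual flags depend on ceph_disk_cli_options.
    ceph-disk prepare --cluster ceph --bluestore --block.db /dev/nvme0n1 /dev/sda
    ceph-disk prepare --cluster ceph --bluestore --block.db /dev/nvme0n1 /dev/sdb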
@@ -80,4 +92,5 @@
   when:
     - osd_objectstore == 'bluestore'
     - not containerized_deployment
     - item.0.partitions|length == 0
+    - bluestore_wal_devices|length > 0
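Together with the new task's 'bluestore_wal_devices|length == 0' guard, this added condition makes the two prepare tasks mutually exclusive. A sketch of the two resulting modes (the device name is illustrative, not from this commit):

    # Mode 1: DB and WAL share one partition (the new db-only task fires).
    bluestore_wal_devices: []

    # Mode 2: separate 'block.wal' partition on its own device
    # (the existing db-and-wal task fires).
    bluestore_wal_devices:
      - /dev/nvme1n1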