ceph_volume: fix multiple db/wal/journal devices

When using the lvm batch ceph-volume subcommand with dedicated devices
for filestore (journal) or bluestore (db/wal) then the list of devices
is converted to a string instead of being extended via an iterable.
This was working with only one dedicated device, but starting with more
than one, the ceph_volume module fails.

TASK [ceph-osd : use ceph-volume lvm batch to create bluestore osds] **
fatal: [xxxxxx]: FAILED! => changed=true
  cmd:
  - ceph-volume
  - --cluster
  - ceph
  - lvm
  - batch
  - --bluestore
  - --yes
  - --prepare
  - --osds-per-device
  - '4'
  - /dev/nvme2n1
  - /dev/nvme3n1
  - /dev/nvme4n1
  - /dev/nvme5n1
  - /dev/nvme6n1
  - --db-devices
  - /dev/nvme0n1 /dev/nvme1n1
  - --report
  - --format=json
  msg: non-zero return code
  rc: 2
  stderr: |2-
     stderr: lsblk: /dev/nvme0n1 /dev/nvme1n1: not a block device
     stderr: error: /dev/nvme0n1 /dev/nvme1n1: No such file or directory
     stderr: Unknown device, --name=, --path=, or absolute path in /dev/ or /sys expected.
    usage: ceph-volume lvm batch [-h] [--db-devices [DB_DEVICES [DB_DEVICES ...]]]
                                 [--wal-devices [WAL_DEVICES [WAL_DEVICES ...]]]
                                 [--journal-devices [JOURNAL_DEVICES [JOURNAL_DEVICES ...]]]
                                 [--no-auto] [--bluestore] [--filestore]
                                 [--report] [--yes] [--format {json,pretty}]
                                 [--dmcrypt]
                                 [--crush-device-class CRUSH_DEVICE_CLASS]
                                 [--no-systemd]
                                 [--osds-per-device OSDS_PER_DEVICE]
                                 [--block-db-size BLOCK_DB_SIZE]
                                 [--block-wal-size BLOCK_WAL_SIZE]
                                 [--journal-size JOURNAL_SIZE] [--prepare]
                                 [--osd-ids [OSD_IDS [OSD_IDS ...]]]
                                 [DEVICES [DEVICES ...]]
    ceph-volume lvm batch: error: Unable to proceed with non-existing device: /dev/nvme0n1 /dev/nvme1n1

So the dedicated device list is treated as a single string.

This commit also adds the journal_devices, block_db_devices and
wal_devices documentation to the ceph_volume module.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1816713

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
pull/5031/head
Dimitri Savineau 2020-03-27 17:16:41 -04:00 committed by Guillaume Abrioux
parent 4ac99223b2
commit 760b6cd7b0
2 changed files with 100 additions and 5 deletions

View File

@ -114,6 +114,24 @@ options:
- Only applicable if action is 'batch'. - Only applicable if action is 'batch'.
required: false required: false
default: -1 default: -1
journal_devices:
description:
- A list of devices for filestore journal to pass to the 'ceph-volume lvm batch' subcommand.
- Only applicable if action is 'batch'.
- Only applicable if objectstore is 'filestore'.
required: false
block_db_devices:
description:
- A list of devices for bluestore block db to pass to the 'ceph-volume lvm batch' subcommand.
- Only applicable if action is 'batch'.
- Only applicable if objectstore is 'bluestore'.
required: false
wal_devices:
description:
- A list of devices for bluestore block wal to pass to the 'ceph-volume lvm batch' subcommand.
- Only applicable if action is 'batch'.
- Only applicable if objectstore is 'bluestore'.
required: false
report: report:
description: description:
- If provided the --report flag will be passed to 'ceph-volume lvm batch'. - If provided the --report flag will be passed to 'ceph-volume lvm batch'.
@ -321,13 +339,16 @@ def batch(module, container_image):
cmd.extend(batch_devices) cmd.extend(batch_devices)
if journal_devices and objectstore == 'filestore': if journal_devices and objectstore == 'filestore':
cmd.extend(['--journal-devices', ' '.join(journal_devices)]) cmd.append('--journal-devices')
cmd.extend(journal_devices)
if block_db_devices and objectstore == 'bluestore': if block_db_devices and objectstore == 'bluestore':
cmd.extend(['--db-devices', ' '.join(block_db_devices)]) cmd.append('--db-devices')
cmd.extend(block_db_devices)
if wal_devices and objectstore == 'bluestore': if wal_devices and objectstore == 'bluestore':
cmd.extend(['--wal-devices', ' '.join(wal_devices)]) cmd.append('--wal-devices')
cmd.extend(wal_devices)
return cmd return cmd

View File

@ -351,7 +351,7 @@ class TestCephVolumeModule(object):
'journal_size': '100', 'journal_size': '100',
'cluster': 'ceph', 'cluster': 'ceph',
'batch_devices': ["/dev/sda", "/dev/sdb"], 'batch_devices': ["/dev/sda", "/dev/sdb"],
'journal_devices': ["/dev/sdc"]} 'journal_devices': ["/dev/sdc", "/dev/sdd"]}
fake_container_image = None fake_container_image = None
expected_command_list = ['ceph-volume', expected_command_list = ['ceph-volume',
@ -366,7 +366,81 @@ class TestCephVolumeModule(object):
'/dev/sda', '/dev/sda',
'/dev/sdb', '/dev/sdb',
'--journal-devices', '--journal-devices',
'/dev/sdc'] '/dev/sdc',
'/dev/sdd']
result = ceph_volume.batch(
fake_module, fake_container_image)
assert result == expected_command_list
def test_batch_bluestore_with_dedicated_db(self):
fake_module = MagicMock()
fake_module.params = {'objectstore': 'bluestore',
'block_db_size': '-1',
'cluster': 'ceph',
'batch_devices': ["/dev/sda", "/dev/sdb"],
'block_db_devices': ["/dev/sdc", "/dev/sdd"]}
fake_container_image = None
expected_command_list = ['ceph-volume',
'--cluster',
'ceph',
'lvm',
'batch',
'--bluestore',
'--yes',
'/dev/sda',
'/dev/sdb',
'--db-devices',
'/dev/sdc',
'/dev/sdd']
result = ceph_volume.batch(
fake_module, fake_container_image)
assert result == expected_command_list
def test_batch_bluestore_with_dedicated_wal(self):
fake_module = MagicMock()
fake_module.params = {'objectstore': 'bluestore',
'cluster': 'ceph',
'block_db_size': '-1',
'batch_devices': ["/dev/sda", "/dev/sdb"],
'wal_devices': ["/dev/sdc", "/dev/sdd"]}
fake_container_image = None
expected_command_list = ['ceph-volume',
'--cluster',
'ceph',
'lvm',
'batch',
'--bluestore',
'--yes',
'/dev/sda',
'/dev/sdb',
'--wal-devices',
'/dev/sdc',
'/dev/sdd']
result = ceph_volume.batch(
fake_module, fake_container_image)
assert result == expected_command_list
def test_batch_bluestore_with_custom_db_size(self):
fake_module = MagicMock()
fake_module.params = {'objectstore': 'bluestore',
'cluster': 'ceph',
'block_db_size': '4096',
'batch_devices': ["/dev/sda", "/dev/sdb"]}
fake_container_image = None
expected_command_list = ['ceph-volume',
'--cluster',
'ceph',
'lvm',
'batch',
'--bluestore',
'--yes',
'--block-db-size',
'4096',
'/dev/sda',
'/dev/sdb']
result = ceph_volume.batch( result = ceph_volume.batch(
fake_module, fake_container_image) fake_module, fake_container_image)
assert result == expected_command_list assert result == expected_command_list