#!/usr/bin/python

from ansible.module_utils.basic import AnsibleModule
try:
    from ansible.module_utils.ca_common import exec_command, \
        is_containerized, \
        fatal
except ImportError:
    from module_utils.ca_common import exec_command, \
        is_containerized, \
        fatal
import datetime
import copy
import json
import os

ANSIBLE_METADATA = {
    'metadata_version': '1.0',
    'status': ['preview'],
    'supported_by': 'community'
}

DOCUMENTATION = '''
---
module: ceph_volume

short_description: Create ceph OSDs with ceph-volume

description:
    - Using the ceph-volume utility available in Ceph this module
      can be used to create ceph OSDs that are backed by logical volumes.
    - Only available in ceph versions luminous or greater.

options:
    cluster:
        description:
            - The ceph cluster name.
        required: false
        default: ceph
    objectstore:
        description:
            - The objectstore of the OSD, either filestore or bluestore.
            - Required if action is 'create'.
        required: false
        choices: ['bluestore', 'filestore']
        default: bluestore
    action:
        description:
            - The action to take. Create OSDs, zap devices, or query devices.
        required: true
        choices: ['create', 'zap', 'batch', 'prepare', 'activate', 'list', 'inventory']
        default: create
    data:
        description:
            - The logical volume name or device to use for the OSD data.
        required: true
    data_vg:
        description:
            - If data is an lv, this must be the name of the volume group it belongs to.
        required: false
    osd_fsid:
        description:
            - The OSD FSID
        required: false
    osd_id:
        description:
            - The OSD ID
        required: false
    journal:
        description:
            - The logical volume name or partition to use as a filestore journal.
            - Only applicable if objectstore is 'filestore'.
        required: false
    journal_vg:
        description:
            - If journal is an lv, this must be the name of the volume group it belongs to.
            - Only applicable if objectstore is 'filestore'.
        required: false
    db:
        description:
            - A partition or logical volume name to use for block.db.
            - Only applicable if objectstore is 'bluestore'.
        required: false
    db_vg:
        description:
            - If db is an lv, this must be the name of the volume group it belongs to.  # noqa: E501
            - Only applicable if objectstore is 'bluestore'.
        required: false
    wal:
        description:
            - A partition or logical volume name to use for block.wal.
            - Only applicable if objectstore is 'bluestore'.
        required: false
    wal_vg:
        description:
            - If wal is an lv, this must be the name of the volume group it belongs to.  # noqa: E501
            - Only applicable if objectstore is 'bluestore'.
        required: false
    crush_device_class:
        description:
            - Will set the crush device class for the OSD.
        required: false
    dmcrypt:
        description:
            - If set to True, the OSD will be encrypted with dmcrypt.
        required: false
    batch_devices:
        description:
            - A list of devices to pass to the 'ceph-volume lvm batch' subcommand.
            - Only applicable if action is 'batch'.
        required: false
    osds_per_device:
        description:
            - The number of OSDs to create per device.
            - Only applicable if action is 'batch'.
        required: false
        default: 1
    journal_size:
        description:
            - The size in MB of filestore journals.
            - Only applicable if action is 'batch'.
        required: false
        default: 5120
    block_db_size:
        description:
            - The size in bytes of bluestore block db lvs.
            - The default of -1 means to create them as big as possible.
            - Only applicable if action is 'batch'.
        required: false
        default: -1
    journal_devices:
        description:
            - A list of devices for filestore journal to pass to the 'ceph-volume lvm batch' subcommand.
            - Only applicable if action is 'batch'.
            - Only applicable if objectstore is 'filestore'.
        required: false
    block_db_devices:
        description:
            - A list of devices for bluestore block db to pass to the 'ceph-volume lvm batch' subcommand.
            - Only applicable if action is 'batch'.
            - Only applicable if objectstore is 'bluestore'.
        required: false
    wal_devices:
        description:
            - A list of devices for bluestore block wal to pass to the 'ceph-volume lvm batch' subcommand.
            - Only applicable if action is 'batch'.
            - Only applicable if objectstore is 'bluestore'.
        required: false
    report:
        description:
            - If provided the --report flag will be passed to 'ceph-volume lvm batch'.
            - No OSDs will be created.
            - Results will be returned in json format.
            - Only applicable if action is 'batch'.
        required: false
    list:
        description:
            - List potential Ceph LVM metadata on a device.
        required: false
    inventory:
        description:
            - List storage device inventory.
        required: false

author:
    - Andrew Schoen (@andrewschoen)
    - Sebastien Han <seb@redhat.com>
'''

EXAMPLES = '''
- name: set up a filestore osd with an lv data and a journal partition
  ceph_volume:
    objectstore: filestore
    data: data-lv
    data_vg: data-vg
    journal: /dev/sdc1
    action: create
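
# Illustrative sketch (not part of the original examples): the documented
# journal/journal_vg options can also point the filestore journal at a
# logical volume instead of a partition. The lv/vg names are placeholders.
- name: set up a filestore osd with an lv data and an lv journal
  ceph_volume:
    objectstore: filestore
    data: data-lv
    data_vg: data-vg
    journal: journal-lv
    journal_vg: journal-vg
    action: create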

- name: set up a bluestore osd with a raw device for data
  ceph_volume:
    objectstore: bluestore
    data: /dev/sdc
    action: create
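
# Illustrative sketch (not part of the original examples): dmcrypt and
# crush_device_class are documented options; the device path and the 'ssd'
# class below are placeholders.
- name: set up an encrypted bluestore osd with a custom crush device class
  ceph_volume:
    objectstore: bluestore
    data: /dev/sdd
    dmcrypt: true
    crush_device_class: ssd
    action: create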

- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db  # noqa: E501
  ceph_volume:
    objectstore: bluestore
    data: data-lv
    data_vg: data-vg
    db: /dev/sdc1
    wal: /dev/sdc2
    action: create
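
# Illustrative sketches (not part of the original examples): the 'batch',
# 'zap' and 'inventory' actions are documented above but not shown here.
# Device paths are placeholders.
- name: batch prepare two raw devices as bluestore osds
  ceph_volume:
    objectstore: bluestore
    batch_devices:
      - /dev/sdb
      - /dev/sdc
    osds_per_device: 1
    action: batch

- name: zap and destroy everything ceph-volume created on a device
  ceph_volume:
    data: /dev/sdb
    action: zap

- name: list the storage device inventory in json format
  ceph_volume:
    action: inventory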
'''


def container_exec(binary, container_image, mounts=None):
    '''
    Build the docker CLI to run a command inside a container
    '''
    _mounts = {}
    _mounts['/run/lock/lvm'] = '/run/lock/lvm:z'
    _mounts['/var/run/udev'] = '/var/run/udev:z'
    _mounts['/dev'] = '/dev'
    _mounts['/etc/ceph'] = '/etc/ceph:z'
    _mounts['/run/lvm'] = '/run/lvm'
    _mounts['/var/lib/ceph'] = '/var/lib/ceph:z'
    _mounts['/var/log/ceph'] = '/var/log/ceph:z'
    if mounts is None:
        mounts = _mounts
    else:
        _mounts.update(mounts)

    volumes = sum(
        [['-v', '{}:{}'.format(src_dir, dst_dir)]
         for src_dir, dst_dir in _mounts.items()], [])

    container_binary = os.getenv('CEPH_CONTAINER_BINARY')
    command_exec = [container_binary, 'run',
                    '--rm',
                    '--privileged',
                    '--net=host',
                    '--ipc=host'] + volumes + \
                   ['--entrypoint=' + binary, container_image]
    return command_exec


def build_cmd(action, container_image,
              cluster='ceph',
              binary='ceph-volume', mounts=None):
    '''
    Build the ceph-volume command
    '''

    _binary = binary

    if container_image:
        cmd = container_exec(
            binary, container_image, mounts=mounts)
    else:
        binary = [binary]
        cmd = binary

    if _binary == 'ceph-volume':
        cmd.extend(['--cluster', cluster])

    cmd.extend(action)

    return cmd


def get_data(data, data_vg):
    if data_vg:
        data = '{0}/{1}'.format(data_vg, data)
    return data


def get_journal(journal, journal_vg):
    if journal_vg:
        journal = '{0}/{1}'.format(journal_vg, journal)
    return journal


def get_db(db, db_vg):
    if db_vg:
        db = '{0}/{1}'.format(db_vg, db)
    return db


def get_wal(wal, wal_vg):
    if wal_vg:
        wal = '{0}/{1}'.format(wal_vg, wal)
    return wal


def batch(module, container_image, report=None):
    '''
    Batch prepare OSD devices
    '''

    # get module variables
    cluster = module.params['cluster']
    objectstore = module.params['objectstore']
    batch_devices = module.params.get('batch_devices', None)
    crush_device_class = module.params.get('crush_device_class', None)
    journal_devices = module.params.get('journal_devices', None)
    journal_size = module.params.get('journal_size', None)
    block_db_size = module.params.get('block_db_size', None)
    block_db_devices = module.params.get('block_db_devices', None)
    wal_devices = module.params.get('wal_devices', None)
    dmcrypt = module.params.get('dmcrypt', None)
    osds_per_device = module.params.get('osds_per_device', 1)

    if not osds_per_device:
        fatal('osds_per_device must be provided if action is "batch"', module)

    if osds_per_device < 1:
        fatal('osds_per_device must be greater than 0 if action is "batch"', module)  # noqa: E501

    if not batch_devices:
        fatal('batch_devices must be provided if action is "batch"', module)

    # Build the CLI
    action = ['lvm', 'batch']
    cmd = build_cmd(action, container_image, cluster)
    cmd.extend(['--%s' % objectstore])
    if not report:
        cmd.append('--yes')

    if container_image:
        cmd.append('--prepare')

    if crush_device_class:
        cmd.extend(['--crush-device-class', crush_device_class])

    if dmcrypt:
        cmd.append('--dmcrypt')

    if osds_per_device > 1:
        cmd.extend(['--osds-per-device', str(osds_per_device)])

    if objectstore == 'filestore':
        cmd.extend(['--journal-size', journal_size])

    if objectstore == 'bluestore' and block_db_size != '-1':
        cmd.extend(['--block-db-size', block_db_size])

    cmd.extend(batch_devices)

    if journal_devices and objectstore == 'filestore':
        cmd.append('--journal-devices')
        cmd.extend(journal_devices)

    if block_db_devices and objectstore == 'bluestore':
        cmd.append('--db-devices')
        cmd.extend(block_db_devices)

    if wal_devices and objectstore == 'bluestore':
        cmd.append('--wal-devices')
        cmd.extend(wal_devices)

    return cmd


def ceph_volume_cmd(subcommand, container_image, cluster=None):
    '''
    Build ceph-volume initial command
    '''

    if container_image:
        binary = 'ceph-volume'
        cmd = container_exec(
            binary, container_image)
    else:
        binary = ['ceph-volume']
        cmd = binary

    if cluster:
        cmd.extend(['--cluster', cluster])

    cmd.append('lvm')
    cmd.append(subcommand)

    return cmd


def prepare_or_create_osd(module, action, container_image):
    '''
    Prepare or create OSD devices
    '''

    # get module variables
    cluster = module.params['cluster']
    objectstore = module.params['objectstore']
    data = module.params['data']
    data_vg = module.params.get('data_vg', None)
    data = get_data(data, data_vg)
    journal = module.params.get('journal', None)
    journal_vg = module.params.get('journal_vg', None)
    db = module.params.get('db', None)
    db_vg = module.params.get('db_vg', None)
    wal = module.params.get('wal', None)
    wal_vg = module.params.get('wal_vg', None)
    crush_device_class = module.params.get('crush_device_class', None)
    dmcrypt = module.params.get('dmcrypt', None)

    # Build the CLI
    action = ['lvm', action]
    cmd = build_cmd(action, container_image, cluster)
    cmd.extend(['--%s' % objectstore])
    cmd.append('--data')
    cmd.append(data)

    if journal and objectstore == 'filestore':
        journal = get_journal(journal, journal_vg)
        cmd.extend(['--journal', journal])

    if db and objectstore == 'bluestore':
        db = get_db(db, db_vg)
        cmd.extend(['--block.db', db])

    if wal and objectstore == 'bluestore':
        wal = get_wal(wal, wal_vg)
        cmd.extend(['--block.wal', wal])

    if crush_device_class:
        cmd.extend(['--crush-device-class', crush_device_class])

    if dmcrypt:
        cmd.append('--dmcrypt')

    return cmd


def list_osd(module, container_image):
    '''
    List will detect whether or not a device has Ceph LVM Metadata
    '''

    # get module variables
    cluster = module.params['cluster']
    data = module.params.get('data', None)
    data_vg = module.params.get('data_vg', None)
    data = get_data(data, data_vg)

    # Build the CLI
    action = ['lvm', 'list']
    cmd = build_cmd(action,
                    container_image,
                    cluster,
                    mounts={'/var/lib/ceph': '/var/lib/ceph:ro'})
    if data:
        cmd.append(data)
    cmd.append('--format=json')

    return cmd


def list_storage_inventory(module, container_image):
    '''
    List storage inventory.
    '''

    action = ['inventory']
    cmd = build_cmd(action, container_image)
    cmd.append('--format=json')

    return cmd


def activate_osd():
    '''
    Activate all the OSDs on a machine
    '''

    # build the CLI
    action = ['lvm', 'activate']
    container_image = None
    cmd = build_cmd(action, container_image)
    cmd.append('--all')

    return cmd


def is_lv(module, vg, lv, container_image):
    '''
    Check if an LV exists
    '''

    args = ['--noheadings', '--reportformat', 'json', '--select', 'lv_name={},vg_name={}'.format(lv, vg)]  # noqa: E501

    cmd = build_cmd(args, container_image, binary='lvs')

    rc, cmd, out, err = exec_command(module, cmd)

    if rc == 0:
        result = json.loads(out)['report'][0]['lv']
        if len(result) > 0:
            return True

    return False


def zap_devices(module, container_image):
    '''
    Will run 'ceph-volume lvm zap' on all devices, lvs and partitions
    used to create the OSD. The --destroy flag is passed by default
    (destroy: true) so that if an OSD was originally created with a raw
    device or partition for 'data' then any lvs that were created by
    ceph-volume are removed.
    '''

    # get module variables
    data = module.params.get('data', None)
    data_vg = module.params.get('data_vg', None)
    journal = module.params.get('journal', None)
    journal_vg = module.params.get('journal_vg', None)
    db = module.params.get('db', None)
    db_vg = module.params.get('db_vg', None)
    wal = module.params.get('wal', None)
    wal_vg = module.params.get('wal_vg', None)
    osd_fsid = module.params.get('osd_fsid', None)
    osd_id = module.params.get('osd_id', None)
    destroy = module.params.get('destroy', True)

    # build the CLI
    action = ['lvm', 'zap']
    cmd = build_cmd(action, container_image)
    if destroy:
        cmd.append('--destroy')

    if osd_fsid:
        cmd.extend(['--osd-fsid', osd_fsid])

    if osd_id:
        cmd.extend(['--osd-id', osd_id])

    if data:
        data = get_data(data, data_vg)
        cmd.append(data)

    if journal:
        journal = get_journal(journal, journal_vg)
        cmd.extend([journal])

    if db:
        db = get_db(db, db_vg)
        cmd.extend([db])

    if wal:
        wal = get_wal(wal, wal_vg)
        cmd.extend([wal])

    return cmd


def run_module():
    module_args = dict(
        cluster=dict(type='str', required=False, default='ceph'),
        objectstore=dict(type='str', required=False, choices=[
            'bluestore', 'filestore'], default='bluestore'),
        action=dict(type='str', required=False, choices=[
            'create', 'zap', 'batch', 'prepare', 'activate', 'list',
            'inventory'], default='create'),  # noqa: 4502
        data=dict(type='str', required=False),
        data_vg=dict(type='str', required=False),
        journal=dict(type='str', required=False),
        journal_vg=dict(type='str', required=False),
        db=dict(type='str', required=False),
        db_vg=dict(type='str', required=False),
        wal=dict(type='str', required=False),
        wal_vg=dict(type='str', required=False),
        crush_device_class=dict(type='str', required=False),
        dmcrypt=dict(type='bool', required=False, default=False),
        batch_devices=dict(type='list', required=False, default=[]),
        osds_per_device=dict(type='int', required=False, default=1),
        journal_size=dict(type='str', required=False, default='5120'),
        journal_devices=dict(type='list', required=False, default=[]),
        block_db_size=dict(type='str', required=False, default='-1'),
        block_db_devices=dict(type='list', required=False, default=[]),
        wal_devices=dict(type='list', required=False, default=[]),
        report=dict(type='bool', required=False, default=False),
        osd_fsid=dict(type='str', required=False),
        osd_id=dict(type='str', required=False),
        destroy=dict(type='bool', required=False, default=True),
    )

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True,
        mutually_exclusive=[
            ('data', 'osd_fsid', 'osd_id'),
        ],
        required_if=[
            ('action', 'zap', ('data', 'osd_fsid', 'osd_id'), True)
        ]
    )

    result = dict(
        changed=False,
        stdout='',
        stderr='',
        rc=0,
        start='',
        end='',
        delta='',
    )

    if module.check_mode:
        module.exit_json(**result)

    # start execution
    startd = datetime.datetime.now()

    # get the desired action
    action = module.params['action']

    # will return either the image name or None
    container_image = is_containerized()

    # Assume the task's status will be 'changed'
    changed = True

    if action == 'create' or action == 'prepare':
        # First test if the device has Ceph LVM Metadata
        rc, cmd, out, err = exec_command(
            module, list_osd(module, container_image))

        # list_osd returns a dict, if the dict is empty this means
        # we can not check the return code since it's not consistent
        # with the plain output
        # see: http://tracker.ceph.com/issues/36329
        # FIXME: it's probably less confusing to check for rc

        # convert out to json, ansible returns a string...
        try:
            out_dict = json.loads(out)
        except ValueError:
            fatal("Could not decode json output: {} from the command {}".format(out, cmd), module)  # noqa: E501

        if out_dict:
            data = module.params['data']
            result['stdout'] = 'skipped, since {0} is already used for an osd'.format(data)  # noqa: E501
            result['rc'] = 0
            module.exit_json(**result)

        # Prepare or create the OSD
        rc, cmd, out, err = exec_command(
            module, prepare_or_create_osd(module, action, container_image))

    elif action == 'activate':
        if container_image:
            fatal(
                "This is not how container's activation happens, nothing to activate", module)  # noqa: E501

        # Activate the OSD
        rc, cmd, out, err = exec_command(
            module, activate_osd())

    elif action == 'zap':
        # Zap the OSD
        skip = []
        for device_type in ['journal', 'data', 'db', 'wal']:
            # 1/ if we passed vg/lv
            if module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None):  # noqa: E501
                # 2/ check this is an actual lv/vg
                ret = is_lv(module, module.params['{}_vg'.format(device_type)], module.params[device_type], container_image)  # noqa: E501
                skip.append(ret)
                # 3/ This isn't a lv/vg device
                if not ret:
                    module.params['{}_vg'.format(device_type)] = False
                    module.params[device_type] = False
            # 4/ no journal|data|db|wal|_vg was passed, so it must be a raw device  # noqa: E501
            elif not module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None):  # noqa: E501
                skip.append(True)

        cmd = zap_devices(module, container_image)

        if any(skip) or module.params.get('osd_fsid', None) \
                or module.params.get('osd_id', None):
            rc, cmd, out, err = exec_command(
                module, cmd)
            for scan_cmd in ['vgscan', 'lvscan']:
                module.run_command([scan_cmd, '--cache'])
        else:
            out = 'Skipped, nothing to zap'
            err = ''
            changed = False
            rc = 0

    elif action == 'list':
        # List Ceph LVM Metadata on a device
        rc, cmd, out, err = exec_command(
            module, list_osd(module, container_image))

    elif action == 'inventory':
        # List storage device inventory.
        rc, cmd, out, err = exec_command(
            module, list_storage_inventory(module, container_image))

    elif action == 'batch':
        # Batch prepare AND activate OSDs
        report = module.params.get('report', None)

        # Add --report flag for the idempotency test
        report_flags = [
            '--report',
            '--format=json',
        ]

        cmd = batch(module, container_image, report=True)
        batch_report_cmd = copy.copy(cmd)
        batch_report_cmd.extend(report_flags)

        # Run batch --report to see what's going to happen
        # Do not run the batch command if there is nothing to do
        rc, cmd, out, err = exec_command(
            module, batch_report_cmd)
        try:
            if not out:
                out = '{}'
            report_result = json.loads(out)
        except ValueError:
            strategy_changed_in_out = "strategy changed" in out
            strategy_changed_in_err = "strategy changed" in err
            strategy_changed = strategy_changed_in_out or \
                strategy_changed_in_err
            if strategy_changed:
                if strategy_changed_in_out:
                    out = json.dumps({"changed": False,
                                      "stdout": out.rstrip("\r\n")})
                elif strategy_changed_in_err:
                    out = json.dumps({"changed": False,
                                      "stderr": err.rstrip("\r\n")})
                rc = 0
                changed = False
            else:
                out = out.rstrip("\r\n")
            result = dict(
                cmd=cmd,
                stdout=out.rstrip('\r\n'),
                stderr=err.rstrip('\r\n'),
                rc=rc,
                changed=changed,
            )
            if strategy_changed:
                module.exit_json(**result)
            module.fail_json(msg='non-zero return code', **result)

        if not report:
            if 'changed' in report_result:
                # we have the old batch implementation
                # if not asking for a report, let's just run the batch command
                changed = report_result['changed']
                if changed:
                    # Batch prepare the OSD
                    rc, cmd, out, err = exec_command(
                        module, batch(module, container_image))
            else:
                # we have the refactored batch, it's idempotent so let's just
                # run it
                rc, cmd, out, err = exec_command(
                    module, batch(module, container_image))
        else:
            cmd = batch_report_cmd

    endd = datetime.datetime.now()
    delta = endd - startd

    result = dict(
        cmd=cmd,
        start=str(startd),
        end=str(endd),
        delta=str(delta),
        rc=rc,
        stdout=out.rstrip('\r\n'),
        stderr=err.rstrip('\r\n'),
        changed=changed,
    )

    if rc != 0:
        module.fail_json(msg='non-zero return code', **result)

    module.exit_json(**result)


def main():
    run_module()


if __name__ == '__main__':
    main()