mirror of https://github.com/ceph/ceph-ansible.git
ceph-volume: implement the 'lvm batch' subcommand
This adds the action 'batch' to the ceph-volume module so that we can run the new 'ceph-volume lvm batch' subcommand. A functional test is also included. If devices is defined and osd_scenario is lvm, then the 'ceph-volume lvm batch' command will be used to create the OSDs.

Signed-off-by: Andrew Schoen <aschoen@redhat.com>
parent 4d64dd4686
commit 6d431ec22d
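For orientation before the diff: a minimal sketch of the host configuration that triggers the new code path (values are illustrative, mirroring the functional test's group_vars added below):

    osd_scenario: lvm
    osd_objectstore: bluestore
    devices:
      - /dev/sdb
      - /dev/sdc

With these set, ceph-ansible invokes the new batch action instead of per-volume OSD creation.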
@@ -36,7 +36,7 @@ options:
         description:
             - The action to take. Either creating OSDs or zapping devices.
         required: true
-        choices: ['create', 'zap']
+        choices: ['create', 'zap', 'batch']
         default: create
     data:
         description:
@@ -84,6 +84,11 @@ options:
         description:
             - If set to True the OSD will be encrypted with dmcrypt.
         required: false
+    batch_devices:
+        description:
+            - A list of devices to pass to the 'ceph-volume lvm batch' subcommand.
+            - Only applicable if action is 'batch'.
+        required: false


 author:
@@ -140,6 +145,72 @@ def get_wal(wal, wal_vg):
     return wal


+def batch(module):
+    cluster = module.params['cluster']
+    objectstore = module.params['objectstore']
+    batch_devices = module.params['batch_devices']
+    crush_device_class = module.params.get('crush_device_class', None)
+    dmcrypt = module.params['dmcrypt']
+
+    if not batch_devices:
+        module.fail_json(msg='batch_devices must be provided if action is "batch"', changed=False, rc=1)
+
+    cmd = [
+        'ceph-volume',
+        '--cluster',
+        cluster,
+        'lvm',
+        'batch',
+        '--%s' % objectstore,
+        '--yes',
+    ]
+
+    if crush_device_class:
+        cmd.extend(["--crush-device-class", crush_device_class])
+
+    if dmcrypt:
+        cmd.append("--dmcrypt")
+
+    cmd.extend(batch_devices)
+
+    result = dict(
+        changed=False,
+        cmd=cmd,
+        stdout='',
+        stderr='',
+        rc='',
+        start='',
+        end='',
+        delta='',
+    )
+
+    if module.check_mode:
+        return result
+
+    startd = datetime.datetime.now()
+
+    rc, out, err = module.run_command(cmd, encoding=None)
+
+    endd = datetime.datetime.now()
+    delta = endd - startd
+
+    result = dict(
+        cmd=cmd,
+        stdout=out.rstrip(b"\r\n"),
+        stderr=err.rstrip(b"\r\n"),
+        rc=rc,
+        start=str(startd),
+        end=str(endd),
+        delta=str(delta),
+        changed=True,
+    )
+
+    if rc != 0:
+        module.fail_json(msg='non-zero return code', **result)
+
+    module.exit_json(**result)
+
+
 def create_osd(module):
     cluster = module.params['cluster']
     objectstore = module.params['objectstore']
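To make the assembled command concrete: a hedged example invocation of the module and, in the trailing comment, the command line batch() would build for it (the device names and crush device class are hypothetical):

    - name: illustrative batch call (hypothetical values)
      ceph_volume:
        action: batch
        objectstore: bluestore
        crush_device_class: fast
        dmcrypt: true
        batch_devices:
          - /dev/sdb
          - /dev/sdc
    # builds: ceph-volume --cluster ceph lvm batch --bluestore --yes --crush-device-class fast --dmcrypt /dev/sdb /dev/sdc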
@@ -313,8 +384,8 @@ def run_module():
     module_args = dict(
         cluster=dict(type='str', required=False, default='ceph'),
         objectstore=dict(type='str', required=False, choices=['bluestore', 'filestore'], default='bluestore'),
-        action=dict(type='str', required=False, choices=['create', 'zap'], default='create'),
-        data=dict(type='str', required=True),
+        action=dict(type='str', required=False, choices=['create', 'zap', 'batch'], default='create'),
+        data=dict(type='str', required=False),
         data_vg=dict(type='str', required=False),
         journal=dict(type='str', required=False),
         journal_vg=dict(type='str', required=False),
@@ -324,6 +395,7 @@ def run_module():
         wal_vg=dict(type='str', required=False),
         crush_device_class=dict(type='str', required=False),
         dmcrypt=dict(type='bool', required=False, default=False),
+        batch_devices=dict(type='list', required=False, default=[]),
     )

     module = AnsibleModule(
@@ -337,6 +409,8 @@ def run_module():
         create_osd(module)
     elif action == "zap":
         zap_devices(module)
+    elif action == "batch":
+        batch(module)

     module.fail_json(msg='State must either be "present" or "absent".', changed=False, rc=1)

@@ -82,7 +82,9 @@ class ActionModule(ActionBase):
             notario.validate(host_vars, non_collocated_osd_scenario, defined_keys=True)

         if host_vars["osd_scenario"] == "lvm":
-            if notario_store['osd_objectstore'] == 'filestore':
+            if host_vars.get("devices"):
+                notario.validate(host_vars, lvm_batch_scenario, defined_keys=True)
+            elif notario_store['osd_objectstore'] == 'filestore':
                 notario.validate(host_vars, lvm_filestore_scenario, defined_keys=True)
             elif notario_store['osd_objectstore'] == 'bluestore':
                 notario.validate(host_vars, lvm_bluestore_scenario, defined_keys=True)
@@ -222,6 +224,8 @@ non_collocated_osd_scenario = (
     ("devices", iterables.AllItems(types.string)),
 )

+lvm_batch_scenario = ("devices", iterables.AllItems(types.string))
+
 lvm_filestore_scenario = ("lvm_volumes", iterables.AllItems((
     (optional('crush_device_class'), types.string),
     ('data', types.string),
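In other words, for osd_scenario 'lvm' the validator now branches on the presence of 'devices': when present, it only needs to be a list of strings (the new lvm_batch_scenario); when absent, lvm_volumes is validated against the objectstore-specific schema as before. A sketch of host_vars satisfying the new schema (illustrative values):

    osd_scenario: lvm
    devices:
      - /dev/sdb
      - /dev/sdc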
@@ -54,6 +54,16 @@
   include: scenarios/lvm.yml
   when:
     - osd_scenario == 'lvm'
+    - lvm_volumes is defined
+    - not containerized_deployment
+  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
+  static: False
+
+- name: include scenarios/lvm-batch.yml
+  include: scenarios/lvm-batch.yml
+  when:
+    - osd_scenario == 'lvm'
+    - devices is defined
     - not containerized_deployment
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
@@ -0,0 +1,11 @@
+---
+
+- name: "use ceph-volume lvm batch to create {{ osd_objectstore }} osds"
+  ceph_volume:
+    cluster: "{{ cluster }}"
+    objectstore: "{{ osd_objectstore }}"
+    batch_devices: "{{ devices }}"
+    dmcrypt: "{{ dmcrypt|default(omit) }}"
+    action: "batch"
+  environment:
+    CEPH_VOLUME_DEBUG: 1
@@ -58,7 +58,7 @@
   when:
     - osd_group_name in group_names
    - not osd_auto_discovery | default(False)
-    - osd_scenario != "lvm"
+    - osd_scenario != "lvm" and devices is not defined

 - name: include check_eth_mon.yml
   include: check_eth_mon.yml
@@ -22,7 +22,8 @@ def node(host, request):
     group_names = ansible_vars["group_names"]
     docker = ansible_vars.get("docker")
     osd_auto_discovery = ansible_vars.get("osd_auto_discovery")
-    lvm_scenario = ansible_vars.get("osd_scenario") == 'lvm'
+    osd_scenario = ansible_vars.get("osd_scenario")
+    lvm_scenario = osd_scenario in ['lvm', 'lvm-batch']
     ceph_release_num = {
         'jewel': 10,
         'kraken': 11,
@@ -0,0 +1 @@
+../../../../../Vagrantfile
@@ -0,0 +1,9 @@
+{
+  "ceph_conf_overrides": {
+    "global": {
+      "osd_pool_default_pg_num": 12,
+      "osd_pool_default_size": 1
+    }
+  },
+  "ceph_mon_docker_memory_limit": "2g"
+}
@@ -0,0 +1,21 @@
+---
+
+ceph_origin: repository
+ceph_repository: community
+cluster: ceph
+public_network: "192.168.39.0/24"
+cluster_network: "192.168.40.0/24"
+monitor_interface: eth1
+radosgw_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+copy_admin_key: true
+devices:
+  - /dev/sdb
+  - /dev/sdc
+os_tuning_params:
+  - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+  global:
+    osd_pool_default_size: 1
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[mgrs]
+mon0
+
+[osds]
+osd0
@@ -0,0 +1,73 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# Deploy RESTAPI on each of the Monitors
+restapi: true
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.39
+cluster_subnet: 192.168.40
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+#   * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+#   - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+#   - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+  - { name: fs.file-max, value: 26234859 }
tox.ini
@@ -1,6 +1,6 @@
 [tox]
 envlist = {dev,jewel,luminous,mimic,rhcs}-{xenial_cluster,centos7_cluster,docker_cluster,update_cluster,cluster,update_docker_cluster,switch_to_containers,purge_filestore_osds_container,purge_filestore_osds_non_container,purge_cluster_non_container,purge_cluster_container,ooo_collocation}
-          {dev,luminous,mimic,rhcs}-{filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,bluestore_lvm_osds,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,purge_bluestore_osds_non_container,purge_bluestore_osds_container}
+          {dev,luminous,mimic,rhcs}-{filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,bluestore_lvm_osds,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,purge_bluestore_osds_non_container,purge_bluestore_osds_container,lvm_batch}

 skipsdist = True
@@ -197,6 +197,7 @@ changedir=
     update_cluster: {toxinidir}/tests/functional/centos/7/cluster
     switch_to_containers: {toxinidir}/tests/functional/centos/7/cluster
     lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds
+    lvm_batch: {toxinidir}/tests/functional/centos/7/lvm-batch
    bluestore_lvm_osds: {toxinidir}/tests/functional/centos/7/bs-lvm-osds
     purge_lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds
     ooo_collocation: {toxinidir}/tests/functional/centos/7/ooo-collocation
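With the envlist factor and changedir entry in place, the new functional test should run like the existing scenarios, e.g. 'tox -e dev-lvm_batch' (the env name is generated from the factors in the envlist above).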