ceph_volume: adds the osds_per_device parameter

If this is set to anything other than the default value of 1, the
--osds-per-device flag will be passed to the batch subcommand to define
how many OSDs will be created per device.

Signed-off-by: Andrew Schoen <aschoen@redhat.com>
pull/3118/head
Andrew Schoen 2018-09-06 14:00:56 -05:00 committed by mergify[bot]
parent 1c88c444a3
commit b36f3e06b5
6 changed files with 24 additions and 7 deletions
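For orientation, a minimal Python sketch of the flag handling this commit adds to the module's batch() function (see the ceph_volume.py hunks below). The helper name and base command are illustrative assumptions; the real module derives its command from cluster and container settings:

    # Illustrative sketch only: mirrors the flag handling added to batch().
    # `build_batch_cmd` and the base command are assumptions, not module code.
    def build_batch_cmd(batch_devices, osds_per_device=1, dmcrypt=False):
        cmd = ['ceph-volume', 'lvm', 'batch']
        if dmcrypt:
            cmd.append('--dmcrypt')
        # Pass the flag only when it differs from ceph-volume's default of 1.
        if osds_per_device > 1:
            # str() here because every argv element must be a string when run.
            cmd.extend(['--osds-per-device', str(osds_per_device)])
        cmd.extend(batch_devices)
        return cmd

    # build_batch_cmd(['/dev/sdb', '/dev/sdc'], osds_per_device=2) returns:
    # ['ceph-volume', 'lvm', 'batch', '--osds-per-device', '2',
    #  '/dev/sdb', '/dev/sdc']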

View File

@@ -195,7 +195,8 @@ with the ``lvm`` osd scenario.
   defined in ``lvm_volumes``.
 - ``devices`` is a list of raw device names as strings. If ``devices`` is defined then the ``ceph-volume lvm batch``
-  command will be used to deploy OSDs.
+  command will be used to deploy OSDs. You can also use the ``osds_per_device`` variable to inform ``ceph-volume`` how
+  many OSDs it should create from each device it finds suitable.
   Both ``lvm_volumes`` and ``devices`` can be defined and both methods would be used in the deployment or you
   can pick just one method.
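A hypothetical host configuration for the documented ``devices`` path, sketched as a Python dict (device names and counts are made up):

    # Hypothetical OSD host configuration matching the documented behaviour:
    # `devices` triggers `ceph-volume lvm batch`, and `osds_per_device`
    # multiplies how many OSDs batch carves out of each device.
    osd_host_vars = {
        "devices": ["/dev/sdb", "/dev/sdc"],  # raw devices for `lvm batch`
        "osds_per_device": 2,                 # 2 devices * 2 = 4 OSDs
        "lvm_volumes": [],                    # or list pre-created LVs instead
    }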

View File

@@ -220,6 +220,7 @@ dummy:
 #lvm_volumes: []
 #crush_device_class: ""
+#osds_per_device: 1
 ##########

View File

@@ -89,6 +89,12 @@ options:
         - A list of devices to pass to the 'ceph-volume lvm batch' subcommand.
         - Only applicable if action is 'batch'.
     required: false
+    osds_per_device:
+        description:
+            - The number of OSDs to create per device.
+            - Only applicable if action is 'batch'.
+        required: false
+        default: 1
 author:
@@ -151,6 +157,7 @@ def batch(module):
     batch_devices = module.params['batch_devices']
     crush_device_class = module.params.get('crush_device_class', None)
     dmcrypt = module.params['dmcrypt']
+    osds_per_device = module.params['osds_per_device']

     if not batch_devices:
         module.fail_json(msg='batch_devices must be provided if action is "batch"', changed=False, rc=1)
@@ -171,6 +178,9 @@
     if dmcrypt:
         cmd.append("--dmcrypt")

+    if osds_per_device > 1:
+        cmd.extend(["--osds-per-device", osds_per_device])
+
     cmd.extend(batch_devices)

     result = dict(
@@ -396,6 +406,7 @@ def run_module():
         crush_device_class=dict(type='str', required=False),
         dmcrypt=dict(type='bool', required=False, default=False),
         batch_devices=dict(type='list', required=False, default=[]),
+        osds_per_device=dict(type='int', required=False, default=1),
     )

     module = AnsibleModule(
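One detail worth noting: the option is declared type='int', so AnsibleModule coerces string values (for example a templated "2" coming from the task shown further below) to integers before batch() compares them with > 1. A rough sketch of that coercion, with an illustrative helper name rather than Ansible's internals:

    # Rough sketch of type='int' coercion; `coerce_int` is an illustrative
    # name, not Ansible's internal function.
    def coerce_int(value):
        return value if isinstance(value, int) else int(value)

    assert coerce_int("2") > 1  # a templated "2" still triggers the flag
    assert coerce_int(1) == 1   # the default stays a no-op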

View File

@@ -212,6 +212,7 @@ bluestore_wal_devices: "{{ dedicated_devices }}"
 lvm_volumes: []
 crush_device_class: ""
+osds_per_device: 1
 ##########

View File

@@ -7,6 +7,7 @@
     batch_devices: "{{ devices }}"
     dmcrypt: "{{ dmcrypt|default(omit) }}"
     crush_device_class: "{{ crush_device_class|default(omit) }}"
+    osds_per_device: "{{ osds_per_device }}"
    action: "batch"
  environment:
    CEPH_VOLUME_DEBUG: 1
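Unlike crush_device_class, osds_per_device is passed unconditionally here; the role default of 1 added above guarantees the variable is always defined. The rendered module parameters might then look like this (values hypothetical):

    # Hypothetical rendered parameters for a host overriding osds_per_device.
    rendered_params = {
        "batch_devices": ["/dev/sdb", "/dev/sdc"],  # from "{{ devices }}"
        "osds_per_device": 2,                       # role default is 1
        "action": "batch",
        # crush_device_class is absent here: default(omit) drops the key
        # entirely when the variable is undefined.
    }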

View File

@@ -86,14 +86,17 @@ def node(host, request):
     subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
     num_mons = len(ansible_vars["groups"]["mons"])
     if osd_auto_discovery:
-        num_devices = 3
+        num_osds = 3
     else:
-        num_devices = len(ansible_vars.get("devices", []))
-    if not num_devices:
-        num_devices = len(ansible_vars.get("lvm_volumes", []))
+        num_osds = len(ansible_vars.get("devices", []))
+    if not num_osds:
+        num_osds = len(ansible_vars.get("lvm_volumes", []))
+    osds_per_device = ansible_vars.get("osds_per_device", 1)
+    num_osds = num_osds * osds_per_device
+
     # If number of devices doesn't map to number of OSDs, allow tests to define
     # that custom number, defaulting it to ``num_devices``
-    num_osds = ansible_vars.get('num_osds', num_devices)
+    num_osds = ansible_vars.get('num_osds', num_osds)
     cluster_name = ansible_vars.get("cluster", "ceph")
     conf_path = "/etc/ceph/{}.conf".format(cluster_name)
     if "osds" in group_names:
@@ -118,7 +121,6 @@
         vars=ansible_vars,
         osd_ids=osd_ids,
         num_mons=num_mons,
-        num_devices=num_devices,
         num_osds=num_osds,
         cluster_name=cluster_name,
         conf_path=conf_path,
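A worked example of the arithmetic the updated fixture now performs (values hypothetical):

    # Worked example of the fixture's expected-OSD arithmetic above.
    ansible_vars = {"devices": ["/dev/sdb", "/dev/sdc"], "osds_per_device": 2}
    num_osds = len(ansible_vars.get("devices", []))      # 2 devices
    if not num_osds:
        num_osds = len(ansible_vars.get("lvm_volumes", []))
    num_osds *= ansible_vars.get("osds_per_device", 1)   # 2 * 2 = 4
    num_osds = ansible_vars.get("num_osds", num_osds)    # explicit override wins
    assert num_osds == 4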