From b36f3e06b5d750e99556840090658ffd01b92b21 Mon Sep 17 00:00:00 2001
From: Andrew Schoen
Date: Thu, 6 Sep 2018 14:00:56 -0500
Subject: [PATCH] ceph_volume: adds the osds_per_device parameter

If this is set to anything other than the default value of 1 then the
--osds-per-device flag will be used by the batch command to define how
many osds will be created per device.

Signed-off-by: Andrew Schoen
---
 docs/source/osds/scenarios.rst               |  3 ++-
 group_vars/osds.yml.sample                   |  1 +
 library/ceph_volume.py                       | 11 +++++++++++
 roles/ceph-osd/defaults/main.yml             |  1 +
 roles/ceph-osd/tasks/scenarios/lvm-batch.yml |  1 +
 tests/conftest.py                            | 14 ++++++++------
 6 files changed, 24 insertions(+), 7 deletions(-)

diff --git a/docs/source/osds/scenarios.rst b/docs/source/osds/scenarios.rst
index b6678b804..0359bb02b 100644
--- a/docs/source/osds/scenarios.rst
+++ b/docs/source/osds/scenarios.rst
@@ -195,7 +195,8 @@ with the ``lvm`` osd scenario.
   defined in ``lvm_volumes``.
 - ``devices`` is a list of raw device names as strings. If ``devices`` is
   defined then the ``ceph-volume lvm batch``
-  command will be used to deploy OSDs.
+  command will be used to deploy OSDs. You can also use the ``osds_per_device`` variable to inform ``ceph-volume`` how
+  many OSDs it should create from each device it finds suitable.
 
 Both ``lvm_volumes`` and ``devices`` can be defined and both methods would be used
 in the deployment or you can pick just one method.
diff --git a/group_vars/osds.yml.sample b/group_vars/osds.yml.sample
index 81dc556a3..5e8802831 100644
--- a/group_vars/osds.yml.sample
+++ b/group_vars/osds.yml.sample
@@ -220,6 +220,7 @@ dummy:
 
 #lvm_volumes: []
 #crush_device_class: ""
+#osds_per_device: 1
 
 
 ##########
diff --git a/library/ceph_volume.py b/library/ceph_volume.py
index b7b4495fc..12180c1cd 100644
--- a/library/ceph_volume.py
+++ b/library/ceph_volume.py
@@ -89,6 +89,12 @@ options:
             - A list of devices to pass to the 'ceph-volume lvm batch' subcommand.
             - Only applicable if action is 'batch'.
         required: false
+    osds_per_device:
+        description:
+            - The number of OSDs to create per device.
+            - Only applicable if action is 'batch'.
+        required: false
+        default: 1
 
 
 author:
@@ -151,6 +157,7 @@ def batch(module):
     batch_devices = module.params['batch_devices']
     crush_device_class = module.params.get('crush_device_class', None)
     dmcrypt = module.params['dmcrypt']
+    osds_per_device = module.params['osds_per_device']
 
     if not batch_devices:
         module.fail_json(msg='batch_devices must be provided if action is "batch"', changed=False, rc=1)
@@ -171,6 +178,9 @@ def batch(module):
     if dmcrypt:
         cmd.append("--dmcrypt")
 
+    if osds_per_device > 1:
+        cmd.extend(["--osds-per-device", osds_per_device])
+
     cmd.extend(batch_devices)
 
     result = dict(
@@ -396,6 +406,7 @@ def run_module():
         crush_device_class=dict(type='str', required=False),
         dmcrypt=dict(type='bool', required=False, default=False),
         batch_devices=dict(type='list', required=False, default=[]),
+        osds_per_device=dict(type='int', required=False, default=1),
     )
 
     module = AnsibleModule(
diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml
index cb061552e..8bb36e496 100644
--- a/roles/ceph-osd/defaults/main.yml
+++ b/roles/ceph-osd/defaults/main.yml
@@ -212,6 +212,7 @@ bluestore_wal_devices: "{{ dedicated_devices }}"
 
 lvm_volumes: []
 crush_device_class: ""
+osds_per_device: 1
 
 
 ##########
diff --git a/roles/ceph-osd/tasks/scenarios/lvm-batch.yml b/roles/ceph-osd/tasks/scenarios/lvm-batch.yml
index fcee4f9d2..d3afc438f 100644
--- a/roles/ceph-osd/tasks/scenarios/lvm-batch.yml
+++ b/roles/ceph-osd/tasks/scenarios/lvm-batch.yml
@@ -7,6 +7,7 @@
     batch_devices: "{{ devices }}"
     dmcrypt: "{{ dmcrypt|default(omit) }}"
     crush_device_class: "{{ crush_device_class|default(omit) }}"
+    osds_per_device: "{{ osds_per_device }}"
     action: "batch"
   environment:
     CEPH_VOLUME_DEBUG: 1
diff --git a/tests/conftest.py b/tests/conftest.py
index a89ff260e..324887f69 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -86,14 +86,17 @@ def node(host, request):
     subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
     num_mons = len(ansible_vars["groups"]["mons"])
     if osd_auto_discovery:
-        num_devices = 3
+        num_osds = 3
     else:
-        num_devices = len(ansible_vars.get("devices", []))
-        if not num_devices:
-            num_devices = len(ansible_vars.get("lvm_volumes", []))
+        num_osds = len(ansible_vars.get("devices", []))
+        if not num_osds:
+            num_osds = len(ansible_vars.get("lvm_volumes", []))
+    osds_per_device = ansible_vars.get("osds_per_device", 1)
+    num_osds = num_osds * osds_per_device
+
     # If number of devices doesn't map to number of OSDs, allow tests to define
     # that custom number, defaulting it to ``num_devices``
-    num_osds = ansible_vars.get('num_osds', num_devices)
+    num_osds = ansible_vars.get('num_osds', num_osds)
     cluster_name = ansible_vars.get("cluster", "ceph")
     conf_path = "/etc/ceph/{}.conf".format(cluster_name)
     if "osds" in group_names:
@@ -118,7 +121,6 @@ def node(host, request):
         vars=ansible_vars,
         osd_ids=osd_ids,
         num_mons=num_mons,
-        num_devices=num_devices,
         num_osds=num_osds,
         cluster_name=cluster_name,
         conf_path=conf_path,
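
For context, a minimal sketch of how an operator would consume the new variable with the ``lvm`` osd scenario; the device paths and the value of ``osds_per_device`` below are placeholders for illustration, not part of the patch:

# group_vars/osds.yml -- hypothetical example
osd_scenario: lvm
devices:
  - /dev/nvme0n1
  - /dev/nvme1n1
# Any value greater than 1 makes the ceph_volume module append
# "--osds-per-device 2" ahead of the device list when it runs the
# "ceph-volume lvm batch" subcommand; with the default of 1 the flag
# is omitted and ceph-volume keeps its own one-OSD-per-device default.
osds_per_device: 2

Because ``osds_per_device`` defaults to 1 in roles/ceph-osd/defaults/main.yml, existing deployments keep their current behaviour unless the variable is explicitly overridden.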