mirror of https://github.com/ceph/ceph-ansible.git

ceph-osd: ceph-volume container support

Signed-off-by: Sébastien Han <seb@redhat.com>

pull/3220/head
parent 678e155328
commit aa2c1b27e3
library/ceph_volume.py

@@ -38,7 +38,7 @@ options:
         description:
             - The action to take. Either creating OSDs or zapping devices.
         required: true
-        choices: ['create', 'zap', 'batch', 'list']
+        choices: ['create', 'zap', 'batch', 'prepare', 'activate', 'list']
         default: create
     data:
         description:
@@ -65,7 +65,7 @@ options:
         required: false
     db_vg:
         description:
-            - If db is a lv, this must be the name of the volume group it belongs to.
+            - If db is a lv, this must be the name of the volume group it belongs to.  # noqa E501
            - Only applicable if objectstore is 'bluestore'.
         required: false
     wal:
@@ -75,7 +75,7 @@ options:
         required: false
     wal_vg:
         description:
-            - If wal is a lv, this must be the name of the volume group it belongs to.
+            - If wal is a lv, this must be the name of the volume group it belongs to.  # noqa E501
            - Only applicable if objectstore is 'bluestore'.
         required: false
     crush_device_class:
@@ -117,7 +117,12 @@ options:
            - Results will be returned in json format.
            - Only applicable if action is 'batch'.
         required: false
+    containerized:
+        description:
+            - Whether or not this is a containerized cluster. The value is
+              assigned or not depending on how the playbook runs.
+        required: false
+        default: None
 
 author:
     - Andrew Schoen (@andrewschoen)
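Note: the new `containerized` option is a command prefix string, not a boolean. A minimal sketch of the convention, assuming the "docker exec ceph-volume-prepare" value that the playbook change further down assigns:

    # Illustration only, not part of the module: the prefix string is split
    # and prepended to the ceph-volume argv (see ceph_volume_cmd below).
    containerized = "docker exec ceph-volume-prepare"  # assumed playbook-provided value
    argv = containerized.split() + ["ceph-volume", "lvm", "prepare"]
    # argv == ['docker', 'exec', 'ceph-volume-prepare',
    #          'ceph-volume', 'lvm', 'prepare']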
@@ -130,23 +135,27 @@ EXAMPLES = '''
     data: data-lv
     data_vg: data-vg
     journal: /dev/sdc1
+    action: create
 
 - name: set up a bluestore osd with a raw device for data
   ceph_volume:
     objectstore: bluestore
     data: /dev/sdc
+    action: create
 
-- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db
+- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db  # noqa E501
   ceph_volume:
     objectstore: bluestore
     data: data-lv
     data_vg: data-vg
     db: /dev/sdc1
     wal: /dev/sdc2
+    action: create
 '''
 
 
-from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import AnsibleModule  # noqa 4502
 
 
 def get_data(data, data_vg):
@@ -231,7 +240,8 @@ def batch(module):
     report = module.params['report']
 
     if not batch_devices:
-        module.fail_json(msg='batch_devices must be provided if action is "batch"', changed=False, rc=1)
+        module.fail_json(
+            msg='batch_devices must be provided if action is "batch"', changed=False, rc=1)  # noqa 4502
 
     cmd = [
         'ceph-volume',
@@ -326,7 +336,28 @@ def batch(module):
     module.exit_json(**result)
 
 
-def create_osd(module):
+def ceph_volume_cmd(subcommand, containerized, cluster=None):
+    cmd = ['ceph-volume']
+    if cluster:
+        cmd.extend(["--cluster", cluster])
+    cmd.append('lvm')
+    cmd.append(subcommand)
+
+    if containerized:
+        cmd = containerized.split() + cmd
+
+    return cmd
+
+
+def activate_osd(module, containerized=None):
+    subcommand = "activate"
+    cmd = ceph_volume_cmd(subcommand, containerized)
+    cmd.append("--all")
+
+    return True
+
+
+def prepare_osd(module):
     cluster = module.params['cluster']
     objectstore = module.params['objectstore']
     data = module.params['data']
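A quick sanity check of what the new helper assembles, calling ceph_volume_cmd exactly as defined above; the prefix string is an assumption matching the docker_exec_prepare_cmd fact set in the playbook:

    # Non-containerized: a plain ceph-volume invocation.
    ceph_volume_cmd("prepare", None, "ceph")
    # -> ['ceph-volume', '--cluster', 'ceph', 'lvm', 'prepare']

    # Containerized: the docker exec prefix is split and prepended.
    ceph_volume_cmd("prepare", "docker exec ceph-volume-prepare", "ceph")
    # -> ['docker', 'exec', 'ceph-volume-prepare',
    #     'ceph-volume', '--cluster', 'ceph', 'lvm', 'prepare']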
@@ -339,16 +370,12 @@ def create_osd(module):
     wal_vg = module.params.get('wal_vg', None)
     crush_device_class = module.params.get('crush_device_class', None)
     dmcrypt = module.params['dmcrypt']
+    containerized = module.params.get('containerized', None)
+    subcommand = "prepare"
 
-    cmd = [
-        'ceph-volume',
-        '--cluster',
-        cluster,
-        'lvm',
-        'create',
-        '--%s' % objectstore,
-        '--data',
-    ]
+    cmd = ceph_volume_cmd(subcommand, containerized, cluster)
+    cmd.extend(["--%s" % objectstore])
+    cmd.append("--data")
 
     data = get_data(data, data_vg)
     cmd.append(data)
@@ -387,11 +414,17 @@
 
     # check to see if osd already exists
     # FIXME: this does not work when data is a raw device
-    # support for 'lvm list' and raw devices was added with https://github.com/ceph/ceph/pull/20620 but
+    # support for 'lvm list' and raw devices
+    # was added with https://github.com/ceph/ceph/pull/20620 but
     # has not made it to a luminous release as of 12.2.4
-    rc, out, err = module.run_command(["ceph-volume", "lvm", "list", data], encoding=None)
+    ceph_volume_list_cmd = ["ceph-volume", "lvm", "list", data]
+    if containerized:
+        ceph_volume_list_cmd = containerized.split() + ceph_volume_list_cmd
+
+    rc, out, err = module.run_command(ceph_volume_list_cmd, encoding=None)
     if rc == 0:
-        result["stdout"] = "skipped, since {0} is already used for an osd".format(data)
+        result["stdout"] = "skipped, since {0} is already used for an osd".format(  # noqa E501
+            data)
         result['rc'] = 0
         module.exit_json(**result)
@@ -498,8 +531,10 @@ def zap_devices(module):
 def run_module():
     module_args = dict(
         cluster=dict(type='str', required=False, default='ceph'),
-        objectstore=dict(type='str', required=False, choices=['bluestore', 'filestore'], default='bluestore'),
-        action=dict(type='str', required=False, choices=['create', 'zap', 'batch', 'list'], default='create'),
+        objectstore=dict(type='str', required=False, choices=[
+            'bluestore', 'filestore'], default='bluestore'),
+        action=dict(type='str', required=False, choices=[
+            'create', 'zap', 'batch', 'prepare', 'activate', 'list'], default='create'),  # noqa 4502
         data=dict(type='str', required=False),
         data_vg=dict(type='str', required=False),
         journal=dict(type='str', required=False),
@@ -515,6 +550,7 @@ def run_module():
         journal_size=dict(type='str', required=False, default="5120"),
         block_db_size=dict(type='str', required=False, default="-1"),
         report=dict(type='bool', required=False, default=False),
+        containerized=dict(type='str', required=False, default=False),
     )
 
     module = AnsibleModule(
@@ -525,7 +561,12 @@ def run_module():
     action = module.params['action']
 
     if action == "create":
-        create_osd(module)
+        prepare_osd(module)
+        activate_osd(module)
+    elif action == "prepare":
+        prepare_osd(module)
+    elif action == "activate":
+        activate_osd(module)
     elif action == "zap":
         zap_devices(module)
     elif action == "batch":
@@ -533,7 +574,8 @@ def run_module():
     elif action == "list":
         _list(module)
 
-    module.fail_json(msg='State must either be "present" or "absent".', changed=False, rc=1)
+    module.fail_json(
+        msg='State must either be "present" or "absent".', changed=False, rc=1)
 
 
 def main():
roles/ceph-osd/tasks/main.yml

@@ -47,14 +47,16 @@
   when:
     - osd_scenario == 'lvm'
     - lvm_volumes|length > 0
-    - not containerized_deployment
+  # Hard code this so we will skip the entire file instead of individual tasks (the default isn't consistent)
+  static: False
 
 - name: include_tasks scenarios/lvm-batch.yml
   include_tasks: scenarios/lvm-batch.yml
   when:
     - osd_scenario == 'lvm'
     - devices|length > 0
-    - not containerized_deployment
+  # Hard code this so we will skip the entire file instead of individual tasks (the default isn't consistent)
+  static: False
 
 - name: include_tasks activate_osds.yml
   include_tasks: activate_osds.yml
roles/ceph-osd/tasks/scenarios/lvm.yml

@@ -1,4 +1,28 @@
 ---
+- name: set_fact docker_exec_prepare_cmd
+  set_fact:
+    docker_exec_prepare_cmd: "docker exec ceph-volume-prepare"
+  when:
+    - containerized_deployment
+
+- name: run a ceph-volume prepare container (sleep 3000)
+  command: >
+    docker run \
+    --rm \
+    --privileged=true \
+    --net=host \
+    -v /dev:/dev \
+    -d \
+    -v {{ ceph_conf_key_directory }}:{{ ceph_conf_key_directory }}:z \
+    -v /var/lib/ceph/:/var/lib/ceph/:z \
+    -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket \
+    --name ceph-volume-prepare \
+    --entrypoint=sleep \
+    {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+    3000
+  changed_when: false
+  when:
+    - containerized_deployment
+
 - name: "use ceph-volume to create {{ osd_objectstore }} osds"
   ceph_volume:
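The prepare container only has to outlive the subsequent ceph_volume tasks, hence the sleep entrypoint: it gives `docker exec ceph-volume-prepare` a long-lived target. A rough Python equivalent of the task, where the conf directory and image reference are placeholder stand-ins for the ceph_conf_key_directory and ceph_docker_* variables:

    # Sketch only: what the `docker run` task above amounts to.
    import subprocess

    subprocess.run([
        "docker", "run", "--rm", "--privileged=true", "--net=host", "-d",
        "-v", "/dev:/dev",
        "-v", "/etc/ceph:/etc/ceph:z",  # placeholder for ceph_conf_key_directory
        "-v", "/var/lib/ceph/:/var/lib/ceph/:z",
        "-v", "/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket",
        "--name", "ceph-volume-prepare",
        "--entrypoint=sleep",
        "docker.io/ceph/daemon:latest",  # placeholder image reference
        "3000",
    ], check=True)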
@@ -14,6 +38,8 @@
     wal_vg: "{{ item.wal_vg|default(omit) }}"
     crush_device_class: "{{ item.crush_device_class|default(omit) }}"
     dmcrypt: "{{ dmcrypt|default(omit) }}"
+    containerized: "{{ docker_exec_prepare_cmd | default(False) }}"
+    action: "{{ 'prepare' if containerized_deployment else 'create' }}"
   environment:
     CEPH_VOLUME_DEBUG: 1
   with_items: "{{ lvm_volumes }}"
roles/ceph-osd/templates/ceph-osd-run.sh.j2

@@ -107,7 +107,12 @@ expose_partitions "$1"
   {% if (ceph_tcmalloc_max_total_thread_cache | int) > 0 and osd_objectstore == 'filestore' -%}
   -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \
   {% endif -%}
+  {% if osd_scenario == 'lvm' -%}
+  -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket \
+  -e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE \
+  {% else -%}
   -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
+  {% endif -%}
   {{ ceph_osd_docker_extra_env }} \
   --name=ceph-osd-{{ ansible_hostname }}-${1} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}