mirror of https://github.com/ceph/ceph-ansible.git
ceph_volume: try to get rid of the dummy container
When running a containerized deployment, we pass an environment variable
which contains the container image.

Signed-off-by: Sébastien Han <seb@redhat.com>

pull/3220/head
parent aa2c1b27e3
commit 3ddcc9af16
@@ -1,8 +1,8 @@
 #!/usr/bin/python
 import datetime
-import json
 import copy
+import json
+import os
 
 ANSIBLE_METADATA = {
     'metadata_version': '1.0',
@@ -158,6 +158,20 @@ EXAMPLES = '''
 from ansible.module_utils.basic import AnsibleModule  # noqa 4502
 
 
+def container_exec(binary, container_image):
+    '''
+    Build the CLI to run a command inside a container
+    '''
+
+    command_exec = ["docker", "run", "--rm", "--privileged", "--net=host",
+                    "-v", "/dev:/dev", "-v", "/etc/ceph:/etc/ceph:z",
+                    "-v", "/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket",
+                    "-v", "/var/lib/ceph/:/var/lib/ceph/:z",
+                    os.path.join("--entrypoint=" + binary),
+                    container_image]
+    return command_exec
+
+
 def get_data(data, data_vg):
     if data_vg:
         data = "{0}/{1}".format(data_vg, data)
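For illustration (not part of the diff): what container_exec() returns for a
sample image; the image name below is made up.

    cmd = container_exec("ceph-volume", "ceph/daemon:latest-luminous")
    # cmd == ['docker', 'run', '--rm', '--privileged', '--net=host',
    #         '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z',
    #         '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket',
    #         '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
    #         '--entrypoint=ceph-volume', 'ceph/daemon:latest-luminous']

Note that os.path.join() called with a single argument returns it unchanged,
so the entrypoint element is just the concatenated string.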
@@ -336,20 +350,26 @@ def batch(module):
     module.exit_json(**result)
 
 
-def ceph_volume_cmd(subcommand, containerized, cluster=None):
-    cmd = ['ceph-volume']
+def ceph_volume_cmd(subcommand, container_image, cluster=None):
+    if container_image:
+        binary = "ceph-volume"
+        cmd = container_exec(
+            binary, container_image)
+    else:
+        binary = ["ceph-volume"]
+        cmd = binary
 
     if cluster:
         cmd.extend(["--cluster", cluster])
 
     cmd.append('lvm')
     cmd.append(subcommand)
 
-    if containerized:
-        cmd = containerized.split() + cmd
-
     return cmd
 
 
-def activate_osd(module, containerized=None):
+def activate_osd(module, container_image=None):
     subcommand = "activate"
     cmd = ceph_volume_cmd(subcommand)
     cmd.append("--all")
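For illustration (not part of the diff): the two paths ceph_volume_cmd() can
now take; argument values are made up.

    # non-containerized: plain CLI invocation
    ceph_volume_cmd("prepare", None, cluster="ceph")
    # -> ['ceph-volume', '--cluster', 'ceph', 'lvm', 'prepare']

    # containerized: the list starts with the docker run wrapper
    ceph_volume_cmd("prepare", "ceph/daemon:latest-luminous", cluster="ceph")
    # -> ['docker', 'run', ..., '--entrypoint=ceph-volume',
    #     'ceph/daemon:latest-luminous', '--cluster', 'ceph', 'lvm', 'prepare']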
@@ -370,10 +390,14 @@ def prepare_osd(module):
     wal_vg = module.params.get('wal_vg', None)
     crush_device_class = module.params.get('crush_device_class', None)
     dmcrypt = module.params['dmcrypt']
-    containerized = module.params.get('containerized', None)
     subcommand = "prepare"
 
-    cmd = ceph_volume_cmd(subcommand, containerized, cluster)
+    if "CEPH_CONTAINER_IMAGE" in os.environ:
+        container_image = os.getenv("CEPH_CONTAINER_IMAGE")
+    else:
+        container_image = None
+
+    cmd = ceph_volume_cmd(subcommand, container_image, cluster)
     cmd.extend(["--%s" % objectstore])
     cmd.append("--data")
 
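Aside (not a change this commit makes): the lookup above is equivalent to a
single call, since os.environ.get() returns None when the variable is unset.

    import os

    container_image = os.environ.get("CEPH_CONTAINER_IMAGE")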
@@ -417,9 +441,14 @@ def prepare_osd(module):
         # support for 'lvm list' and raw devices
         # was added with https://github.com/ceph/ceph/pull/20620 but
         # has not made it to a luminous release as of 12.2.4
-        ceph_volume_list_cmd = ["ceph-volume", "lvm", "list", data]
-        if containerized:
-            ceph_volume_list_cmd = containerized.split() + ceph_volume_list_cmd
+        ceph_volume_list_cmd_args = ["lvm", "list", data]
+        if container_image:
+            binary = "ceph-volume"
+            ceph_volume_list_cmd = container_exec(
+                binary, container_image) + ceph_volume_list_cmd_args
+        else:
+            binary = ["ceph-volume"]
+            ceph_volume_list_cmd = binary + ceph_volume_list_cmd_args
 
         rc, out, err = module.run_command(ceph_volume_list_cmd, encoding=None)
         if rc == 0:
@@ -38,10 +38,27 @@
   notify:
     - restart ceph osds
 
+- name: collect osd ids
+  shell: >
+    docker run --rm
+    --privileged=true
+    -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket
+    -v /etc/ceph:/etc/ceph:z
+    -v /dev:/dev
+    --entrypoint=ceph-volume
+    {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+    lvm list --format json | python -c 'import sys, json; print("\n".join(json.load(sys.stdin).keys()))'
+  changed_when: false
+  failed_when: false
+  register: ceph_osd_ids
+  when:
+    - containerized_deployment
+    - osd_scenario == 'lvm'
+
 - name: systemd start osd container
   systemd:
-    name: ceph-osd@{{ item | regex_replace('/dev/', '') }}
+    name: ceph-osd@{{ item | regex_replace('/dev/', '') if osd_scenario != 'lvm' else item }}
     state: started
     enabled: yes
     daemon_reload: yes
-  with_items: "{{ devices }}"
+  with_items: "{{ devices if osd_scenario != 'lvm' else ceph_osd_ids.stdout_lines }}"
@@ -1,29 +1,4 @@
 ---
-- name: set_fact docker_exec_prepare_cmd
-  set_fact:
-    docker_exec_prepare_cmd: "docker exec ceph-volume-prepare"
-  when:
-    - containerized_deployment
-
-- name: run a ceph-volume prepare container (sleep 3000)
-  command: >
-    docker run \
-    --rm \
-    --privileged=true \
-    --net=host \
-    -v /dev:/dev \
-    -d \
-    -v {{ ceph_conf_key_directory }}:{{ ceph_conf_key_directory }}:z \
-    -v /var/lib/ceph/:/var/lib/ceph/:z \
-    -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket \
-    --name ceph-volume-prepare \
-    --entrypoint=sleep \
-    {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
-    3000
-  changed_when: false
-  when:
-    - containerized_deployment
-
 - name: "use ceph-volume to create {{ osd_objectstore }} osds"
   ceph_volume:
     cluster: "{{ cluster }}"
@@ -38,8 +13,8 @@
     wal_vg: "{{ item.wal_vg|default(omit) }}"
     crush_device_class: "{{ item.crush_device_class|default(omit) }}"
     dmcrypt: "{{ dmcrypt|default(omit) }}"
-    containerized: "{{ docker_exec_prepare_cmd | default(False) }}"
     action: "{{ 'prepare' if containerized_deployment else 'create' }}"
   environment:
     CEPH_VOLUME_DEBUG: 1
+    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
   with_items: "{{ lvm_volumes }}"
@@ -1,8 +1,13 @@
 #!/bin/bash
 # {{ ansible_managed }}
 
+
+#############
+# VARIABLES #
+#############
 DOCKER_ENV=""
 
+
 #############
 # FUNCTIONS #
 #############
@@ -50,6 +55,16 @@ function expose_partitions {
 
 expose_partitions "$1"
 
+{% if osd_scenario == 'lvm' -%}
+function find_device_from_id {
+  OSD_ID="$1"
+  LV=$(docker run --privileged=true -v /dev:/dev -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket -v /etc/ceph:/etc/ceph:z --entrypoint=ceph-volume {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} lvm list --format json | python -c "import sys, json; print(json.load(sys.stdin)[\"$OSD_ID\"][0][\"path\"])")
+  OSD_DEVICE=$(lvdisplay -m $LV | awk '/Physical volume/ {print $3}')
+}
+
+find_device_from_id $@
+{% endif -%}
+
 ########
 # MAIN #
 ########
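For illustration (not part of the diff): the python -c filter inside
find_device_from_id pulls the LV path for one OSD id out of the JSON
listing. Expanded with made-up sample output:

    import json

    sample = '{"0": [{"path": "/dev/test_group/data-lv1"}]}'
    osd_id = "0"
    print(json.loads(sample)[osd_id][0]["path"])
    # -> /dev/test_group/data-lv1; lvdisplay -m then resolves this LV to
    # the physical device stored in OSD_DEVICE.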
@@ -103,16 +118,18 @@ expose_partitions "$1"
   -e OSD_DMCRYPT=1 \
   {% endif -%}
   -e CLUSTER={{ cluster }} \
-  -e OSD_DEVICE=/dev/${1} \
   {% if (ceph_tcmalloc_max_total_thread_cache | int) > 0 and osd_objectstore == 'filestore' -%}
   -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \
   {% endif -%}
   {% if osd_scenario == 'lvm' -%}
   -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket \
   -e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE \
+  -e OSD_DEVICE="$OSD_DEVICE" \
+  --name=ceph-osd-"$OSD_ID" \
   {% else -%}
   -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
+  -e OSD_DEVICE=/dev/"${1}" \
+  --name=ceph-osd-{{ ansible_hostname }}-"${1}" \
   {% endif -%}
   {{ ceph_osd_docker_extra_env }} \
-  --name=ceph-osd-{{ ansible_hostname }}-${1} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}