ceph-iscsi: support for containerized deployment

We now have the ability to deploy a containerized version of ceph-iscsi.
The result is similar to the non-containerized version; you simply end up
with three containers running, one for each of the following services:

* rbd-target-api
* rbd-target-gw
* tcmu-runner
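
As an illustration (not part of this change), such a deployment could be driven
with host or group vars along these lines for the iSCSI gateway group; the
values below are placeholders, not recommendations:

    containerized_deployment: true
    gateway_ip_list: "192.168.122.101,192.168.122.102"
    ceph_docker_registry: docker.io
    ceph_docker_image: ceph/daemon
    ceph_docker_image_tag: latest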

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1508144
Signed-off-by: Sébastien Han <seb@redhat.com>
pull/2740/head
Sébastien Han 2018-03-23 11:24:56 +08:00 committed by Guillaume Abrioux
parent 8363ab43d3
commit 91bf53ee93
18 changed files with 441 additions and 56 deletions

View File

@@ -9,6 +9,9 @@ dummy:
# You can override vars by using host or group vars
###########
# GENERAL #
###########
# Specify the iqn for ALL gateways. This iqn is shared across the gateways, so an iscsi
# client sees the gateway group as a single storage subsystem.
#gateway_iqn: "iqn.2003-01.com.redhat.iscsi-gw:ceph-igw"
@@ -40,7 +43,6 @@ dummy:
# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
#rbd_devices: {}
# client_connections defines the client ACLs to restrict client access to specific LUNs
# The settings are as follows:
# - image_list is a comma separated list of rbd images of the form <pool name>.<rbd_image_name>
@@ -58,6 +60,41 @@ dummy:
#client_connections: {}
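
As a purely illustrative sketch of a populated value (the initiator IQN, image
names and CHAP credentials are placeholders, and any keys beyond image_list are
assumptions about this role rather than something shown in this diff):

    client_connections:
      - client: 'iqn.1994-05.com.redhat:rh7-iscsi-client'
        image_list: 'rbd.ansible1,rbd.ansible2'
        chap: 'myiscsiusername/myiscsipassword'
        status: 'present'
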
# Whether or not to generate a secure certificate for the iSCSI gateway nodes
#generate_crt: False
##################
# RBD-TARGET-API #
##################
# Optional settings related to the CLI/API service
#api_user: admin
#api_password: admin
#api_port: 5001
#api_secure: false
#loop_delay: .5
#trusted_ip_list: 192.168.122.1
##########
# DOCKER #
##########
# Resource limitation
# For the full list of limits you can apply, see: docs.docker.com/engine/admin/resource_constraints
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mds_docker_extra_env' variable.
# TCMU_RUNNER resource limitation
#ceph_tcmu_runner_docker_memory_limit: 1g
#ceph_tcmu_runner_docker_cpu_limit: 1
# RBD_TARGET_GW resource limitation
#ceph_rbd_target_gw_docker_memory_limit: 1g
#ceph_rbd_target_gw_docker_cpu_limit: 1
# RBD_TARGET_API resource limitation
#ceph_rbd_target_api_docker_memory_limit: 1g
#ceph_rbd_target_api_docker_cpu_limit: 1
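
Any of the commented settings above can be overridden from your own host or
group vars for the gateway group, for example (values are illustrative only):

    api_user: admin
    api_password: s3cr3t
    api_port: 5001
    trusted_ip_list: 192.168.122.1,192.168.122.2
    ceph_tcmu_runner_docker_memory_limit: 2g
    ceph_rbd_target_gw_docker_cpu_limit: 2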

View File

@@ -358,3 +358,102 @@
set_fact:
_mgr_handler_called: False
listen: "restart ceph mgrs"
- name: set _tcmu_runner_handler_called before restart
set_fact:
_tcmu_runner_handler_called: True
listen: "restart ceph tcmu-runner"
- name: copy tcmu-runner restart script
template:
src: restart_tcmu_runner.sh.j2
dest: /tmp/restart_tcmu_runner.sh
owner: root
group: root
mode: 0750
listen: "restart ceph tcmu-runner"
when:
- iscsi_gw_group_name in group_names
- name: restart tcmu-runner
command: /usr/bin/env bash /tmp/restart_tcmu_runner.sh
listen: "restart ceph tcmu-runner"
when:
- iscsi_gw_group_name in group_names
- ceph_tcmu_runner_stat.get('rc') == 0
- hostvars[item]['_tcmu_runner_handler_called'] | default(False)
- ceph_tcmu_runner_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[iscsi_gw_group_name] }}"
delegate_to: "{{ item }}"
run_once: True
- name: set _tcmu_runner_handler_called after restart
set_fact:
_tcmu_runner_handler_called: False
listen: "restart ceph tcmu-runner"
- name: set _rbd_target_gw_handler_called before restart
set_fact:
_rbd_target_gw_handler_called: True
listen: "restart ceph rbd-target-gw"
- name: copy rbd-target-gw restart script
template:
src: restart_rbd_target_gw.sh.j2
dest: /tmp/restart_rbd_target_gw.sh
owner: root
group: root
mode: 0750
listen: "restart ceph rbd-target-gw"
when:
- iscsi_gw_group_name in group_names
- name: restart rbd-target-gw
command: /usr/bin/env bash /tmp/restart_rbd_target_gw.sh
listen: "restart ceph rbd-target-gw"
when:
- iscsi_gw_group_name in group_names
- ceph_rbd_target_gw_stat.get('rc') == 0
- hostvars[item]['_rbd_target_gw_handler_called'] | default(False)
- ceph_rbd_target_gw_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[iscsi_gw_group_name] }}"
delegate_to: "{{ item }}"
run_once: True
- name: set _rbd_target_gw_handler_called after restart
set_fact:
_rbd_target_gw_handler_called: False
listen: "restart ceph rbd-target-gw"
- name: set _rbd_target_api_handler_called before restart
set_fact:
_rbd_target_api_handler_called: True
listen: "restart ceph rbd-target-api"
- name: copy rbd-target-api restart script
template:
src: restart_rbd_target_api.sh.j2
dest: /tmp/restart_rbd_target_api.sh
owner: root
group: root
mode: 0750
listen: "restart ceph rbd-target-api"
when:
- iscsi_gw_group_name in group_names
- name: restart rbd-target-api
command: /usr/bin/env bash /tmp/restart_rbd_target_api.sh
listen: "restart ceph rbd-target-api"
when:
- iscsi_gw_group_name in group_names
- ceph_rbd_target_api_stat.get('rc') == 0
- hostvars[item]['_rbd_target_api_handler_called'] | default(False)
- ceph_rbd_target_api_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[iscsi_gw_group_name] }}"
delegate_to: "{{ item }}"
run_once: True
- name: set _rbd_target_api_handler_called after restart
set_fact:
_rbd_target_api_handler_called: False
listen: "restart ceph rbd-target-api"

View File

@@ -61,3 +61,30 @@
check_mode: no
when:
- inventory_hostname in groups.get(nfs_group_name, [])
- name: check for a tcmu-runner container
command: "docker ps -q --filter='name=tcmu-runner'"
register: ceph_tcmu_runner_stat
changed_when: false
failed_when: false
check_mode: no
when:
- inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: check for a rbd-target-api container
command: "docker ps -q --filter='name=rbd-target-api'"
register: ceph_rbd_target_api_stat
changed_when: false
failed_when: false
check_mode: no
when:
- inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: check for a rbd-target-gw container
command: "docker ps -q --filter='name=rbd-target-gw'"
register: ceph_rbd_target_gw_stat
changed_when: false
failed_when: false
check_mode: no
when:
- inventory_hostname in groups.get(iscsi_gw_group_name, [])

View File

@@ -199,3 +199,30 @@
- inventory_hostname in groups.get(nfs_group_name, [])
- nfs_socket_stat.rc == 0
- nfs_socket.rc == 1
- name: check for a tcmu-runner
command: "pgrep tcmu-runner"
register: ceph_tcmu_runner_stat
changed_when: false
failed_when: false
check_mode: no
when:
- inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: check for a rbd-target-api
command: "pgrep rbd-target-api"
register: ceph_rbd_target_api_stat
changed_when: false
failed_when: false
check_mode: no
when:
- inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: check for a rbd-target-gw
command: "pgrep name=rbd-target-gw"
register: ceph_rbd_target_gw_stat
changed_when: false
failed_when: false
check_mode: no
when:
- inventory_hostname in groups.get(iscsi_gw_group_name, [])

View File

@@ -0,0 +1,3 @@
#!/bin/bash
systemctl restart rbd-target-api

View File

@@ -0,0 +1,3 @@
#!/bin/bash
systemctl restart rbd-target-gw

View File

@@ -0,0 +1,3 @@
#!/bin/bash
systemctl restart tcmu-runner

View File

@@ -1,6 +1,9 @@
---
# You can override vars by using host or group vars
###########
# GENERAL #
###########
# Specify the iqn for ALL gateways. This iqn is shared across the gateways, so an iscsi
# client sees the gateway group as a single storage subsystem.
gateway_iqn: "iqn.2003-01.com.redhat.iscsi-gw:ceph-igw"
@@ -32,7 +35,6 @@ gateway_ip_list: 0.0.0.0
# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
rbd_devices: {}
# client_connections defines the client ACLs to restrict client access to specific LUNs
# The settings are as follows:
# - image_list is a comma separated list of rbd images of the form <pool name>.<rbd_image_name>
@@ -50,5 +52,40 @@ rbd_devices: {}
client_connections: {}
# Whether or not to generate a secure certificate for the iSCSI gateway nodes
generate_crt: False
##################
# RBD-TARGET-API #
##################
# Optional settings related to the CLI/API service
api_user: admin
api_password: admin
api_port: 5001
api_secure: false
loop_delay: 1
trusted_ip_list: 192.168.122.1
##########
# DOCKER #
##########
# Resource limitation
# For the full list of limits you can apply, see: docs.docker.com/engine/admin/resource_constraints
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mds_docker_extra_env' variable.
# TCMU_RUNNER resource limitation
ceph_tcmu_runner_docker_memory_limit: 1g
ceph_tcmu_runner_docker_cpu_limit: 1
# RBD_TARGET_GW resource limitation
ceph_rbd_target_gw_docker_memory_limit: 1g
ceph_rbd_target_gw_docker_cpu_limit: 1
# RBD_TARGET_API resource limitation
ceph_rbd_target_api_docker_memory_limit: 1g
ceph_rbd_target_api_docker_cpu_limit: 1

View File

@@ -1,6 +0,0 @@
---
- name: make sure gateway_ip_list is configured
fail:
msg: "you must set a list of IPs (comma separated) for gateway_ip_list"
when:
- gateway_ip_list == "0.0.0.0"

View File

@@ -0,0 +1,46 @@
---
- name: make sure gateway_ip_list is configured
fail:
msg: "you must set a list of IPs (comma separated) for gateway_ip_list"
when:
- gateway_ip_list == "0.0.0.0"
- name: copy admin key
copy:
src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.client.admin.keyring"
dest: "/etc/ceph/{{ cluster }}.client.admin.keyring"
owner: "root"
group: "root"
mode: "0600"
when:
- cephx
- name: deploy gateway settings, used by the ceph_iscsi_config modules
template:
src: "{{ role_path }}/templates/iscsi-gateway.cfg.j2"
dest: /etc/ceph/iscsi-gateway.cfg
- name: set_fact docker_exec_cmd
set_fact:
docker_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: containerized_deployment
- name: check if a rbd pool exists
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool ls --format json"
changed_when: false
register: rbd_pool_exists
delegate_to: "{{ groups[mon_group_name][0] }}"
- name: get default value for osd_pool_default_pg_num
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} config get osd_pool_default_pg_num"
changed_when: false
register: osd_pool_default_pg_num
delegate_to: "{{ groups[mon_group_name][0] }}"
when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
- name: create a rbd pool if it doesn't exist
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ (osd_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"

View File

@@ -0,0 +1,27 @@
---
- name: generate systemd unit files for tcmu-runner, rbd-target-api and rbd-target-gw
become: true
template:
src: "{{ role_path }}/templates/{{ item }}.service.j2"
dest: /etc/systemd/system/{{ item }}.service
owner: "root"
group: "root"
mode: "0644"
with_items:
- tcmu-runner
- rbd-target-gw
- rbd-target-api
notify:
- restart ceph {{ item }}
- name: systemd start tcmu-runner, rbd-target-api and rbd-target-gw containers
systemd:
name: "{{ item }}"
state: started
enabled: yes
daemon_reload: yes
changed_when: false
with_items:
- tcmu-runner
- rbd-target-gw
- rbd-target-api

View File

@@ -1,15 +1,11 @@
---
- name: set_fact docker_exec_cmd
set_fact:
docker_exec_cmd: "docker exec ceph-iscsi-gw-{{ ansible_hostname }}"
- name: include common.yml
include: common.yml
- name: include non-container/prerequisites.yml
include: non-container/prerequisites.yml
when:
- containerized_deployment
- name: include check_mandatory_vars.yml
include: check_mandatory_vars.yml
- name: include prerequisites.yml
include: prerequisites.yml
- not containerized_deployment
# deploy_ssl_keys uses the ansible controller to create self-signed crt/key/pub files
# and transfers them to the /etc/ceph directory on each gateway node. SSL certs are used by
@@ -19,5 +15,12 @@
when:
- generate_crt|bool
- name: include configure_iscsi.yml
include: configure_iscsi.yml
- name: include non-container/configure_iscsi.yml
include: non-container/configure_iscsi.yml
when:
- not containerized_deployment
- name: include containerized.yml
include: container/containerized.yml
when:
- containerized_deployment

View File

@@ -1,19 +1,4 @@
---
- name: check if a rbd pool exists
command: ceph --cluster {{ cluster }} osd pool ls --format json
register: rbd_pool_exists
- name: get default value for osd_pool_default_pg_num
command: ceph --cluster {{ cluster }} daemon mon.{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} config get osd_pool_default_pg_num
register: osd_pool_default_pg_num
when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
delegate_to: "{{ groups[mon_group_name][0] }}"
- name: create a rbd pool if it doesn't exist
command: ceph --cluster {{ cluster }} osd pool create rbd {{ (osd_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}
when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
delegate_to: "{{ groups[mon_group_name][0] }}"
- name: igw_gateway (tgt) | configure iscsi target (gateway)
igw_gateway:
mode: "target"

View File

@@ -70,18 +70,3 @@
name: rbd-target-gw
enabled: yes
state: started
- name: copy admin key
copy:
src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.client.admin.keyring"
dest: "/etc/ceph/{{ cluster }}.client.admin.keyring"
owner: "root"
group: "root"
mode: "0600"
when:
- cephx
- name: deploy gateway settings, used by the ceph_iscsi_config modules
template:
src: "{{ role_path }}/templates/iscsi-gateway.cfg.j2"
dest: /etc/ceph/iscsi-gateway.cfg

View File

@@ -7,11 +7,21 @@
cluster_name = {{ cluster }}
gateway_keyring = {{ cluster }}.client.admin.keyring
# API settings.
# The API supports a number of options that allow you to tailor it to your
# local environment. If you want to run the API under https, you will need to
# create cert/key files that are valid on every iSCSI gateway node, i.e. not
# locked to a specific node. SSL cert and key files *must* be called
# 'iscsi-gateway.crt' and 'iscsi-gateway.key' and placed in the '/etc/ceph/' directory
# on *each* gateway node. With the SSL files in place, you can use 'api_secure = true'
# to switch to https mode.
# To support the API, the bare minimum settings are:
api_secure = {{ api_secure }}
# Optional settings related to the CLI/API service
#api_user = admin
#api_password = admin
#api_port = 5001
#api_secure = true
#loop_delay = .5
#trusted_ip_list = 192.168.122.1
api_user = {{ api_user }}
api_password = {{ api_password }}
api_port = {{ api_port }}
loop_delay = {{ loop_delay }}
trusted_ip_list = {{ trusted_ip_list }}
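
With the role defaults shown earlier and the stock cluster name 'ceph', this
part of the rendered /etc/ceph/iscsi-gateway.cfg would look roughly as follows
(the surrounding lines come from parts of the template not shown in this diff):

    cluster_name = ceph
    gateway_keyring = ceph.client.admin.keyring
    api_secure = false
    api_user = admin
    api_password = admin
    api_port = 5001
    loop_delay = 1
    trusted_ip_list = 192.168.122.1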

View File

@@ -0,0 +1,33 @@
[Unit]
Description=RBD Target API Service
After=docker.service
[Service]
EnvironmentFile=-/etc/environment
ExecStartPre=-/usr/bin/docker stop rbd-target-api
ExecStartPre=-/usr/bin/docker rm rbd-target-api
ExecStart=/usr/bin/docker run --rm \
--memory={{ ceph_rbd_target_api_docker_memory_limit }} \
{% if ceph_docker_version.split('.')[0] | version_compare('13', '>=') -%}
--cpus={{ ceph_rbd_target_api_docker_cpu_limit }} \
{% else -%}
--cpu-quota={{ ceph_rbd_target_api_docker_cpu_limit * 100000 }} \
{% endif -%}
-v /etc/localtime:/etc/localtime:ro \
--privileged \
--cap-add=ALL \
-v /dev:/dev \
-v /lib/modules:/lib/modules \
-v /etc/ceph:/etc/ceph \
-e CLUSTER={{ cluster }} \
-e CEPH_DAEMON=RBD_TARGET_API \
--name=rbd-target-api \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
ExecStopPost=-/usr/bin/docker stop rbd-target-api
Restart=always
RestartSec=10s
TimeoutStartSec=120
TimeoutStopSec=15
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,33 @@
[Unit]
Description=RBD Target Gateway Service
After=docker.service
[Service]
EnvironmentFile=-/etc/environment
ExecStartPre=-/usr/bin/docker stop rbd-target-gw
ExecStartPre=-/usr/bin/docker rm rbd-target-gw
ExecStart=/usr/bin/docker run --rm \
--memory={{ ceph_rbd_target_gw_docker_memory_limit }} \
{% if ceph_docker_version.split('.')[0] | version_compare('13', '>=') -%}
--cpus={{ ceph_rbd_target_gw_docker_cpu_limit }} \
{% else -%}
--cpu-quota={{ ceph_rbd_target_gw_docker_cpu_limit * 100000 }} \
{% endif -%}
-v /etc/localtime:/etc/localtime:ro \
--privileged \
--cap-add=ALL \
-v /dev:/dev \
-v /lib/modules:/lib/modules \
-v /etc/ceph:/etc/ceph \
-e CLUSTER={{ cluster }} \
-e CEPH_DAEMON=RBD_TARGET_GW \
--name=rbd-target-gw \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
ExecStopPost=-/usr/bin/docker stop rbd-target-gw
Restart=always
RestartSec=10s
TimeoutStartSec=120
TimeoutStopSec=15
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,33 @@
[Unit]
Description=TCMU Runner
After=docker.service
[Service]
EnvironmentFile=-/etc/environment
ExecStartPre=-/usr/bin/docker stop tcmu-runner
ExecStartPre=-/usr/bin/docker rm tcmu-runner
ExecStart=/usr/bin/docker run --rm \
--memory={{ ceph_tcmu_runner_docker_memory_limit }} \
{% if ceph_docker_version.split('.')[0] | version_compare('13', '>=') -%}
--cpus={{ ceph_tcmu_runner_docker_cpu_limit }} \
{% else -%}
--cpu-quota={{ ceph_tcmu_runner_docker_cpu_limit * 100000 }} \
{% endif -%}
-v /etc/localtime:/etc/localtime:ro \
--privileged \
--cap-add=ALL \
-v /dev:/dev \
-v /lib/modules:/lib/modules \
-v /etc/ceph:/etc/ceph \
-e CLUSTER={{ cluster }} \
-e CEPH_DAEMON=TCMU_RUNNER \
--name=tcmu-runner \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
ExecStopPost=-/usr/bin/docker stop tcmu-runner
Restart=always
RestartSec=10s
TimeoutStartSec=120
TimeoutStopSec=15
[Install]
WantedBy=multi-user.target