mirror of https://github.com/ceph/ceph-ansible.git
infra: introduce docker to podman playbook
This isn't backported from master because there are too many changes between stable-3.2 and the newer branches.

NOTE: This playbook *doesn't* add podman support in stable-3.2 at all. It is a TripleO-dedicated playbook intended to be run early in the FFU workflow in order to prepare the OS upgrade.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1853457
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
parent 6daa2c9d22
commit 90f3f61548
@@ -555,6 +555,7 @@ dummy:
 ##########
 # DOCKER #
 ##########
+#container_binary: docker
 #docker_exec_cmd:
 #docker: false
 #ceph_docker_image: "ceph/daemon"
@@ -555,6 +555,7 @@ ceph_rhcs_version: 3
 ##########
 # DOCKER #
 ##########
+#container_binary: docker
 #docker_exec_cmd:
 #docker: false
 ceph_docker_image: "rhceph/rhceph-3-rhel7"
@@ -0,0 +1,127 @@
+# This playbook *doesn't* add podman support in stable-3.2 at all.
+# This is a tripleO dedicated playbook which is intended to be run
+# early during FFU workflow in order to prepare the OS upgrade.
+
+- hosts:
+  - mons
+  - osds
+  - mdss
+  - rgws
+  - nfss
+  - rbdmirrors
+  - clients
+  - iscsigws
+  - iscsi-gws # for backward compatibility only!
+  - mgrs
+
+  gather_facts: false
+  become: True
+  any_errors_fatal: true
+
+  vars:
+    delegate_facts_host: True
+
+  roles:
+    - ceph-defaults
+
+  post_tasks:
+    - name: gather facts
+      setup:
+        gather_subset:
+          - 'all'
+          - '!facter'
+          - '!ohai'
+      when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, [])
+
+    - name: gather and delegate facts
+      setup:
+        gather_subset:
+          - 'all'
+          - '!facter'
+          - '!ohai'
+      delegate_to: "{{ item }}"
+      delegate_facts: True
+      with_items: "{{ groups['all'] | difference(groups.get(client_group_name | default('clients'), [])) }}"
+      run_once: true
+      when: delegate_facts_host | bool
+
+- hosts:
+  - "{{ mon_group_name | default('mons') }}"
+  - "{{ osd_group_name | default('osds') }}"
+  - "{{ mds_group_name | default('mdss') }}"
+  - "{{ rgw_group_name | default('rgws') }}"
+  - "{{ nfs_group_name | default('nfss') }}"
+  - "{{ mgr_group_name | default('mgrs') }}"
+  - "{{ iscsi_gw_group_name | default('iscsigws') }}"
+  - "{{ rbdmirror_group_name | default('rbdmirrors') }}"
+  gather_facts: false
+  become: true
+  roles:
+    - ceph-defaults
+  post_tasks:
+    - import_role:
+        name: ceph-facts
+    - import_role:
+        name: ceph-handler
+    - import_role:
+        name: ceph-docker-common
+        tasks_from: ceph_docker_version.yml
+
+    - name: set_fact docker2podman and container_binary
+      set_fact:
+        docker2podman: True
+        container_binary: podman
+
+    - import_role:
+        name: ceph-mon
+        tasks_from: docker2podman.yml
+      when: inventory_hostname in groups.get(mon_group_name, [])
+
+    - import_role:
+        name: ceph-iscsi-gw
+        tasks_from: docker2podman.yml
+      when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
+
+    - import_role:
+        name: ceph-mds
+        tasks_from: systemd.yml
+      when: inventory_hostname in groups.get(mds_group_name, [])
+
+    - import_role:
+        name: ceph-mgr
+        tasks_from: docker2podman.yml
+      when: inventory_hostname in groups.get(mgr_group_name, [])
+
+    - import_role:
+        name: ceph-nfs
+        tasks_from: systemd.yml
+      when: inventory_hostname in groups.get(nfs_group_name, [])
+
+    - import_role:
+        name: ceph-osd
+        tasks_from: systemd.yml
+      when: inventory_hostname in groups.get(osd_group_name, [])
+
+    - import_role:
+        name: ceph-rbd-mirror
+        tasks_from: docker2podman.yml
+      when: inventory_hostname in groups.get(rbdmirror_group_name, [])
+
+    - import_role:
+        name: ceph-rgw
+        tasks_from: docker2podman.yml
+      when: inventory_hostname in groups.get(rgw_group_name, [])
+
+    # This is needed, otherwise containers won't come back after the reboot
+    # because this file is added later by the call of rolling_update playbook.
+    - name: add /etc/tmpfiles.d/ceph-common.conf
+      copy:
+        content: "d /run/ceph 0770 root root -"
+        dest: /etc/tmpfiles.d/ceph-common.conf
+        owner: root
+        group: root
+        mode: 0644
+
+    - name: reload systemd daemon
+      systemd:
+        daemon_reload: yes
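Note on the tmpfiles.d task above: /run is a tmpfs, so /run/ceph vanishes at reboot and systemd-tmpfiles recreates it at boot from the /etc/tmpfiles.d entry. A minimal sketch (not part of this commit) of applying the entry immediately rather than waiting for the reboot:

- name: apply the tmpfiles.d entry right away (illustration only)
  command: systemd-tmpfiles --create /etc/tmpfiles.d/ceph-common.conf
  changed_when: false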
@@ -547,6 +547,7 @@ ceph_tcmalloc_max_total_thread_cache: 0
 ##########
 # DOCKER #
 ##########
+container_binary: docker
 docker_exec_cmd:
 docker: false
 ceph_docker_image: "ceph/daemon"
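container_binary now defaults to docker in ceph-defaults; the playbook above flips it to podman per host with set_fact. Assuming a standard group_vars layout, the same value could also be pinned from the inventory instead:

# group_vars/all.yml (sketch; this overrides the ceph-defaults value)
container_binary: podman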
@@ -0,0 +1,10 @@
+---
+- name: get docker version
+  command: docker --version
+  changed_when: false
+  check_mode: no
+  register: ceph_docker_version
+
+- name: set_fact ceph_docker_version ceph_docker_version.stdout.split
+  set_fact:
+    ceph_docker_version: "{{ ceph_docker_version.stdout.split(' ')[2] }}"
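The split(' ')[2] indexing relies on the fixed "Docker version X.Y.Z, ..." output shape; the stored value keeps the trailing comma, which is harmless because the templates below only consume the major component via split('.')[0]. An illustration with a hypothetical version string (not part of the commit):

- name: illustrate the version parsing (sketch)
  debug:
    msg: "{{ 'Docker version 1.13.1, build b2f74b2'.split(' ')[2] }}"  # -> "1.13.1,"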
@@ -23,15 +23,8 @@
   when:
     - mon_use_fqdn
 
-- name: get docker version
-  command: docker --version
-  changed_when: false
-  check_mode: no
-  register: ceph_docker_version
-
-- name: set_fact ceph_docker_version ceph_docker_version.stdout.split
-  set_fact:
-    ceph_docker_version: "{{ ceph_docker_version.stdout.split(' ')[2] }}"
+- name: include ceph_docker_version.yml
+  include_tasks: ceph_docker_version.yml
 
 # Only include 'checks.yml' when :
 # we are deploying containers without kv AND host is either a mon OR a nfs OR an osd,
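Extracting these two tasks into ceph_docker_version.yml lets the role's main.yml and the new playbook share a single implementation; the playbook consumes it with the same import shown in the play above:

- import_role:
    name: ceph-docker-common
    tasks_from: ceph_docker_version.yml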
@@ -1,463 +1,467 @@
 ---
-- name: update apt cache
-  apt:
-    update-cache: yes
-  when:
-    - ansible_os_family == 'Debian'
+- name: handlers
+  when:
+    - not docker2podman | default(False) | bool
+  block:
+    - name: update apt cache
+      apt:
+        update-cache: yes
+      when:
+        - ansible_os_family == 'Debian'
 
     - name: unset noup flag
       command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd unset noup"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       changed_when: False
 
     # We only want to restart on hosts that have called the handler.
     # This var is set when he handler is called, and unset after the
     # restart to ensure only the correct hosts are restarted.
     - name: set _mon_handler_called before restart
       set_fact:
         _mon_handler_called: True
       listen: "restart ceph mons"
 
     - name: copy mon restart script
       template:
         src: restart_mon_daemon.sh.j2
         dest: /tmp/restart_mon_daemon.sh
         owner: root
         group: root
         mode: 0750
       listen: "restart ceph mons"
       when:
         - mon_group_name in group_names
 
     - name: restart ceph mon daemon(s) - non container
       command: /usr/bin/env bash /tmp/restart_mon_daemon.sh
       listen: "restart ceph mons"
       when:
         # We do not want to run these checks on initial deployment (`socket.rc == 0`)
         - mon_group_name in group_names
         - not containerized_deployment
         - hostvars[item]['_mon_handler_called'] | default(False)
         - mon_socket_stat.rc == 0
       with_items: "{{ groups[mon_group_name] }}"
       delegate_to: "{{ item }}"
       run_once: True
 
     - name: restart ceph mon daemon(s) - container
       command: /usr/bin/env bash /tmp/restart_mon_daemon.sh
       listen: "restart ceph mons"
       when:
         # We do not want to run these checks on initial deployment (`socket.rc == 0`)
         - mon_group_name in group_names
         - containerized_deployment
         - ceph_mon_container_stat.get('rc') == 0
         - hostvars[item]['_mon_handler_called'] | default(False)
         - ceph_mon_container_stat.get('stdout_lines', [])|length != 0
       with_items: "{{ groups[mon_group_name] }}"
       delegate_to: "{{ item }}"
       run_once: True
 
     - name: set _mon_handler_called after restart
       set_fact:
         _mon_handler_called: False
       listen: "restart ceph mons"
 
     - name: set _osd_handler_called before restart
       set_fact:
         _osd_handler_called: True
       listen: "restart ceph osds"
 
     # This does not just restart OSDs but everything else too. Unfortunately
     # at this time the ansible role does not have an OSD id list to use
     # for restarting them specifically.
     # This does not need to run during a rolling update as the playbook will
     # restart all OSDs using the tasks "start ceph osd" or
     # "restart containerized ceph osd"
     - name: copy osd restart script
       template:
         src: restart_osd_daemon.sh.j2
         dest: /tmp/restart_osd_daemon.sh
         owner: root
         group: root
         mode: 0750
       listen: "restart ceph osds"
       when:
         - osd_group_name in group_names
         - not rolling_update
 
     - name: restart ceph osds daemon(s) - non container
       command: /usr/bin/env bash /tmp/restart_osd_daemon.sh
       listen: "restart ceph osds"
       when:
         - osd_group_name in group_names
         - not containerized_deployment
         - not rolling_update
         # We do not want to run these checks on initial deployment (`socket_osd_container.results[n].rc == 0`)
         # except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
         - osd_socket_stat.rc == 0
         - ceph_current_status.fsid is defined
         - handler_health_osd_check
         - hostvars[item]['_osd_handler_called'] | default(False)
       with_items: "{{ groups[osd_group_name] | intersect(ansible_play_batch) }}"
       delegate_to: "{{ item }}"
       run_once: True
 
     - name: restart ceph osds daemon(s) - container
       command: /usr/bin/env bash /tmp/restart_osd_daemon.sh
       listen: "restart ceph osds"
       when:
         # We do not want to run these checks on initial deployment (`socket_osd_container_stat.results[n].rc == 0`)
         # except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
         - osd_group_name in group_names
         - containerized_deployment
         - not rolling_update
         - ceph_osd_container_stat.get('rc') == 0
         - ceph_osd_container_stat.get('stdout_lines', [])|length != 0
         - handler_health_osd_check
         - hostvars[item]['_osd_handler_called'] | default(False)
       with_items: "{{ groups[osd_group_name] | intersect(ansible_play_batch) }}"
       delegate_to: "{{ item }}"
       run_once: True
 
     - name: set _osd_handler_called after restart
       set_fact:
         _osd_handler_called: False
       listen: "restart ceph osds"
 
     - name: set _mds_handler_called before restart
       set_fact:
         _mds_handler_called: True
       listen: "restart ceph mdss"
 
     - name: copy mds restart script
       template:
         src: restart_mds_daemon.sh.j2
         dest: /tmp/restart_mds_daemon.sh
         owner: root
         group: root
         mode: 0750
       listen: "restart ceph mdss"
       when:
         - mds_group_name in group_names
 
     - name: restart ceph mds daemon(s) - non container
       command: /usr/bin/env bash /tmp/restart_mds_daemon.sh
       listen: "restart ceph mdss"
       when:
         # We do not want to run these checks on initial deployment (`socket.rc == 0`)
         - mds_group_name in group_names
         - not containerized_deployment
         - hostvars[item]['_mds_handler_called'] | default(False)
         - mds_socket_stat.rc == 0
       with_items: "{{ groups[mds_group_name] }}"
       delegate_to: "{{ item }}"
       run_once: True
 
     - name: restart ceph mds daemon(s) - container
       command: /usr/bin/env bash /tmp/restart_mds_daemon.sh
       listen: "restart ceph mdss"
       when:
         # We do not want to run these checks on initial deployment (`socket.rc == 0`)
         - mds_group_name in group_names
         - containerized_deployment
         - ceph_mds_container_stat.get('rc') == 0
         - hostvars[item]['_mds_handler_called'] | default(False)
         - ceph_mds_container_stat.get('stdout_lines', [])|length != 0
       with_items: "{{ groups[mds_group_name] }}"
       delegate_to: "{{ item }}"
       run_once: True
 
     - name: set _mds_handler_called after restart
       set_fact:
         _mds_handler_called: False
       listen: "restart ceph mdss"
 
     - name: set _rgw_handler_called before restart
       set_fact:
         _rgw_handler_called: True
       listen: "restart ceph rgws"
 
     - name: copy rgw restart script
       template:
         src: restart_rgw_daemon.sh.j2
         dest: /tmp/restart_rgw_daemon.sh
         owner: root
         group: root
         mode: 0750
       listen: "restart ceph rgws"
       when:
         - rgw_group_name in group_names
 
     - name: restart ceph rgw daemon(s) - non container
       command: /usr/bin/env bash /tmp/restart_rgw_daemon.sh
       listen: "restart ceph rgws"
       when:
         # We do not want to run these checks on initial deployment (`socket.rc == 0`)
         - rgw_group_name in group_names
         - not containerized_deployment
         - hostvars[item]['_rgw_handler_called'] | default(False)
         - rgw_socket_stat.rc == 0
       with_items: "{{ groups[rgw_group_name] }}"
       delegate_to: "{{ item }}"
       run_once: True
 
     - name: restart ceph rgw daemon(s) - container
       command: /usr/bin/env bash /tmp/restart_rgw_daemon.sh
       listen: "restart ceph rgws"
       when:
         # We do not want to run these checks on initial deployment (`socket.rc == 0`)
         - rgw_group_name in group_names
         - containerized_deployment
         - ceph_rgw_container_stat.get('rc') == 0
         - hostvars[item]['_rgw_handler_called'] | default(False)
         - ceph_rgw_container_stat.get('stdout_lines', [])|length != 0
       with_items: "{{ groups[rgw_group_name] }}"
       delegate_to: "{{ item }}"
       run_once: True
 
     - name: set _rgw_handler_called after restart
       set_fact:
         _rgw_handler_called: False
       listen: "restart ceph rgws"
 
     - name: set _nfs_handler_called before restart
       set_fact:
         _nfs_handler_called: True
       listen: "restart ceph nfss"
 
     - name: copy nfs restart script
       template:
         src: restart_nfs_daemon.sh.j2
         dest: /tmp/restart_nfs_daemon.sh
         owner: root
         group: root
         mode: 0750
       listen: "restart ceph nfss"
       when:
         - nfs_group_name in group_names
 
     - name: restart ceph nfs daemon(s) - non container
       command: /usr/bin/env bash /tmp/restart_nfs_daemon.sh
       listen: "restart ceph nfss"
       when:
         # We do not want to run these checks on initial deployment (`socket.rc == 0`)
         - nfs_group_name in group_names
         - not containerized_deployment
         - hostvars[item]['_nfs_handler_called'] | default(False)
         - nfs_socket_stat.rc == 0
       with_items: "{{ groups[nfs_group_name] }}"
       delegate_to: "{{ item }}"
       run_once: True
 
     - name: restart ceph nfs daemon(s) - container
       command: /usr/bin/env bash /tmp/restart_nfs_daemon.sh
       listen: "restart ceph nfss"
       when:
         # We do not want to run these checks on initial deployment (`socket.rc == 0`)
         - nfs_group_name in group_names
         - containerized_deployment
         - ceph_nfs_container_stat.get('rc') == 0
         - hostvars[item]['_nfs_handler_called'] | default(False)
         - ceph_nfs_container_stat.get('stdout_lines', [])|length != 0
       with_items: "{{ groups[nfs_group_name] }}"
       delegate_to: "{{ item }}"
       run_once: True
 
     - name: set _nfs_handler_called after restart
       set_fact:
         _nfs_handler_called: False
       listen: "restart ceph nfss"
 
     - name: set _rbdmirror_handler_called before restart
       set_fact:
         _rbdmirror_handler_called: True
       listen: "restart ceph rbdmirrors"
 
     - name: copy rbd mirror restart script
       template:
         src: restart_rbd_mirror_daemon.sh.j2
         dest: /tmp/restart_rbd_mirror_daemon.sh
         owner: root
         group: root
         mode: 0750
       listen: "restart ceph rbdmirrors"
       when:
         - rbdmirror_group_name in group_names
 
     - name: restart ceph rbd mirror daemon(s) - non container
       command: /usr/bin/env bash /tmp/restart_rbd_mirror_daemon.sh
       listen: "restart ceph rbdmirrors"
       when:
         # We do not want to run these checks on initial deployment (`socket.rc == 0`)
         - rbdmirror_group_name in group_names
         - not containerized_deployment
         - hostvars[item]['_rbdmirror_handler_called'] | default(False)
         - rbd_mirror_socket_stat.rc == 0
       with_items: "{{ groups[rbdmirror_group_name] }}"
       delegate_to: "{{ item }}"
       run_once: True
 
     - name: restart ceph rbd mirror daemon(s) - container
       command: /usr/bin/env bash /tmp/restart_rbd_mirror_daemon.sh
       listen: "restart ceph rbdmirrors"
       when:
         # We do not want to run these checks on initial deployment (`socket.rc == 0`)
         - rbdmirror_group_name in group_names
         - containerized_deployment
         - ceph_rbd_mirror_container_stat.get('rc') == 0
         - hostvars[item]['_rbdmirror_handler_called'] | default(False)
         - ceph_rbd_mirror_container_stat.get('stdout_lines', [])|length != 0
       with_items: "{{ groups[rbdmirror_group_name] }}"
       delegate_to: "{{ item }}"
       run_once: True
 
     - name: set _rbdmirror_handler_called after restart
       set_fact:
         _rbdmirror_handler_called: False
       listen: "restart ceph rbdmirrors"
 
     - name: set _mgr_handler_called before restart
       set_fact:
         _mgr_handler_called: True
       listen: "restart ceph mgrs"
 
     - name: copy mgr restart script
       template:
         src: restart_mgr_daemon.sh.j2
         dest: /tmp/restart_mgr_daemon.sh
         owner: root
         group: root
         mode: 0750
       listen: "restart ceph mgrs"
       when:
         - mgr_group_name in group_names
 
     - name: restart ceph mgr daemon(s) - non container
       command: /usr/bin/env bash /tmp/restart_mgr_daemon.sh
       listen: "restart ceph mgrs"
       when:
         # We do not want to run these checks on initial deployment (`socket.rc == 0`)
         - mgr_group_name in group_names
         - not containerized_deployment
         - hostvars[item]['_mgr_handler_called'] | default(False)
         - mgr_socket_stat.rc == 0
       with_items: "{{ groups[mgr_group_name] }}"
       delegate_to: "{{ item }}"
       run_once: True
 
     - name: restart ceph mgr daemon(s) - container
       command: /usr/bin/env bash /tmp/restart_mgr_daemon.sh
       listen: "restart ceph mgrs"
       when:
         # We do not want to run these checks on initial deployment (`socket.rc == 0`)
         - mgr_group_name in group_names
         - containerized_deployment
         - ceph_mgr_container_stat.get('rc') == 0
         - hostvars[item]['_mgr_handler_called'] | default(False)
         - ceph_mgr_container_stat.get('stdout_lines', [])|length != 0
       with_items: "{{ groups[mgr_group_name] }}"
       delegate_to: "{{ item }}"
       run_once: True
 
     - name: set _mgr_handler_called after restart
       set_fact:
         _mgr_handler_called: False
       listen: "restart ceph mgrs"
 
     - name: set _tcmu_runner_handler_called before restart
       set_fact:
         _tcmu_runner_handler_called: True
       listen: "restart ceph tcmu-runner"
 
     - name: copy tcmu-runner restart script
       template:
         src: restart_tcmu_runner.sh.j2
         dest: /tmp/restart_tcmu_runner.sh
         owner: root
         group: root
         mode: 0750
       listen: "restart ceph tcmu-runner"
       when:
         - iscsi_gw_group_name in group_names
 
     - name: restart tcmu-runner
       command: /usr/bin/env bash /tmp/restart_tcmu_runner.sh
       listen: "restart ceph tcmu-runner"
       when:
         - iscsi_gw_group_name in group_names
         - ceph_tcmu_runner_stat.get('rc') == 0
         - hostvars[item]['_tcmu_runner_handler_called'] | default(False)
         - ceph_tcmu_runner_stat.get('stdout_lines', [])|length != 0
       with_items: "{{ groups[iscsi_gw_group_name] }}"
       delegate_to: "{{ item }}"
       run_once: True
 
     - name: set _tcmu_runner_handler_called after restart
       set_fact:
         _tcmu_runner_handler_called: False
       listen: "restart ceph tcmu-runner"
 
     - name: set _rbd_target_gw_handler_called before restart
       set_fact:
         _rbd_target_gw_handler_called: True
       listen: "restart ceph rbd-target-gw"
 
     - name: copy rbd-target-gw restart script
       template:
         src: restart_rbd_target_gw.sh.j2
         dest: /tmp/restart_rbd_target_gw.sh
         owner: root
         group: root
         mode: 0750
       listen: "restart ceph rbd-target-gw"
       when:
         - iscsi_gw_group_name in group_names
 
     - name: restart rbd-target-gw
       command: /usr/bin/env bash /tmp/restart_rbd_target_gw.sh
       listen: "restart ceph rbd-target-gw"
       when:
         - iscsi_gw_group_name in group_names
         - ceph_rbd_target_gw_stat.get('rc') == 0
         - hostvars[item]['_rbd_target_gw_handler_called'] | default(False)
         - ceph_rbd_target_gw_stat.get('stdout_lines', [])|length != 0
       with_items: "{{ groups[iscsi_gw_group_name] }}"
       delegate_to: "{{ item }}"
       run_once: True
 
     - name: set _rbd_target_gw_handler_called after restart
       set_fact:
         _rbd_target_gw_handler_called: False
       listen: "restart ceph rbd-target-gw"
 
     - name: set _rbd_target_api_handler_called before restart
       set_fact:
         _rbd_target_api_handler_called: True
       listen: "restart ceph rbd-target-api"
 
     - name: copy rbd-target-api restart script
       template:
         src: restart_rbd_target_api.sh.j2
         dest: /tmp/restart_rbd_target_api.sh
         owner: root
         group: root
         mode: 0750
       listen: "restart ceph rbd-target-api"
       when:
         - iscsi_gw_group_name in group_names
 
     - name: restart rbd-target-api
       command: /usr/bin/env bash /tmp/restart_rbd_target_api.sh
       listen: "restart ceph rbd-target-api"
       when:
         - iscsi_gw_group_name in group_names
         - ceph_rbd_target_api_stat.get('rc') == 0
         - hostvars[item]['_rbd_target_api_handler_called'] | default(False)
         - ceph_rbd_target_api_stat.get('stdout_lines', [])|length != 0
       with_items: "{{ groups[iscsi_gw_group_name] }}"
       delegate_to: "{{ item }}"
       run_once: True
 
     - name: set _rbd_target_api_handler_called after restart
       set_fact:
         _rbd_target_api_handler_called: False
       listen: "restart ceph rbd-target-api"
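Gating every restart handler behind `not docker2podman` makes notified handlers no-ops during the migration: the unit files have just been rewritten for podman, and the daemons are only expected to come up under the new binary after the reboot that follows in the FFU workflow. A minimal sketch of the block-level gating pattern (names are illustrative):

- name: handlers
  when:
    - not docker2podman | default(False) | bool
  block:
    - name: example gated handler (illustration only)
      debug:
        msg: "skipped entirely while docker2podman is True"
      listen: "restart ceph mons"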
@@ -1,18 +1,6 @@
 ---
-- name: generate systemd unit files for tcmu-runner, rbd-target-api and rbd-target-gw
-  become: true
-  template:
-    src: "{{ role_path }}/templates/{{ item }}.service.j2"
-    dest: /etc/systemd/system/{{ item }}.service
-    owner: "root"
-    group: "root"
-    mode: "0644"
-  with_items:
-    - tcmu-runner
-    - rbd-target-gw
-    - rbd-target-api
-  notify:
-    - restart ceph {{ item }}
+- name: include systemd.yml
+  include_tasks: systemd.yml
 
 - name: systemd start tcmu-runner, rbd-target-api and rbd-target-gw containers
   systemd:
@@ -0,0 +1,15 @@
+---
+- name: generate systemd unit files for tcmu-runner, rbd-target-api and rbd-target-gw
+  become: true
+  template:
+    src: "{{ role_path }}/templates/{{ item }}.service.j2"
+    dest: /etc/systemd/system/{{ item }}.service
+    owner: "root"
+    group: "root"
+    mode: "0644"
+  with_items:
+    - tcmu-runner
+    - rbd-target-gw
+    - rbd-target-api
+  notify:
+    - restart ceph {{ item }}
@@ -0,0 +1 @@
+container/systemd.yml
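The one-line file above is a git symlink whose blob content is the link target: docker2podman.yml resolves to container/systemd.yml, so the role gains a docker2podman entry point without duplicating the task file that regenerates the unit files. It is consumed from the playbook as:

- import_role:
    name: ceph-iscsi-gw
    tasks_from: docker2podman.yml  # resolves to container/systemd.yml
  when: inventory_hostname in groups.get(iscsi_gw_group_name, [])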
@@ -1,15 +1,19 @@
 [Unit]
 Description=RBD Target API Service
+{% if container_binary == 'docker' %}
 After=docker.service
 Requires=docker.service
+{% else %}
+After=network.target
+{% endif %}
 
 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop rbd-target-api
-ExecStartPre=-/usr/bin/docker rm rbd-target-api
-ExecStart=/usr/bin/docker run --rm \
+ExecStartPre=-/usr/bin/{{ container_binary }} stop rbd-target-api
+ExecStartPre=-/usr/bin/{{ container_binary }} rm rbd-target-api
+ExecStart=/usr/bin/{{ container_binary }} run --rm \
   --memory={{ ceph_rbd_target_api_docker_memory_limit }} \
-{% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+{% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or container_binary == 'podman' -%}
   --cpus={{ ceph_rbd_target_api_docker_cpu_limit }} \
 {% else -%}
   --cpu-quota={{ ceph_rbd_target_api_docker_cpu_limit * 100000 }} \
@@ -25,7 +29,7 @@ ExecStart=/usr/bin/docker run --rm \
   -e CEPH_DAEMON=RBD_TARGET_API \
   --name=rbd-target-api \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop rbd-target-api
+ExecStopPost=-/usr/bin/{{ container_binary }} stop rbd-target-api
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
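The widened Jinja condition keeps --cpus for docker whose major version is >= 13 and selects it unconditionally for podman (which supports --cpus), falling back to --cpu-quota only on older docker. A self-contained evaluation sketch (not part of the commit; the sample values are hypothetical):

- name: show which cpu flag the template would pick (sketch)
  debug:
    msg: "{{ '--cpus' if (container_binary == 'podman' or (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version_compare('13', '>='))) else '--cpu-quota' }}"
  vars:
    container_binary: podman
    ceph_docker_version: "1.13.1,"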
@@ -1,15 +1,19 @@
 [Unit]
 Description=RBD Target Gateway Service
+{% if container_binary == 'docker' %}
 After=docker.service
 Requires=docker.service
+{% else %}
+After=network.target
+{% endif %}
 
 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop rbd-target-gw
-ExecStartPre=-/usr/bin/docker rm rbd-target-gw
-ExecStart=/usr/bin/docker run --rm \
+ExecStartPre=-/usr/bin/{{ container_binary }} stop rbd-target-gw
+ExecStartPre=-/usr/bin/{{ container_binary }} rm rbd-target-gw
+ExecStart=/usr/bin/{{ container_binary }} run --rm \
   --memory={{ ceph_rbd_target_gw_docker_memory_limit }} \
-{% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+{% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or container_binary == 'podman' -%}
   --cpus={{ ceph_rbd_target_gw_docker_cpu_limit }} \
 {% else -%}
   --cpu-quota={{ ceph_rbd_target_gw_docker_cpu_limit * 100000 }} \
@@ -25,7 +29,8 @@ ExecStart=/usr/bin/docker run --rm \
   -e CEPH_DAEMON=RBD_TARGET_GW \
   --name=rbd-target-gw \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop rbd-target-gw
+ExecStopPost=-/usr/bin/{{ container_binary }} stop rbd-target-gw
+KillMode=none
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
@@ -1,15 +1,19 @@
 [Unit]
 Description=TCMU Runner
+{% if container_binary == 'docker' %}
 After=docker.service
 Requires=docker.service
+{% else %}
+After=network.target
+{% endif %}
 
 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop tcmu-runner
-ExecStartPre=-/usr/bin/docker rm tcmu-runner
-ExecStart=/usr/bin/docker run --rm \
+ExecStartPre=-/usr/bin/{{ container_binary }} stop tcmu-runner
+ExecStartPre=-/usr/bin/{{ container_binary }} rm tcmu-runner
+ExecStart=/usr/bin/{{ container_binary }} run --rm \
  --memory={{ ceph_tcmu_runner_docker_memory_limit }} \
-{% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+{% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or container_binary == 'podman' -%}
  --cpus={{ ceph_tcmu_runner_docker_cpu_limit }} \
 {% else -%}
  --cpu-quota={{ ceph_tcmu_runner_docker_cpu_limit * 100000 }} \
@@ -25,7 +29,7 @@ ExecStart=/usr/bin/docker run --rm \
   -e CEPH_DAEMON=TCMU_RUNNER \
   --name=tcmu-runner \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop tcmu-runner
+ExecStopPost=-/usr/bin/{{ container_binary }} stop tcmu-runner
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
@@ -45,16 +45,8 @@
     - "{{ statconfig.results }}"
   when: item.1.stat.exists == true
 
-- name: generate systemd unit file
-  become: true
-  template:
-    src: "{{ role_path }}/templates/ceph-mds.service.j2"
-    dest: /etc/systemd/system/ceph-mds@.service
-    owner: "root"
-    group: "root"
-    mode: "0644"
-  notify:
-    - restart ceph mdss
+- name: include systemd.yml
+  include_tasks: systemd.yml
 
 - name: systemd start mds container
   systemd:
@@ -0,0 +1,11 @@
+---
+- name: generate systemd unit file
+  become: true
+  template:
+    src: "{{ role_path }}/templates/ceph-mds.service.j2"
+    dest: /etc/systemd/system/ceph-mds@.service
+    owner: "root"
+    group: "root"
+    mode: "0644"
+  notify:
+    - restart ceph mdss
@@ -1,16 +1,20 @@
 [Unit]
 Description=Ceph MDS
+{% if container_binary == 'docker' %}
 After=docker.service
 Requires=docker.service
+{% else %}
+After=network.target
+{% endif %}
 {% set cpu_limit = ansible_processor_vcpus|int if ceph_mds_docker_cpu_limit|int > ansible_processor_vcpus|int else ceph_mds_docker_cpu_limit|int %}
 
 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop ceph-mds-{{ ansible_hostname }}
-ExecStartPre=-/usr/bin/docker rm ceph-mds-{{ ansible_hostname }}
-ExecStart=/usr/bin/docker run --rm --net=host \
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mds-{{ ansible_hostname }}
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
   --memory={{ ceph_mds_docker_memory_limit }} \
-{% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+{% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or container_binary == 'podman' -%}
   --cpus={{ cpu_limit }} \
 {% else -%}
   --cpu-quota={{ cpu_limit * 100000 }} \
@@ -30,7 +34,7 @@ ExecStart=/usr/bin/docker run --rm --net=host \
   {{ ceph_mds_docker_extra_env }} \
   --name=ceph-mds-{{ ansible_hostname }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop ceph-mds-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_hostname }}
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
@@ -1,14 +1,6 @@
 ---
-- name: generate systemd unit file
-  become: true
-  template:
-    src: "{{ role_path }}/templates/ceph-mgr.service.j2"
-    dest: /etc/systemd/system/ceph-mgr@.service
-    owner: "root"
-    group: "root"
-    mode: "0644"
-  notify:
-    - restart ceph mgrs
+- name: include systemd.yml
+  include_tasks: systemd.yml
 
 - name: systemd start mgr container
   systemd:
@@ -0,0 +1,11 @@
+---
+- name: generate systemd unit file
+  become: true
+  template:
+    src: "{{ role_path }}/templates/ceph-mgr.service.j2"
+    dest: /etc/systemd/system/ceph-mgr@.service
+    owner: "root"
+    group: "root"
+    mode: "0644"
+  notify:
+    - restart ceph mgrs
@@ -0,0 +1 @@
+docker/systemd.yml
@@ -1,15 +1,19 @@
 [Unit]
 Description=Ceph Manager
+{% if container_binary == 'docker' %}
 After=docker.service
 Requires=docker.service
+{% else %}
+After=network.target
+{% endif %}
 
 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop ceph-mgr-{{ ansible_hostname }}
-ExecStartPre=-/usr/bin/docker rm ceph-mgr-{{ ansible_hostname }}
-ExecStart=/usr/bin/docker run --rm --net=host \
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mgr-{{ ansible_hostname }}
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
   --memory={{ ceph_mgr_docker_memory_limit }} \
-{% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+{% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or container_binary == 'podman' -%}
   --cpus={{ ceph_mgr_docker_cpu_limit }} \
 {% else -%}
   --cpu-quota={{ ceph_mgr_docker_cpu_limit * 100000 }} \
@@ -29,7 +33,7 @@ ExecStart=/usr/bin/docker run --rm --net=host \
   {{ ceph_mgr_docker_extra_env }} \
   --name=ceph-mgr-{{ ansible_hostname }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop ceph-mgr-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_hostname }}
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
@@ -40,16 +40,8 @@
     image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
   when: containerized_deployment_with_kv
 
-- name: generate systemd unit file
-  become: true
-  template:
-    src: "{{ role_path }}/templates/ceph-mon.service.j2"
-    dest: /etc/systemd/system/ceph-mon@.service
-    owner: "root"
-    group: "root"
-    mode: "0644"
-  notify:
-    - restart ceph mons
+- name: include systemd.yml
+  include_tasks: systemd.yml
 
 - name: systemd start mon container
   systemd:
@@ -0,0 +1,11 @@
+---
+- name: generate systemd unit file
+  become: true
+  template:
+    src: "{{ role_path }}/templates/ceph-mon.service.j2"
+    dest: /etc/systemd/system/ceph-mon@.service
+    owner: "root"
+    group: "root"
+    mode: "0644"
+  notify:
+    - restart ceph mons
@@ -0,0 +1 @@
+docker/systemd.yml
@@ -1,15 +1,19 @@
 [Unit]
 Description=Ceph Monitor
+{% if container_binary == 'docker' %}
 After=docker.service
 Requires=docker.service
+{% else %}
+After=network.target
+{% endif %}
 
 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker rm ceph-mon-%i
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mon-%i
 ExecStartPre=/bin/sh -c '"$(command -v mkdir)" -p /etc/ceph /var/lib/ceph/mon'
-ExecStart=/usr/bin/docker run --rm --name ceph-mon-%i \
+ExecStart=/usr/bin/{{ container_binary }} run --rm --name ceph-mon-%i \
   --memory={{ ceph_mon_docker_memory_limit }} \
-{% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+{% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or container_binary == 'podman' -%}
   --cpus={{ ceph_mon_docker_cpu_limit }} \
 {% else -%}
   --cpu-quota={{ ceph_mon_docker_cpu_limit * 100000 }} \
@ -64,7 +68,7 @@ ExecStart=/usr/bin/docker run --rm --name ceph-mon-%i \
|
||||||
-e CEPH_DAEMON=MON \
|
-e CEPH_DAEMON=MON \
|
||||||
{{ ceph_mon_docker_extra_env }} \
|
{{ ceph_mon_docker_extra_env }} \
|
||||||
{{ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
|
{{ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
|
||||||
ExecStop=-/usr/bin/docker stop ceph-mon-%i
|
ExecStop=-/usr/bin/{{ container_binary }} stop ceph-mon-%i
|
||||||
ExecStopPost=-/bin/rm -f /var/run/ceph/{{ cluster }}-mon.{{ monitor_name }}.asok
|
ExecStopPost=-/bin/rm -f /var/run/ceph/{{ cluster }}-mon.{{ monitor_name }}.asok
|
||||||
Restart=always
|
Restart=always
|
||||||
RestartSec=10s
|
RestartSec=10s
|
||||||
|
|
|
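
The new [Unit] conditional exists because podman is daemonless: a podman-backed unit must not order itself after, or require, docker.service. A small sketch rendering the header with the plain jinja2 package (Ansible-only filters such as version_compare are left out, so this covers just the [Unit] block):

    from jinja2 import Template

    UNIT_HEADER = (
        "[Unit]\n"
        "Description=Ceph Monitor\n"
        "{% if container_binary == 'docker' %}\n"
        "After=docker.service\n"
        "Requires=docker.service\n"
        "{% else %}\n"
        "After=network.target\n"
        "{% endif %}\n"
    )

    for binary in ("docker", "podman"):
        print("###", binary)
        # blank lines remain where the {% %} tags sat; systemd ignores them
        print(Template(UNIT_HEADER).render(container_binary=binary))
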
@@ -62,18 +62,8 @@
   when:
     - ceph_nfs_dynamic_exports

-- name: generate systemd unit file
-  become: true
-  template:
-    src: "{{ role_path }}/templates/ceph-nfs.service.j2"
-    dest: /etc/systemd/system/ceph-nfs@.service
-    owner: "root"
-    group: "root"
-    mode: "0644"
-  when:
-    - containerized_deployment
-  notify:
-    - restart ceph nfss
+- name: include systemd.yml
+  include_tasks: systemd.yml

 - name: systemd start nfs container
   systemd:
@@ -0,0 +1,13 @@
+---
+- name: generate systemd unit file
+  become: true
+  template:
+    src: "{{ role_path }}/templates/ceph-nfs.service.j2"
+    dest: /etc/systemd/system/ceph-nfs@.service
+    owner: "root"
+    group: "root"
+    mode: "0644"
+  when:
+    - containerized_deployment
+  notify:
+    - restart ceph nfss
@@ -1,14 +1,18 @@
 [Unit]
 Description=NFS-Ganesha file server
 Documentation=http://github.com/nfs-ganesha/nfs-ganesha/wiki
+{% if container_binary == 'docker' %}
 After=docker.service
 Requires=docker.service
+{% else %}
+After=network.target
+{% endif %}

 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker rm ceph-nfs-%i
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-nfs-%i
 ExecStartPre=/usr/bin/mkdir -p /etc/ceph /etc/ganesha /var/lib/nfs/ganesha
-ExecStart=/usr/bin/docker run --rm --net=host \
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
 {% if not containerized_deployment_with_kv -%}
 -v /var/lib/ceph:/var/lib/ceph:z \
 -v /etc/ceph:/etc/ceph:z \
@@ -30,7 +34,7 @@ ExecStart=/usr/bin/docker run --rm --net=host \
 {{ ceph_nfs_docker_extra_env }} \
 --name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_hostname) }} \
 {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop ceph-nfs-%i
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-nfs-%i
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
@@ -58,16 +58,8 @@
     - devices is defined
     - devices | length > activated_osds.stdout_lines | length

-- name: generate ceph osd docker run script
-  become: true
-  template:
-    src: "{{ role_path }}/templates/ceph-osd-run.sh.j2"
-    dest: "{{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh"
-    owner: "root"
-    group: "root"
-    mode: "0744"
-  notify:
-    - restart ceph osds
+- name: include systemd.yml
+  include_tasks: systemd.yml
   when:
     - containerized_deployment

@@ -0,0 +1,11 @@
+---
+- name: generate ceph osd docker run script
+  become: true
+  template:
+    src: "{{ role_path }}/templates/ceph-osd-run.sh.j2"
+    dest: "{{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh"
+    owner: "root"
+    group: "root"
+    mode: "0744"
+  notify:
+    - restart ceph osds
@@ -14,7 +14,7 @@ DOCKER_ENV=""
 #############
 function id_to_device () {
 {% if dmcrypt | bool %}
-  docker run --rm --net=host --ulimit nofile=1024:4096 --ipc=host --pid=host --privileged=true -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /dev:/dev -v /etc/localtime:/etc/localtime:ro -e DEBUG=verbose -e CLUSTER={{ cluster }} {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} osd_ceph_disk_dmcrypt_data_map
+  {{ container_binary }} run --rm --net=host --ulimit nofile=1024:4096 --ipc=host --pid=host --privileged=true -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /dev:/dev -v /etc/localtime:/etc/localtime:ro -e DEBUG=verbose -e CLUSTER={{ cluster }} {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} osd_ceph_disk_dmcrypt_data_map
 {% endif %}
   DATA_PART=$(docker run --rm --ulimit nofile=1024:4096 --privileged=true -v /dev/:/dev/ -v /etc/ceph:/etc/ceph:z --entrypoint ceph-disk {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} list | grep ", osd\.${1}," | awk '{ print $1 }')
   if [[ "${DATA_PART}" =~ ^/dev/(cciss|nvme|loop) ]]; then
@@ -32,14 +32,14 @@ function expose_partitions () {
 # Jewel images prior to https://github.com/ceph/ceph-docker/pull/797
 REGEX="[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
 function expose_partitions {
-  if docker ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}; then
+  if {{ container_binary }} ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}; then
   if [[ ! -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log ]]; then
-  docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log
+  {{ container_binary }} logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log
   fi
   fi
-  if docker ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-${1}; then
+  if {{ container_binary }} ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-${1}; then
   if [[ ! -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log ]]; then
-  docker logs ceph-osd-prepare-{{ ansible_hostname }}-${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log
+  {{ container_binary }} logs ceph-osd-prepare-{{ ansible_hostname }}-${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log
   fi
   fi
   if [[ -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log ]]; then
@@ -88,7 +88,7 @@ fi
 numactl \
 {{ ceph_osd_numactl_opts }} \
 {% endif %}
-/usr/bin/docker run \
+/usr/bin/{{ container_binary }} run \
 --rm \
 --net=host \
 --privileged=true \
@@ -97,7 +97,7 @@ numactl \
 {% if osd_objectstore == 'filestore' -%}
 --memory={{ ceph_osd_docker_memory_limit }} \
 {% endif -%}
-{% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+{% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or container_binary == 'podman' -%}
 --cpus={{ cpu_limit }} \
 {% else -%}
 --cpu-quota={{ cpu_limit * 100000 }} \
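
The expose_partitions rewrite keeps one subtlety intact under both binaries: the prepare container runs with --rm, so its output is dumped to a log file once and all later parsing happens against the file rather than the possibly deleted container. A rough Python equivalent of that capture step (a sketch; the function name and arguments are illustrative):

    import os
    import subprocess

    def preserve_prepare_log(binary, container, log_path):
        # binary is 'docker' or 'podman'; capture the logs only once so
        # the file survives after the --rm container disappears.
        if os.path.exists(log_path):
            return
        result = subprocess.run([binary, 'logs', container],
                                capture_output=True, text=True)
        with open(log_path, 'w') as fh:
            fh.write(result.stdout)
            fh.write(result.stderr)
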
@@ -1,15 +1,19 @@
 # {{ ansible_managed }}
 [Unit]
 Description=Ceph OSD
+{% if container_binary == 'docker' %}
 After=docker.service
 Requires=docker.service
+{% else %}
+After=network.target
+{% endif %}

 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop ceph-osd-%i
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-osd-%i
-ExecStartPre=-/usr/bin/docker rm -f ceph-osd-%i
+ExecStartPre=-/usr/bin/{{ container_binary }} rm -f ceph-osd-%i
 ExecStart={{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh %i
-ExecStop=-/usr/bin/docker stop ceph-osd-%i
+ExecStop=-/usr/bin/{{ container_binary }} stop ceph-osd-%i
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
@@ -1,15 +1,6 @@
 ---
-# Use systemd to manage container on Atomic host
-- name: generate systemd unit file
-  become: true
-  template:
-    src: "{{ role_path }}/templates/ceph-rbd-mirror.service.j2"
-    dest: /etc/systemd/system/ceph-rbd-mirror@.service
-    owner: "root"
-    group: "root"
-    mode: "0644"
-  notify:
-    - restart ceph rbdmirrors
+- name: include systemd.yml
+  include_tasks: systemd.yml

 - name: systemd start rbd mirror container
   systemd:
@@ -0,0 +1,12 @@
+---
+# Use systemd to manage container on Atomic host
+- name: generate systemd unit file
+  become: true
+  template:
+    src: "{{ role_path }}/templates/ceph-rbd-mirror.service.j2"
+    dest: /etc/systemd/system/ceph-rbd-mirror@.service
+    owner: "root"
+    group: "root"
+    mode: "0644"
+  notify:
+    - restart ceph rbdmirrors
@@ -0,0 +1 @@
+docker/systemd.yml
@@ -1,15 +1,19 @@
 [Unit]
 Description=Ceph RBD mirror
+{% if container_binary == 'docker' %}
 After=docker.service
 Requires=docker.service
+{% else %}
+After=network.target
+{% endif %}

 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_hostname }}
-ExecStartPre=-/usr/bin/docker rm ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rbd-mirror-{{ ansible_hostname }}
-ExecStart=/usr/bin/docker run --rm --net=host \
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
 --memory={{ ceph_rbd_mirror_docker_memory_limit }} \
-{% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+{% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or container_binary == 'podman' -%}
 --cpus={{ ceph_rbd_mirror_docker_cpu_limit }} \
 {% else -%}
 --cpu-quota={{ ceph_rbd_mirror_docker_cpu_limit * 100000 }} \
@@ -29,7 +33,7 @@ ExecStart=/usr/bin/docker run --rm --net=host \
 --name=ceph-rbd-mirror-{{ ansible_hostname }} \
 {{ ceph_rbd_mirror_docker_extra_env }} \
 {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_hostname }}
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
@@ -1,28 +1,6 @@
 ---
-- name: generate systemd unit file
-  become: true
-  template:
-    src: "{{ role_path }}/templates/ceph-radosgw.service.j2"
-    dest: /etc/systemd/system/ceph-radosgw@.service
-    owner: "root"
-    group: "root"
-    mode: "0644"
-  notify:
-    - restart ceph rgws
-
-# For backward compatibility
-- name: disable old systemd unit ('ceph-rgw@'|'ceph-radosgw@radosgw.'|'ceph-radosgw@') if present
-  systemd:
-    name: "{{ item }}"
-    state: stopped
-    enabled: no
-    daemon_reload: yes
-  with_items:
-    - "ceph-rgw@{{ ansible_hostname }}"
-    - "ceph-radosgw@{{ ansible_hostname }}.service"
-    - "ceph-radosgw@radosgw.{{ ansible_hostname }}.service"
-    - ceph-radosgw@radosgw.gateway.service
-  ignore_errors: true
+- name: include systemd.yml
+  include_tasks: systemd.yml

 - name: systemd start rgw container
   systemd:
@@ -0,0 +1,25 @@
+---
+- name: generate systemd unit file
+  become: true
+  template:
+    src: "{{ role_path }}/templates/ceph-radosgw.service.j2"
+    dest: /etc/systemd/system/ceph-radosgw@.service
+    owner: "root"
+    group: "root"
+    mode: "0644"
+  notify:
+    - restart ceph rgws
+
+# For backward compatibility
+- name: disable old systemd unit ('ceph-rgw@'|'ceph-radosgw@radosgw.'|'ceph-radosgw@') if present
+  systemd:
+    name: "{{ item }}"
+    state: stopped
+    enabled: no
+    daemon_reload: yes
+  with_items:
+    - "ceph-rgw@{{ ansible_hostname }}"
+    - "ceph-radosgw@{{ ansible_hostname }}.service"
+    - "ceph-radosgw@radosgw.{{ ansible_hostname }}.service"
+    - ceph-radosgw@radosgw.gateway.service
+  ignore_errors: true
@@ -0,0 +1 @@
+docker/systemd.yml
@@ -1,16 +1,20 @@
 [Unit]
 Description=Ceph RGW
+{% if container_binary == 'docker' %}
 After=docker.service
 Requires=docker.service
+{% else %}
+After=network.target
+{% endif %}
 {% set cpu_limit = ansible_processor_vcpus|int if ceph_rgw_docker_cpu_limit|int > ansible_processor_vcpus|int else ceph_rgw_docker_cpu_limit|int %}

 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop ceph-rgw-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }}
-ExecStartPre=-/usr/bin/docker rm ceph-rgw-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rgw-{{ ansible_hostname }}
-ExecStart=/usr/bin/docker run --rm --net=host \
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
 --memory={{ ceph_rgw_docker_memory_limit }} \
-{% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+{% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or container_binary == 'podman' -%}
 --cpus={{ cpu_limit }} \
 {% else -%}
 --cpu-quota={{ cpu_limit * 100000 }} \
@@ -33,7 +37,7 @@ ExecStart=/usr/bin/docker run --rm --net=host \
 --name=ceph-rgw-{{ ansible_hostname }} \
 {{ ceph_rgw_docker_extra_env }} \
 {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop ceph-rgw-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }}
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
@@ -38,6 +38,7 @@ def node(host, request):
     osd_scenario = ansible_vars.get("osd_scenario")
     lvm_scenario = osd_scenario in ['lvm', 'lvm-batch']
     devices = ansible_vars.get("devices", [])
+    container_binary = ''
     ceph_release_num = {
         'jewel': 10,
         'kraken': 11,
@@ -98,6 +99,11 @@ def node(host, request):
     cluster_name = ansible_vars.get("cluster", "ceph")
     conf_path = "/etc/ceph/{}.conf".format(cluster_name)

+    if docker:
+        container_binary = "docker"
+    if docker and str_to_bool(os.environ.get('IS_PODMAN', False)):  # noqa E501
+        container_binary = "podman"
+
     data = dict(
         address=address,
         subnet=subnet,
@@ -112,6 +118,7 @@ def node(host, request):
         ceph_stable_release=ceph_stable_release,
         ceph_release_num=ceph_release_num,
         rolling_update=rolling_update,
+        container_binary=container_binary
     )
     return data
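
This is the hook the CI uses to run one test suite against both binaries: the docker2podman tox scenario exports IS_PODMAN=TRUE for its second pass, and the fixture flips container_binary accordingly. str_to_bool itself is defined elsewhere in conftest.py and does not appear in this diff; a plausible sketch of the behaviour the hunk relies on:

    def str_to_bool(val):
        # Accept the raw False default from os.environ.get as well as
        # strings such as 'TRUE' exported by the tox scenario.
        if isinstance(val, bool):
            return val
        return str(val).strip().lower() in ('true', 'yes', '1')

    assert str_to_bool('TRUE') is True
    assert str_to_bool(False) is False
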
@@ -0,0 +1,8 @@
+- hosts: all
+  gather_facts: true
+  become: true
+  tasks:
+    - name: install podman
+      package:
+        name: podman
+        state: present
@@ -0,0 +1 @@
+../../../Vagrantfile
@@ -0,0 +1,37 @@
+{
+  "ceph_conf_overrides": {
+    "global": {
+      "osd_pool_default_pg_num": 12,
+      "osd_pool_default_size": 1,
+      "mon_allow_pool_size_one": true,
+      "mon_warn_on_pool_no_redundancy": false
+    }
+  },
+  "cephfs_pools": [
+    {
+      "name": "cephfs_data",
+      "pg_num": 8,
+      "pgp_num": 8,
+      "rule_name": "replicated_rule",
+      "type": 1,
+      "erasure_profile": "",
+      "expected_num_objects": "",
+      "application": "cephfs",
+      "size": 3,
+      "min_size": 0
+    },
+    {
+      "name": "cephfs_metadata",
+      "pg_num": 8,
+      "pgp_num": 8,
+      "rule_name": "replicated_rule",
+      "type": 1,
+      "erasure_profile": "",
+      "expected_num_objects": "",
+      "application": "cephfs",
+      "size": 3,
+      "min_size": 0
+    }
+  ],
+  "ceph_mon_docker_memory_limit": "2g"
+}
@@ -0,0 +1,44 @@
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+container_binary: docker
+
+containerized_deployment: True
+monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+ceph_mon_docker_subnet: "{{ public_network }}"
+ceph_docker_on_openstack: False
+public_network: "192.168.58.0/24"
+cluster_network: "192.168.59.0/24"
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+ceph_conf_overrides:
+  global:
+    mon_allow_pool_size_one: true
+    mon_warn_on_pool_no_redundancy: false
+    osd_pool_default_size: 1
+openstack_config: False
+openstack_glance_pool:
+  name: "images"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "HDD"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  size: 1
+openstack_cinder_pool:
+  name: "volumes"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "HDD"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  size: 1
+openstack_pools:
+  - "{{ openstack_glance_pool }}"
+  - "{{ openstack_cinder_pool }}"
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
@@ -0,0 +1,22 @@
+---
+user_config: True
+copy_admin_key: True
+test:
+  name: "test"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "HDD"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+test2:
+  name: "test2"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "HDD"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+pools:
+  - "{{ test }}"
+  - "{{ test2 }}"
@@ -0,0 +1,3 @@
+---
+gateway_ip_list: "{{ ansible_all_ipv4_addresses | ipaddr(public_network) | first }}"
+generate_crt: True
@@ -0,0 +1,11 @@
+---
+create_crush_tree: False
+crush_rule_config: False
+crush_rule_hdd:
+  name: HDD
+  root: default
+  type: host
+  class: hdd
+  default: true
+crush_rules:
+  - "{{ crush_rule_hdd }}"
@@ -0,0 +1,7 @@
+---
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+devices:
+  - /dev/sda
+  - /dev/sdb
+  - /dev/sdc
@@ -0,0 +1,7 @@
+---
+copy_admin_key: True
+rgw_create_pools:
+  foo:
+    pg_num: 16
+  bar:
+    pg_num: 16
@@ -0,0 +1,29 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
+
+[mdss]
+osd0
+
+[rgws]
+osd0
+
+[nfss]
+nfs0
+
+[rbdmirrors]
+rbd-mirror0
+
+[iscsigws]
+iscsi-gw0
+
+[all:vars]
+nfs_ganesha_stable=True
+nfs_ganesha_dev=False
+nfs_ganesha_stable_branch="V2.7-stable"
+nfs_ganesha_flavor="ceph_master"
@@ -0,0 +1,32 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 1
+nfs_vms: 1
+rbd_mirror_vms: 1
+client_vms: 0
+iscsi_gw_vms: 1
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.58
+cluster_subnet: 192.168.59
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+vagrant_box: centos/7
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
@@ -21,13 +21,15 @@ class TestMDSs(object):

     def test_mds_is_up(self, node, host):
         hostname = node["vars"]["inventory_hostname"]
+        container_binary = node['container_binary']
         if node['docker']:
-            docker_exec_cmd = 'docker exec ceph-mds-{hostname}'.format(hostname=hostname)
+            container_exec_cmd = '{container_binary} exec ceph-mds-{hostname}'.format(  # noqa E501
+                hostname=hostname, container_binary=container_binary)
         else:
-            docker_exec_cmd = ''
+            container_exec_cmd = ''

-        cmd = "sudo {docker_exec_cmd} ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
-            docker_exec_cmd=docker_exec_cmd,
+        cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
+            container_exec_cmd=container_exec_cmd,
             cluster=node['cluster_name']
         )
         cluster_status = json.loads(host.check_output(cmd))
@@ -22,12 +22,13 @@ class TestMGRs(object):
     def test_mgr_is_up(self, node, host):
         hostname=node["vars"]["inventory_hostname"]
        cluster=node["cluster_name"]
+        container_binary=node['container_binary']
         if node['docker']:
-            docker_exec_cmd = 'docker exec ceph-mgr-{hostname}'.format(hostname=hostname)
+            container_exec_cmd = '{container_binary} exec ceph-mgr-{hostname}'.format(container_binary=container_binary, hostname=hostname)
         else:
-            docker_exec_cmd = ''
+            container_exec_cmd = ''
-        cmd = "sudo {docker_exec_cmd} ceph --name mgr.{hostname} --keyring /var/lib/ceph/mgr/{cluster}-{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
-            docker_exec_cmd=docker_exec_cmd,
+        cmd = "sudo {container_exec_cmd} ceph --name mgr.{hostname} --keyring /var/lib/ceph/mgr/{cluster}-{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
+            container_exec_cmd=container_exec_cmd,
             hostname=node["vars"]["inventory_hostname"],
             cluster=cluster
         )
@@ -28,10 +28,10 @@ class TestMons(object):
         output = host.check_output(cmd)
         assert output.strip().startswith("cluster")

-    def test_ceph_config_has_inital_members_line(self, node, File):
-        assert File(node["conf_path"]).contains("^mon initial members = .*$")
+    def test_ceph_config_has_inital_members_line(self, node, host):
+        assert host.file(node["conf_path"]).contains("^mon initial members = .*$")

-    def test_initial_members_line_has_correct_value(self, node, host, File):
+    def test_initial_members_line_has_correct_value(self, node, host):
         mon_initial_members_line = host.check_output("grep 'mon initial members = ' /etc/ceph/{cluster}.conf".format(cluster=node['cluster_name']))
         result = True
         for host in node["vars"]["groups"]["mons"]:
@@ -26,12 +26,13 @@ class TestNFSs(object):
     def test_nfs_is_up(self, node, host):
         hostname = node["vars"]["inventory_hostname"]
         cluster = node['cluster_name']
+        container_binary = node['container_binary']
         if node['docker']:
-            docker_exec_cmd = 'docker exec ceph-nfs-{hostname}'.format(hostname=hostname)
+            container_exec_cmd = '{container_binary} exec ceph-nfs-{hostname}'.format(container_binary=container_binary, hostname=hostname)
         else:
-            docker_exec_cmd = ''
+            container_exec_cmd = ''
-        cmd = "sudo {docker_exec_cmd} ceph --name client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
-            docker_exec_cmd=docker_exec_cmd,
+        cmd = "sudo {container_exec_cmd} ceph --name client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
+            container_exec_cmd=container_exec_cmd,
             hostname=hostname,
             cluster=cluster
         )
@@ -12,9 +12,9 @@ class TestOSDs(object):
         osds = cmd.stdout.rstrip("\n").split("\n")
         return osds

-    def _get_docker_exec_cmd(self, host):
+    def _get_docker_exec_cmd(self, node, host):
         osd_id = host.check_output(
-            "docker ps -q --filter='name=ceph-osd' | head -1")
+            "{container_binary} ps -q --filter='name=ceph-osd' | head -1".format(container_binary=node['container_binary']))
         return osd_id

@@ -86,8 +86,10 @@ class TestOSDs(object):

     @pytest.mark.docker
     def test_all_docker_osds_are_up_and_in(self, node, host):
-        cmd = "sudo docker exec {osd_id} ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(
-            osd_id=self._get_docker_exec_cmd(host),
+        container_binary = node['container_binary']
+        cmd = "sudo {container_binary} exec {osd_id} ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(
+            container_binary=container_binary,
+            osd_id=self._get_docker_exec_cmd(node, host),
             cluster=node["cluster_name"]
         )
         output = json.loads(host.check_output(cmd))
@@ -30,15 +30,16 @@ class TestRbdMirrors(object):
     def test_rbd_mirror_is_up(self, node, host):
         hostname=node["vars"]["inventory_hostname"]
         cluster=node["cluster_name"]
+        container_binary = node["container_binary"]
         daemons = []
         if node['docker']:
-            docker_exec_cmd = 'docker exec ceph-rbd-mirror-{hostname}'.format(hostname=hostname)
+            container_exec_cmd = '{container_binary} exec ceph-rbd-mirror-{hostname}'.format(container_binary=container_binary, hostname=hostname)
         else:
-            docker_exec_cmd = ''
+            container_exec_cmd = ''
         hostname = node["vars"]["inventory_hostname"]
         cluster = node['cluster_name']
-        cmd = "sudo {docker_exec_cmd} ceph --name client.bootstrap-rbd --keyring /var/lib/ceph/bootstrap-rbd/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
-            docker_exec_cmd=docker_exec_cmd,
+        cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-rbd --keyring /var/lib/ceph/bootstrap-rbd/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
+            container_exec_cmd=container_exec_cmd,
             hostname=hostname,
             cluster=cluster
         )
@@ -25,12 +25,13 @@ class TestRGWs(object):
     def test_rgw_is_up(self, node, host):
         hostname=node["vars"]["inventory_hostname"]
         cluster=node["cluster_name"]
+        container_binary=node['container_binary']
         if node['docker']:
-            docker_exec_cmd = 'docker exec ceph-rgw-{hostname}'.format(hostname=hostname)
+            container_exec_cmd = '{container_binary} exec ceph-rgw-{hostname}'.format(container_binary=container_binary, hostname=hostname)
         else:
-            docker_exec_cmd = ''
+            container_exec_cmd = ''
-        cmd = "sudo {docker_exec_cmd} ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
-            docker_exec_cmd=docker_exec_cmd,
+        cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
+            container_exec_cmd=container_exec_cmd,
             hostname=hostname,
             cluster=cluster
         )
@@ -37,7 +37,9 @@ class TestRGWs(object):
     def test_docker_rgw_tuning_pools_are_set(self, node, host):
         hostname = node["vars"]["inventory_hostname"]
         cluster = node['cluster_name']
-        cmd = "sudo docker exec ceph-rgw-{hostname} ceph --cluster={cluster} -n client.rgw.{hostname} --connect-timeout 5 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring osd dump".format(
+        container_binary = node['container_binary']
+        cmd = "sudo {container_binary} exec ceph-rgw-{hostname} ceph --cluster={cluster} -n client.rgw.{hostname} --connect-timeout 5 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring osd dump".format(
+            container_binary=container_binary,
             hostname=hostname,
             cluster=cluster
         )
@@ -22,8 +22,8 @@ class TestInstall(object):

 class TestCephConf(object):

-    def test_ceph_config_has_mon_host_line(self, node, File):
-        assert File(node["conf_path"]).contains("^mon host = .*$")
+    def test_ceph_config_has_mon_host_line(self, node, host):
+        assert host.file(node["conf_path"]).contains("^mon host = .*$")

     def test_mon_host_line_has_correct_value(self, node, host):
         mon_host_line = host.check_output("grep 'mon host = ' /etc/ceph/{cluster}.conf".format(cluster=node['cluster_name']))
@@ -1,6 +1,6 @@
 # These are Python requirements needed to run the functional tests
 six==1.10.0
-testinfra==1.19.0
+testinfra==3.4.0
 pytest-xdist==1.27.0
 pytest==3.6.1
 ansible~=2.6,<2.7
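
The bump from testinfra 1.19.0 to 3.4.0 is what drives the File-fixture changes in the tests above: testinfra 3.x removed the per-module fixtures (File, Command, and friends) in favour of the single host fixture. A short sketch of the modern style (the path and assertions are illustrative):

    def test_ceph_conf_is_present(host):
        # testinfra >= 3 exposes files through the host fixture
        conf = host.file('/etc/ceph/ceph.conf')
        assert conf.exists
        assert conf.user == 'root'
        assert conf.contains('^mon host = .*$')
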
@@ -0,0 +1,68 @@
+[tox]
+envlist = centos-container-docker_to_podman
+
+skipsdist = True
+
+[testenv]
+whitelist_externals =
+    vagrant
+    bash
+    pip
+    sleep
+    rm
+    cp
+passenv=*
+sitepackages=True
+setenv=
+  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
+  ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
+  ANSIBLE_CALLBACK_WHITELIST = profile_tasks
+  ANSIBLE_KEEP_REMOTE_FILES = 1
+  ANSIBLE_CACHE_PLUGIN = memory
+  ANSIBLE_GATHERING = implicit
+  # only available for ansible >= 2.5
+  ANSIBLE_STDOUT_CALLBACK = yaml
+  # Set the vagrant box image to use
+  CEPH_ANSIBLE_VAGRANT_BOX = centos/7
+
+deps= -r{toxinidir}/tests/requirements.txt
+changedir= {toxinidir}/tests/functional/docker2podman
+
+commands=
+  bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
+  bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
+
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/setup.yml
+
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/site-container.yml.sample --extra-vars "\
+      delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
+      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
+      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
+      ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} \
+      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
+      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-luminous} \
+  "
+
+  pip uninstall -y ansible
+  pip install ansible==2.10.0a2
+  cp {toxinidir}/infrastructure-playbooks/docker-to-podman.yml {toxinidir}/docker-to-podman.yml
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/docker-to-podman.yml --extra-vars "\
+      delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
+      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
+      ansible_python_interpreter=/usr/bin/python2 \
+      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
+      ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} \
+      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
+      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-luminous} \
+  "
+  # install podman
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/docker2podman.yml -e ansible_python_interpreter=/usr/bin/python2
+
+  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+  # reboot machines
+  ansible-playbook -i {changedir}/hosts {toxinidir}/tests/functional/reboot.yml
+  # wait 120 sec and run tests (there's a chance nodes are still downloading container image after the reboot)
+  sleep 120
+  bash -c 'IS_PODMAN=TRUE py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests'
+
+  vagrant destroy -f
@@ -78,6 +78,6 @@ commands=
       'dedicated_devices': [/dev/sdc,/dev/sdc], \
       'osd_scenario': 'non-collocated' \}"

-  bash -c "CEPH_STABLE_RELEASE={env:UPDATE_CEPH_STABLE_RELEASE:luminous} ROLLING_UPDATE=TRUE py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests"
+  bash -c "CEPH_STABLE_RELEASE={env:UPDATE_CEPH_STABLE_RELEASE:luminous} ROLLING_UPDATE=TRUE py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"

   vagrant destroy --force

tox.ini
@@ -73,7 +73,7 @@ commands=
       ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-luminous} \
   "
   # test that the cluster can be redeployed in a healthy state
-  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
+  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

 [purge-lvm]
 commands=
@@ -99,7 +99,7 @@ commands=
       ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-luminous} \
   "
   # test that the cluster can be redeployed in a healthy state
-  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
+  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

 [shrink-mon]
 commands=
@@ -141,7 +141,7 @@ commands=
       ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-luminous} \
   "

-  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-switch-to-containers {toxinidir}/tests/functional/tests
+  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-switch-to-containers --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

 [add-osds]
 commands=
@@ -157,7 +157,7 @@ commands=
       ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
       ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-luminous} \
   "
-  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 {toxinidir}/tests/functional/tests
+  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

 [rgw-multisite]
 commands=
@@ -268,14 +268,14 @@ commands=
   # wait 30sec for services to be ready
   sleep 30
   # test cluster state using ceph-ansible tests
-  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
+  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

   # reboot all vms
   all_daemons: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml

   # wait 30sec for services to be ready
   # retest to ensure cluster came back up correctly after rebooting
-  all_daemons: py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
+  all_daemons: py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

   # handlers/idempotency test
   all_daemons,all_in_one: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-luminous} copy_admin_key={env:COPY_ADMIN_KEY:False} " --extra-vars @ceph-override.json