infra: introduce docker to podman playbook

This isn't backported from master because there are too many changes
between stable-3.2 and the newer branches.

NOTE:
This playbook *doesn't* add podman support in stable-3.2 at all.
This is a TripleO-dedicated playbook intended to be run early in the
FFU (Fast Forward Upgrade) workflow in order to prepare the OS upgrade.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1853457

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/5524/head
Guillaume Abrioux 2020-07-06 09:27:38 +02:00 committed by Dimitri Savineau
parent 6daa2c9d22
commit 90f3f61548
64 changed files with 1106 additions and 613 deletions
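For reference, the new playbook lands in infrastructure-playbooks/docker-to-podman.yml. A minimal sketch of how it would be invoked against a stable-3.2 deployment, mirroring what the docker_to_podman tox scenario below does (the inventory path is a placeholder, and the copy step is only there so role paths resolve from the repository root, as the tox commands also assume):

    cp infrastructure-playbooks/docker-to-podman.yml .
    ansible-playbook -vv -i <inventory> docker-to-podman.yml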

View File

@ -555,6 +555,7 @@ dummy:
##########
# DOCKER #
##########
#container_binary: docker
#docker_exec_cmd:
#docker: false
#ceph_docker_image: "ceph/daemon"

View File

@ -555,6 +555,7 @@ ceph_rhcs_version: 3
##########
# DOCKER #
##########
#container_binary: docker
#docker_exec_cmd:
#docker: false
ceph_docker_image: "rhceph/rhceph-3-rhel7"

View File

@ -0,0 +1,127 @@
# This playbook *doesn't* add podman support in stable-3.2 at all.
# This is a tripleO dedicated playbook which is intended to be run
# early during FFU workflow in order to prepare the OS upgrade.
- hosts:
- mons
- osds
- mdss
- rgws
- nfss
- rbdmirrors
- clients
- iscsigws
- iscsi-gws # for backward compatibility only!
- mgrs
gather_facts: false
become: True
any_errors_fatal: true
vars:
delegate_facts_host: True
roles:
- ceph-defaults
post_tasks:
- name: gather facts
setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, [])
- name: gather and delegate facts
setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
delegate_to: "{{ item }}"
delegate_facts: True
with_items: "{{ groups['all'] | difference(groups.get(client_group_name | default('clients'), [])) }}"
run_once: true
when: delegate_facts_host | bool
- hosts:
- "{{ mon_group_name | default('mons') }}"
- "{{ osd_group_name | default('osds') }}"
- "{{ mds_group_name | default('mdss') }}"
- "{{ rgw_group_name | default('rgws') }}"
- "{{ nfs_group_name | default('nfss') }}"
- "{{ mgr_group_name | default('mgrs') }}"
- "{{ iscsi_gw_group_name | default('iscsigws') }}"
- "{{ rbdmirror_group_name | default('rbdmirrors') }}"
gather_facts: false
become: true
roles:
- ceph-defaults
post_tasks:
- import_role:
name: ceph-facts
- import_role:
name: ceph-handler
- import_role:
name: ceph-docker-common
tasks_from: ceph_docker_version.yml
- name: set_fact docker2podman and container_binary
set_fact:
docker2podman: True
container_binary: podman
- import_role:
name: ceph-mon
tasks_from: docker2podman.yml
when: inventory_hostname in groups.get(mon_group_name, [])
- import_role:
name: ceph-iscsi-gw
tasks_from: docker2podman.yml
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
- import_role:
name: ceph-mds
tasks_from: systemd.yml
when: inventory_hostname in groups.get(mds_group_name, [])
- import_role:
name: ceph-mgr
tasks_from: docker2podman.yml
when: inventory_hostname in groups.get(mgr_group_name, [])
- import_role:
name: ceph-nfs
tasks_from: systemd.yml
when: inventory_hostname in groups.get(nfs_group_name, [])
- import_role:
name: ceph-osd
tasks_from: systemd.yml
when: inventory_hostname in groups.get(osd_group_name, [])
- import_role:
name: ceph-rbd-mirror
tasks_from: docker2podman.yml
when: inventory_hostname in groups.get(rbdmirror_group_name, [])
- import_role:
name: ceph-rgw
tasks_from: docker2podman.yml
when: inventory_hostname in groups.get(rgw_group_name, [])
# This is needed, otherwise containers won't come back after the reboot
# because this file is added later by the call of rolling_update playbook.
- name: add /etc/tmpfiles.d/ceph-common.conf
copy:
content: "d /run/ceph 0770 root root -"
dest: /etc/tmpfiles.d/ceph-common.conf
owner: root
group: root
mode: 0644
- name: reload systemd daemon
systemd:
daemon_reload: yes
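After the playbook above has run, and again after the node reboots (the tmpfiles.d entry is what recreates /run/ceph at boot so the containers come back), a quick sanity check on a monitor host could look like the following; the unit instance name is a placeholder and running podman/systemctl by hand is an assumption, not part of the change:

    systemctl status ceph-mon@<short-hostname>
    podman ps --filter name=ceph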

View File

@ -547,6 +547,7 @@ ceph_tcmalloc_max_total_thread_cache: 0
##########
# DOCKER #
##########
container_binary: docker
docker_exec_cmd:
docker: false
ceph_docker_image: "ceph/daemon"

View File

@ -0,0 +1,10 @@
---
- name: get docker version
command: docker --version
changed_when: false
check_mode: no
register: ceph_docker_version
- name: set_fact ceph_docker_version ceph_docker_version.stdout.split
set_fact:
ceph_docker_version: "{{ ceph_docker_version.stdout.split(' ')[2] }}"

View File

@ -23,15 +23,8 @@
when:
- mon_use_fqdn
- name: get docker version
command: docker --version
changed_when: false
check_mode: no
register: ceph_docker_version
- name: set_fact ceph_docker_version ceph_docker_version.stdout.split
set_fact:
ceph_docker_version: "{{ ceph_docker_version.stdout.split(' ')[2] }}"
- name: include ceph_docker_version.yml
include_tasks: ceph_docker_version.yml
# Only include 'checks.yml' when :
# we are deploying containers without kv AND host is either a mon OR a nfs OR an osd,

View File

@ -1,24 +1,28 @@
---
- name: update apt cache
- name: handlers
when:
- not docker2podman | default(False) | bool
block:
- name: update apt cache
apt:
update-cache: yes
when:
- ansible_os_family == 'Debian'
- name: unset noup flag
- name: unset noup flag
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd unset noup"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: False
# We only want to restart on hosts that have called the handler.
# This var is set when the handler is called, and unset after the
# restart to ensure only the correct hosts are restarted.
- name: set _mon_handler_called before restart
# We only want to restart on hosts that have called the handler.
# This var is set when the handler is called, and unset after the
# restart to ensure only the correct hosts are restarted.
- name: set _mon_handler_called before restart
set_fact:
_mon_handler_called: True
listen: "restart ceph mons"
- name: copy mon restart script
- name: copy mon restart script
template:
src: restart_mon_daemon.sh.j2
dest: /tmp/restart_mon_daemon.sh
@ -29,7 +33,7 @@
when:
- mon_group_name in group_names
- name: restart ceph mon daemon(s) - non container
- name: restart ceph mon daemon(s) - non container
command: /usr/bin/env bash /tmp/restart_mon_daemon.sh
listen: "restart ceph mons"
when:
@ -42,7 +46,7 @@
delegate_to: "{{ item }}"
run_once: True
- name: restart ceph mon daemon(s) - container
- name: restart ceph mon daemon(s) - container
command: /usr/bin/env bash /tmp/restart_mon_daemon.sh
listen: "restart ceph mons"
when:
@ -56,23 +60,23 @@
delegate_to: "{{ item }}"
run_once: True
- name: set _mon_handler_called after restart
- name: set _mon_handler_called after restart
set_fact:
_mon_handler_called: False
listen: "restart ceph mons"
- name: set _osd_handler_called before restart
- name: set _osd_handler_called before restart
set_fact:
_osd_handler_called: True
listen: "restart ceph osds"
# This does not just restart OSDs but everything else too. Unfortunately
# at this time the ansible role does not have an OSD id list to use
# for restarting them specifically.
# This does not need to run during a rolling update as the playbook will
# restart all OSDs using the tasks "start ceph osd" or
# "restart containerized ceph osd"
- name: copy osd restart script
# This does not just restart OSDs but everything else too. Unfortunately
# at this time the ansible role does not have an OSD id list to use
# for restarting them specifically.
# This does not need to run during a rolling update as the playbook will
# restart all OSDs using the tasks "start ceph osd" or
# "restart containerized ceph osd"
- name: copy osd restart script
template:
src: restart_osd_daemon.sh.j2
dest: /tmp/restart_osd_daemon.sh
@ -84,7 +88,7 @@
- osd_group_name in group_names
- not rolling_update
- name: restart ceph osds daemon(s) - non container
- name: restart ceph osds daemon(s) - non container
command: /usr/bin/env bash /tmp/restart_osd_daemon.sh
listen: "restart ceph osds"
when:
@ -101,7 +105,7 @@
delegate_to: "{{ item }}"
run_once: True
- name: restart ceph osds daemon(s) - container
- name: restart ceph osds daemon(s) - container
command: /usr/bin/env bash /tmp/restart_osd_daemon.sh
listen: "restart ceph osds"
when:
@ -118,17 +122,17 @@
delegate_to: "{{ item }}"
run_once: True
- name: set _osd_handler_called after restart
- name: set _osd_handler_called after restart
set_fact:
_osd_handler_called: False
listen: "restart ceph osds"
- name: set _mds_handler_called before restart
- name: set _mds_handler_called before restart
set_fact:
_mds_handler_called: True
listen: "restart ceph mdss"
- name: copy mds restart script
- name: copy mds restart script
template:
src: restart_mds_daemon.sh.j2
dest: /tmp/restart_mds_daemon.sh
@ -139,7 +143,7 @@
when:
- mds_group_name in group_names
- name: restart ceph mds daemon(s) - non container
- name: restart ceph mds daemon(s) - non container
command: /usr/bin/env bash /tmp/restart_mds_daemon.sh
listen: "restart ceph mdss"
when:
@ -152,7 +156,7 @@
delegate_to: "{{ item }}"
run_once: True
- name: restart ceph mds daemon(s) - container
- name: restart ceph mds daemon(s) - container
command: /usr/bin/env bash /tmp/restart_mds_daemon.sh
listen: "restart ceph mdss"
when:
@ -166,17 +170,17 @@
delegate_to: "{{ item }}"
run_once: True
- name: set _mds_handler_called after restart
- name: set _mds_handler_called after restart
set_fact:
_mds_handler_called: False
listen: "restart ceph mdss"
- name: set _rgw_handler_called before restart
- name: set _rgw_handler_called before restart
set_fact:
_rgw_handler_called: True
listen: "restart ceph rgws"
- name: copy rgw restart script
- name: copy rgw restart script
template:
src: restart_rgw_daemon.sh.j2
dest: /tmp/restart_rgw_daemon.sh
@ -187,7 +191,7 @@
when:
- rgw_group_name in group_names
- name: restart ceph rgw daemon(s) - non container
- name: restart ceph rgw daemon(s) - non container
command: /usr/bin/env bash /tmp/restart_rgw_daemon.sh
listen: "restart ceph rgws"
when:
@ -200,7 +204,7 @@
delegate_to: "{{ item }}"
run_once: True
- name: restart ceph rgw daemon(s) - container
- name: restart ceph rgw daemon(s) - container
command: /usr/bin/env bash /tmp/restart_rgw_daemon.sh
listen: "restart ceph rgws"
when:
@ -214,17 +218,17 @@
delegate_to: "{{ item }}"
run_once: True
- name: set _rgw_handler_called after restart
- name: set _rgw_handler_called after restart
set_fact:
_rgw_handler_called: False
listen: "restart ceph rgws"
- name: set _nfs_handler_called before restart
- name: set _nfs_handler_called before restart
set_fact:
_nfs_handler_called: True
listen: "restart ceph nfss"
- name: copy nfs restart script
- name: copy nfs restart script
template:
src: restart_nfs_daemon.sh.j2
dest: /tmp/restart_nfs_daemon.sh
@ -235,7 +239,7 @@
when:
- nfs_group_name in group_names
- name: restart ceph nfs daemon(s) - non container
- name: restart ceph nfs daemon(s) - non container
command: /usr/bin/env bash /tmp/restart_nfs_daemon.sh
listen: "restart ceph nfss"
when:
@ -248,7 +252,7 @@
delegate_to: "{{ item }}"
run_once: True
- name: restart ceph nfs daemon(s) - container
- name: restart ceph nfs daemon(s) - container
command: /usr/bin/env bash /tmp/restart_nfs_daemon.sh
listen: "restart ceph nfss"
when:
@ -262,17 +266,17 @@
delegate_to: "{{ item }}"
run_once: True
- name: set _nfs_handler_called after restart
- name: set _nfs_handler_called after restart
set_fact:
_nfs_handler_called: False
listen: "restart ceph nfss"
- name: set _rbdmirror_handler_called before restart
- name: set _rbdmirror_handler_called before restart
set_fact:
_rbdmirror_handler_called: True
listen: "restart ceph rbdmirrors"
- name: copy rbd mirror restart script
- name: copy rbd mirror restart script
template:
src: restart_rbd_mirror_daemon.sh.j2
dest: /tmp/restart_rbd_mirror_daemon.sh
@ -283,7 +287,7 @@
when:
- rbdmirror_group_name in group_names
- name: restart ceph rbd mirror daemon(s) - non container
- name: restart ceph rbd mirror daemon(s) - non container
command: /usr/bin/env bash /tmp/restart_rbd_mirror_daemon.sh
listen: "restart ceph rbdmirrors"
when:
@ -296,7 +300,7 @@
delegate_to: "{{ item }}"
run_once: True
- name: restart ceph rbd mirror daemon(s) - container
- name: restart ceph rbd mirror daemon(s) - container
command: /usr/bin/env bash /tmp/restart_rbd_mirror_daemon.sh
listen: "restart ceph rbdmirrors"
when:
@ -310,17 +314,17 @@
delegate_to: "{{ item }}"
run_once: True
- name: set _rbdmirror_handler_called after restart
- name: set _rbdmirror_handler_called after restart
set_fact:
_rbdmirror_handler_called: False
listen: "restart ceph rbdmirrors"
- name: set _mgr_handler_called before restart
- name: set _mgr_handler_called before restart
set_fact:
_mgr_handler_called: True
listen: "restart ceph mgrs"
- name: copy mgr restart script
- name: copy mgr restart script
template:
src: restart_mgr_daemon.sh.j2
dest: /tmp/restart_mgr_daemon.sh
@ -331,7 +335,7 @@
when:
- mgr_group_name in group_names
- name: restart ceph mgr daemon(s) - non container
- name: restart ceph mgr daemon(s) - non container
command: /usr/bin/env bash /tmp/restart_mgr_daemon.sh
listen: "restart ceph mgrs"
when:
@ -344,7 +348,7 @@
delegate_to: "{{ item }}"
run_once: True
- name: restart ceph mgr daemon(s) - container
- name: restart ceph mgr daemon(s) - container
command: /usr/bin/env bash /tmp/restart_mgr_daemon.sh
listen: "restart ceph mgrs"
when:
@ -358,17 +362,17 @@
delegate_to: "{{ item }}"
run_once: True
- name: set _mgr_handler_called after restart
- name: set _mgr_handler_called after restart
set_fact:
_mgr_handler_called: False
listen: "restart ceph mgrs"
- name: set _tcmu_runner_handler_called before restart
- name: set _tcmu_runner_handler_called before restart
set_fact:
_tcmu_runner_handler_called: True
listen: "restart ceph tcmu-runner"
- name: copy tcmu-runner restart script
- name: copy tcmu-runner restart script
template:
src: restart_tcmu_runner.sh.j2
dest: /tmp/restart_tcmu_runner.sh
@ -379,7 +383,7 @@
when:
- iscsi_gw_group_name in group_names
- name: restart tcmu-runner
- name: restart tcmu-runner
command: /usr/bin/env bash /tmp/restart_tcmu_runner.sh
listen: "restart ceph tcmu-runner"
when:
@ -391,17 +395,17 @@
delegate_to: "{{ item }}"
run_once: True
- name: set _tcmu_runner_handler_called after restart
- name: set _tcmu_runner_handler_called after restart
set_fact:
_tcmu_runner_handler_called: False
listen: "restart ceph tcmu-runner"
- name: set _rbd_target_gw_handler_called before restart
- name: set _rbd_target_gw_handler_called before restart
set_fact:
_rbd_target_gw_handler_called: True
listen: "restart ceph rbd-target-gw"
- name: copy rbd-target-gw restart script
- name: copy rbd-target-gw restart script
template:
src: restart_rbd_target_gw.sh.j2
dest: /tmp/restart_rbd_target_gw.sh
@ -412,7 +416,7 @@
when:
- iscsi_gw_group_name in group_names
- name: restart rbd-target-gw
- name: restart rbd-target-gw
command: /usr/bin/env bash /tmp/restart_rbd_target_gw.sh
listen: "restart ceph rbd-target-gw"
when:
@ -424,17 +428,17 @@
delegate_to: "{{ item }}"
run_once: True
- name: set _rbd_target_gw_handler_called after restart
- name: set _rbd_target_gw_handler_called after restart
set_fact:
_rbd_target_gw_handler_called: False
listen: "restart ceph rbd-target-gw"
- name: set _rbd_target_api_handler_called before restart
- name: set _rbd_target_api_handler_called before restart
set_fact:
_rbd_target_api_handler_called: True
listen: "restart ceph rbd-target-api"
- name: copy rbd-target-api restart script
- name: copy rbd-target-api restart script
template:
src: restart_rbd_target_api.sh.j2
dest: /tmp/restart_rbd_target_api.sh
@ -445,7 +449,7 @@
when:
- iscsi_gw_group_name in group_names
- name: restart rbd-target-api
- name: restart rbd-target-api
command: /usr/bin/env bash /tmp/restart_rbd_target_api.sh
listen: "restart ceph rbd-target-api"
when:
@ -457,7 +461,7 @@
delegate_to: "{{ item }}"
run_once: True
- name: set _rbd_target_api_handler_called after restart
- name: set _rbd_target_api_handler_called after restart
set_fact:
_rbd_target_api_handler_called: False
listen: "restart ceph rbd-target-api"

View File

@ -1,18 +1,6 @@
---
- name: generate systemd unit files for tcmu-runner, rbd-target-api and rbd-target-gw
become: true
template:
src: "{{ role_path }}/templates/{{ item }}.service.j2"
dest: /etc/systemd/system/{{ item }}.service
owner: "root"
group: "root"
mode: "0644"
with_items:
- tcmu-runner
- rbd-target-gw
- rbd-target-api
notify:
- restart ceph {{ item }}
- name: include systemd.yml
include_tasks: systemd.yml
- name: systemd start tcmu-runner, rbd-target-api and rbd-target-gw containers
systemd:

View File

@ -0,0 +1,15 @@
---
- name: generate systemd unit files for tcmu-runner, rbd-target-api and rbd-target-gw
become: true
template:
src: "{{ role_path }}/templates/{{ item }}.service.j2"
dest: /etc/systemd/system/{{ item }}.service
owner: "root"
group: "root"
mode: "0644"
with_items:
- tcmu-runner
- rbd-target-gw
- rbd-target-api
notify:
- restart ceph {{ item }}

View File

@ -0,0 +1 @@
container/systemd.yml

View File

@ -1,15 +1,19 @@
[Unit]
Description=RBD Target API Service
{% if container_binary == 'docker' %}
After=docker.service
Requires=docker.service
{% else %}
After=network.target
{% endif %}
[Service]
EnvironmentFile=-/etc/environment
ExecStartPre=-/usr/bin/docker stop rbd-target-api
ExecStartPre=-/usr/bin/docker rm rbd-target-api
ExecStart=/usr/bin/docker run --rm \
ExecStartPre=-/usr/bin/{{ container_binary }} stop rbd-target-api
ExecStartPre=-/usr/bin/{{ container_binary }} rm rbd-target-api
ExecStart=/usr/bin/{{ container_binary }} run --rm \
--memory={{ ceph_rbd_target_api_docker_memory_limit }} \
{% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
{% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or container_binary == 'podman' -%}
--cpus={{ ceph_rbd_target_api_docker_cpu_limit }} \
{% else -%}
--cpu-quota={{ ceph_rbd_target_api_docker_cpu_limit * 100000 }} \
@ -25,7 +29,7 @@ ExecStart=/usr/bin/docker run --rm \
-e CEPH_DAEMON=RBD_TARGET_API \
--name=rbd-target-api \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
ExecStopPost=-/usr/bin/docker stop rbd-target-api
ExecStopPost=-/usr/bin/{{ container_binary }} stop rbd-target-api
Restart=always
RestartSec=10s
TimeoutStartSec=120

View File

@ -1,15 +1,19 @@
[Unit]
Description=RBD Target Gateway Service
{% if container_binary == 'docker' %}
After=docker.service
Requires=docker.service
{% else %}
After=network.target
{% endif %}
[Service]
EnvironmentFile=-/etc/environment
ExecStartPre=-/usr/bin/docker stop rbd-target-gw
ExecStartPre=-/usr/bin/docker rm rbd-target-gw
ExecStart=/usr/bin/docker run --rm \
ExecStartPre=-/usr/bin/{{ container_binary }} stop rbd-target-gw
ExecStartPre=-/usr/bin/{{ container_binary }} rm rbd-target-gw
ExecStart=/usr/bin/{{ container_binary }} run --rm \
--memory={{ ceph_rbd_target_gw_docker_memory_limit }} \
{% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
{% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or container_binary == 'podman' -%}
--cpus={{ ceph_rbd_target_gw_docker_cpu_limit }} \
{% else -%}
--cpu-quota={{ ceph_rbd_target_gw_docker_cpu_limit * 100000 }} \
@ -25,7 +29,8 @@ ExecStart=/usr/bin/docker run --rm \
-e CEPH_DAEMON=RBD_TARGET_GW \
--name=rbd-target-gw \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
ExecStopPost=-/usr/bin/docker stop rbd-target-gw
ExecStopPost=-/usr/bin/{{ container_binary }} stop rbd-target-gw
KillMode=none
Restart=always
RestartSec=10s
TimeoutStartSec=120

View File

@ -1,15 +1,19 @@
[Unit]
Description=TCMU Runner
{% if container_binary == 'docker' %}
After=docker.service
Requires=docker.service
{% else %}
After=network.target
{% endif %}
[Service]
EnvironmentFile=-/etc/environment
ExecStartPre=-/usr/bin/docker stop tcmu-runner
ExecStartPre=-/usr/bin/docker rm tcmu-runner
ExecStart=/usr/bin/docker run --rm \
ExecStartPre=-/usr/bin/{{ container_binary }} stop tcmu-runner
ExecStartPre=-/usr/bin/{{ container_binary }} rm tcmu-runner
ExecStart=/usr/bin/{{ container_binary }} run --rm \
--memory={{ ceph_tcmu_runner_docker_memory_limit }} \
{% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
{% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or container_binary == 'podman' -%}
--cpus={{ ceph_tcmu_runner_docker_cpu_limit }} \
{% else -%}
--cpu-quota={{ ceph_tcmu_runner_docker_cpu_limit * 100000 }} \
@ -25,7 +29,7 @@ ExecStart=/usr/bin/docker run --rm \
-e CEPH_DAEMON=TCMU_RUNNER \
--name=tcmu-runner \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
ExecStopPost=-/usr/bin/docker stop tcmu-runner
ExecStopPost=-/usr/bin/{{ container_binary }} stop tcmu-runner
Restart=always
RestartSec=10s
TimeoutStartSec=120

View File

@ -45,16 +45,8 @@
- "{{ statconfig.results }}"
when: item.1.stat.exists == true
- name: generate systemd unit file
become: true
template:
src: "{{ role_path }}/templates/ceph-mds.service.j2"
dest: /etc/systemd/system/ceph-mds@.service
owner: "root"
group: "root"
mode: "0644"
notify:
- restart ceph mdss
- name: include systemd.yml
include_tasks: systemd.yml
- name: systemd start mds container
systemd:

View File

@ -0,0 +1,11 @@
---
- name: generate systemd unit file
become: true
template:
src: "{{ role_path }}/templates/ceph-mds.service.j2"
dest: /etc/systemd/system/ceph-mds@.service
owner: "root"
group: "root"
mode: "0644"
notify:
- restart ceph mdss

View File

@ -1,16 +1,20 @@
[Unit]
Description=Ceph MDS
{% if container_binary == 'docker' %}
After=docker.service
Requires=docker.service
{% else %}
After=network.target
{% endif %}
{% set cpu_limit = ansible_processor_vcpus|int if ceph_mds_docker_cpu_limit|int > ansible_processor_vcpus|int else ceph_mds_docker_cpu_limit|int %}
[Service]
EnvironmentFile=-/etc/environment
ExecStartPre=-/usr/bin/docker stop ceph-mds-{{ ansible_hostname }}
ExecStartPre=-/usr/bin/docker rm ceph-mds-{{ ansible_hostname }}
ExecStart=/usr/bin/docker run --rm --net=host \
ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_hostname }}
ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mds-{{ ansible_hostname }}
ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
--memory={{ ceph_mds_docker_memory_limit }} \
{% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
{% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or container_binary == 'podman' -%}
--cpus={{ cpu_limit }} \
{% else -%}
--cpu-quota={{ cpu_limit * 100000 }} \
@ -30,7 +34,7 @@ ExecStart=/usr/bin/docker run --rm --net=host \
{{ ceph_mds_docker_extra_env }} \
--name=ceph-mds-{{ ansible_hostname }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
ExecStopPost=-/usr/bin/docker stop ceph-mds-{{ ansible_hostname }}
ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_hostname }}
Restart=always
RestartSec=10s
TimeoutStartSec=120

View File

@ -1,14 +1,6 @@
---
- name: generate systemd unit file
become: true
template:
src: "{{ role_path }}/templates/ceph-mgr.service.j2"
dest: /etc/systemd/system/ceph-mgr@.service
owner: "root"
group: "root"
mode: "0644"
notify:
- restart ceph mgrs
- name: include systemd.yml
include_tasks: systemd.yml
- name: systemd start mgr container
systemd:

View File

@ -0,0 +1,11 @@
---
- name: generate systemd unit file
become: true
template:
src: "{{ role_path }}/templates/ceph-mgr.service.j2"
dest: /etc/systemd/system/ceph-mgr@.service
owner: "root"
group: "root"
mode: "0644"
notify:
- restart ceph mgrs

View File

@ -0,0 +1 @@
docker/systemd.yml

View File

@ -1,15 +1,19 @@
[Unit]
Description=Ceph Manager
{% if container_binary == 'docker' %}
After=docker.service
Requires=docker.service
{% else %}
After=network.target
{% endif %}
[Service]
EnvironmentFile=-/etc/environment
ExecStartPre=-/usr/bin/docker stop ceph-mgr-{{ ansible_hostname }}
ExecStartPre=-/usr/bin/docker rm ceph-mgr-{{ ansible_hostname }}
ExecStart=/usr/bin/docker run --rm --net=host \
ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_hostname }}
ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mgr-{{ ansible_hostname }}
ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
--memory={{ ceph_mgr_docker_memory_limit }} \
{% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
{% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or container_binary == 'podman' -%}
--cpus={{ ceph_mgr_docker_cpu_limit }} \
{% else -%}
--cpu-quota={{ ceph_mgr_docker_cpu_limit * 100000 }} \
@ -29,7 +33,7 @@ ExecStart=/usr/bin/docker run --rm --net=host \
{{ ceph_mgr_docker_extra_env }} \
--name=ceph-mgr-{{ ansible_hostname }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
ExecStopPost=-/usr/bin/docker stop ceph-mgr-{{ ansible_hostname }}
ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_hostname }}
Restart=always
RestartSec=10s
TimeoutStartSec=120

View File

@ -40,16 +40,8 @@
image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
when: containerized_deployment_with_kv
- name: generate systemd unit file
become: true
template:
src: "{{ role_path }}/templates/ceph-mon.service.j2"
dest: /etc/systemd/system/ceph-mon@.service
owner: "root"
group: "root"
mode: "0644"
notify:
- restart ceph mons
- name: include systemd.yml
include_tasks: systemd.yml
- name: systemd start mon container
systemd:

View File

@ -0,0 +1,11 @@
---
- name: generate systemd unit file
become: true
template:
src: "{{ role_path }}/templates/ceph-mon.service.j2"
dest: /etc/systemd/system/ceph-mon@.service
owner: "root"
group: "root"
mode: "0644"
notify:
- restart ceph mons

View File

@ -0,0 +1 @@
docker/systemd.yml

View File

@ -1,15 +1,19 @@
[Unit]
Description=Ceph Monitor
{% if container_binary == 'docker' %}
After=docker.service
Requires=docker.service
{% else %}
After=network.target
{% endif %}
[Service]
EnvironmentFile=-/etc/environment
ExecStartPre=-/usr/bin/docker rm ceph-mon-%i
ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mon-%i
ExecStartPre=/bin/sh -c '"$(command -v mkdir)" -p /etc/ceph /var/lib/ceph/mon'
ExecStart=/usr/bin/docker run --rm --name ceph-mon-%i \
ExecStart=/usr/bin/{{ container_binary }} run --rm --name ceph-mon-%i \
--memory={{ ceph_mon_docker_memory_limit }} \
{% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
{% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or container_binary == 'podman' -%}
--cpus={{ ceph_mon_docker_cpu_limit }} \
{% else -%}
--cpu-quota={{ ceph_mon_docker_cpu_limit * 100000 }} \
@ -64,7 +68,7 @@ ExecStart=/usr/bin/docker run --rm --name ceph-mon-%i \
-e CEPH_DAEMON=MON \
{{ ceph_mon_docker_extra_env }} \
{{ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
ExecStop=-/usr/bin/docker stop ceph-mon-%i
ExecStop=-/usr/bin/{{ container_binary }} stop ceph-mon-%i
ExecStopPost=-/bin/rm -f /var/run/ceph/{{ cluster }}-mon.{{ monitor_name }}.asok
Restart=always
RestartSec=10s

View File

@ -62,18 +62,8 @@
when:
- ceph_nfs_dynamic_exports
- name: generate systemd unit file
become: true
template:
src: "{{ role_path }}/templates/ceph-nfs.service.j2"
dest: /etc/systemd/system/ceph-nfs@.service
owner: "root"
group: "root"
mode: "0644"
when:
- containerized_deployment
notify:
- restart ceph nfss
- name: include systemd.yml
include_tasks: systemd.yml
- name: systemd start nfs container
systemd:

View File

@ -0,0 +1,13 @@
---
- name: generate systemd unit file
become: true
template:
src: "{{ role_path }}/templates/ceph-nfs.service.j2"
dest: /etc/systemd/system/ceph-nfs@.service
owner: "root"
group: "root"
mode: "0644"
when:
- containerized_deployment
notify:
- restart ceph nfss

View File

@ -1,14 +1,18 @@
[Unit]
Description=NFS-Ganesha file server
Documentation=http://github.com/nfs-ganesha/nfs-ganesha/wiki
{% if container_binary == 'docker' %}
After=docker.service
Requires=docker.service
{% else %}
After=network.target
{% endif %}
[Service]
EnvironmentFile=-/etc/environment
ExecStartPre=-/usr/bin/docker rm ceph-nfs-%i
ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-nfs-%i
ExecStartPre=/usr/bin/mkdir -p /etc/ceph /etc/ganesha /var/lib/nfs/ganesha
ExecStart=/usr/bin/docker run --rm --net=host \
ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
{% if not containerized_deployment_with_kv -%}
-v /var/lib/ceph:/var/lib/ceph:z \
-v /etc/ceph:/etc/ceph:z \
@ -30,7 +34,7 @@ ExecStart=/usr/bin/docker run --rm --net=host \
{{ ceph_nfs_docker_extra_env }} \
--name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_hostname) }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
ExecStopPost=-/usr/bin/docker stop ceph-nfs-%i
ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-nfs-%i
Restart=always
RestartSec=10s
TimeoutStartSec=120

View File

@ -58,16 +58,8 @@
- devices is defined
- devices | length > activated_osds.stdout_lines | length
- name: generate ceph osd docker run script
become: true
template:
src: "{{ role_path }}/templates/ceph-osd-run.sh.j2"
dest: "{{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh"
owner: "root"
group: "root"
mode: "0744"
notify:
- restart ceph osds
- name: include systemd.yml
include_tasks: systemd.yml
when:
- containerized_deployment

View File

@ -0,0 +1,11 @@
---
- name: generate ceph osd docker run script
become: true
template:
src: "{{ role_path }}/templates/ceph-osd-run.sh.j2"
dest: "{{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh"
owner: "root"
group: "root"
mode: "0744"
notify:
- restart ceph osds

View File

@ -14,7 +14,7 @@ DOCKER_ENV=""
#############
function id_to_device () {
{% if dmcrypt | bool %}
docker run --rm --net=host --ulimit nofile=1024:4096 --ipc=host --pid=host --privileged=true -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /dev:/dev -v /etc/localtime:/etc/localtime:ro -e DEBUG=verbose -e CLUSTER={{ cluster }} {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} osd_ceph_disk_dmcrypt_data_map
{{ container_binary }} run --rm --net=host --ulimit nofile=1024:4096 --ipc=host --pid=host --privileged=true -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /dev:/dev -v /etc/localtime:/etc/localtime:ro -e DEBUG=verbose -e CLUSTER={{ cluster }} {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} osd_ceph_disk_dmcrypt_data_map
{% endif %}
DATA_PART=$(docker run --rm --ulimit nofile=1024:4096 --privileged=true -v /dev/:/dev/ -v /etc/ceph:/etc/ceph:z --entrypoint ceph-disk {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} list | grep ", osd\.${1}," | awk '{ print $1 }')
if [[ "${DATA_PART}" =~ ^/dev/(cciss|nvme|loop) ]]; then
@ -32,14 +32,14 @@ function expose_partitions () {
# Jewel images prior to https://github.com/ceph/ceph-docker/pull/797
REGEX="[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
function expose_partitions {
if docker ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}; then
if {{ container_binary }} ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}; then
if [[ ! -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log ]]; then
docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log
{{ container_binary }} logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log
fi
fi
if docker ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-${1}; then
if {{ container_binary }} ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-${1}; then
if [[ ! -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log ]]; then
docker logs ceph-osd-prepare-{{ ansible_hostname }}-${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log
{{ container_binary }} logs ceph-osd-prepare-{{ ansible_hostname }}-${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log
fi
fi
if [[ -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log ]]; then
@ -88,7 +88,7 @@ fi
numactl \
{{ ceph_osd_numactl_opts }} \
{% endif %}
/usr/bin/docker run \
/usr/bin/{{ container_binary }} run \
--rm \
--net=host \
--privileged=true \
@ -97,7 +97,7 @@ numactl \
{% if osd_objectstore == 'filestore' -%}
--memory={{ ceph_osd_docker_memory_limit }} \
{% endif -%}
{% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
{% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or container_binary == 'podman' -%}
--cpus={{ cpu_limit }} \
{% else -%}
--cpu-quota={{ cpu_limit * 100000 }} \

View File

@ -1,15 +1,19 @@
# {{ ansible_managed }}
[Unit]
Description=Ceph OSD
{% if container_binary == 'docker' %}
After=docker.service
Requires=docker.service
{% else %}
After=network.target
{% endif %}
[Service]
EnvironmentFile=-/etc/environment
ExecStartPre=-/usr/bin/docker stop ceph-osd-%i
ExecStartPre=-/usr/bin/docker rm -f ceph-osd-%i
ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-osd-%i
ExecStartPre=-/usr/bin/{{ container_binary }} rm -f ceph-osd-%i
ExecStart={{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh %i
ExecStop=-/usr/bin/docker stop ceph-osd-%i
ExecStop=-/usr/bin/{{ container_binary }} stop ceph-osd-%i
Restart=always
RestartSec=10s
TimeoutStartSec=120

View File

@ -1,15 +1,6 @@
---
# Use systemd to manage container on Atomic host
- name: generate systemd unit file
become: true
template:
src: "{{ role_path }}/templates/ceph-rbd-mirror.service.j2"
dest: /etc/systemd/system/ceph-rbd-mirror@.service
owner: "root"
group: "root"
mode: "0644"
notify:
- restart ceph rbdmirrors
- name: include systemd.yml
include_tasks: systemd.yml
- name: systemd start rbd mirror container
systemd:

View File

@ -0,0 +1,12 @@
---
# Use systemd to manage container on Atomic host
- name: generate systemd unit file
become: true
template:
src: "{{ role_path }}/templates/ceph-rbd-mirror.service.j2"
dest: /etc/systemd/system/ceph-rbd-mirror@.service
owner: "root"
group: "root"
mode: "0644"
notify:
- restart ceph rbdmirrors

View File

@ -0,0 +1 @@
docker/systemd.yml

View File

@ -1,15 +1,19 @@
[Unit]
Description=Ceph RBD mirror
{% if container_binary == 'docker' %}
After=docker.service
Requires=docker.service
{% else %}
After=network.target
{% endif %}
[Service]
EnvironmentFile=-/etc/environment
ExecStartPre=-/usr/bin/docker stop ceph-rbd-mirror-{{ ansible_hostname }}
ExecStartPre=-/usr/bin/docker rm ceph-rbd-mirror-{{ ansible_hostname }}
ExecStart=/usr/bin/docker run --rm --net=host \
ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_hostname }}
ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rbd-mirror-{{ ansible_hostname }}
ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
--memory={{ ceph_rbd_mirror_docker_memory_limit }} \
{% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
{% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or container_binary == 'podman' -%}
--cpus={{ ceph_rbd_mirror_docker_cpu_limit }} \
{% else -%}
--cpu-quota={{ ceph_rbd_mirror_docker_cpu_limit * 100000 }} \
@ -29,7 +33,7 @@ ExecStart=/usr/bin/docker run --rm --net=host \
--name=ceph-rbd-mirror-{{ ansible_hostname }} \
{{ ceph_rbd_mirror_docker_extra_env }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
ExecStopPost=-/usr/bin/docker stop ceph-rbd-mirror-{{ ansible_hostname }}
ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_hostname }}
Restart=always
RestartSec=10s
TimeoutStartSec=120

View File

@ -1,28 +1,6 @@
---
- name: generate systemd unit file
become: true
template:
src: "{{ role_path }}/templates/ceph-radosgw.service.j2"
dest: /etc/systemd/system/ceph-radosgw@.service
owner: "root"
group: "root"
mode: "0644"
notify:
- restart ceph rgws
# For backward compatibility
- name: disable old systemd unit ('ceph-rgw@'|'ceph-radosgw@radosgw.'|'ceph-radosgw@') if present
systemd:
name: "{{ item }}"
state: stopped
enabled: no
daemon_reload: yes
with_items:
- "ceph-rgw@{{ ansible_hostname }}"
- "ceph-radosgw@{{ ansible_hostname }}.service"
- "ceph-radosgw@radosgw.{{ ansible_hostname }}.service"
- ceph-radosgw@radosgw.gateway.service
ignore_errors: true
- name: include systemd.yml
include_tasks: systemd.yml
- name: systemd start rgw container
systemd:

View File

@ -0,0 +1,25 @@
---
- name: generate systemd unit file
become: true
template:
src: "{{ role_path }}/templates/ceph-radosgw.service.j2"
dest: /etc/systemd/system/ceph-radosgw@.service
owner: "root"
group: "root"
mode: "0644"
notify:
- restart ceph rgws
# For backward compatibility
- name: disable old systemd unit ('ceph-rgw@'|'ceph-radosgw@radosgw.'|'ceph-radosgw@') if present
systemd:
name: "{{ item }}"
state: stopped
enabled: no
daemon_reload: yes
with_items:
- "ceph-rgw@{{ ansible_hostname }}"
- "ceph-radosgw@{{ ansible_hostname }}.service"
- "ceph-radosgw@radosgw.{{ ansible_hostname }}.service"
- ceph-radosgw@radosgw.gateway.service
ignore_errors: true

View File

@ -0,0 +1 @@
docker/systemd.yml

View File

@ -1,16 +1,20 @@
[Unit]
Description=Ceph RGW
{% if container_binary == 'docker' %}
After=docker.service
Requires=docker.service
{% else %}
After=network.target
{% endif %}
{% set cpu_limit = ansible_processor_vcpus|int if ceph_rgw_docker_cpu_limit|int > ansible_processor_vcpus|int else ceph_rgw_docker_cpu_limit|int %}
[Service]
EnvironmentFile=-/etc/environment
ExecStartPre=-/usr/bin/docker stop ceph-rgw-{{ ansible_hostname }}
ExecStartPre=-/usr/bin/docker rm ceph-rgw-{{ ansible_hostname }}
ExecStart=/usr/bin/docker run --rm --net=host \
ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }}
ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rgw-{{ ansible_hostname }}
ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
--memory={{ ceph_rgw_docker_memory_limit }} \
{% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
{% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or container_binary == 'podman' -%}
--cpus={{ cpu_limit }} \
{% else -%}
--cpu-quota={{ cpu_limit * 100000 }} \
@ -33,7 +37,7 @@ ExecStart=/usr/bin/docker run --rm --net=host \
--name=ceph-rgw-{{ ansible_hostname }} \
{{ ceph_rgw_docker_extra_env }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
ExecStopPost=-/usr/bin/docker stop ceph-rgw-{{ ansible_hostname }}
ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }}
Restart=always
RestartSec=10s
TimeoutStartSec=120

View File

@ -38,6 +38,7 @@ def node(host, request):
osd_scenario = ansible_vars.get("osd_scenario")
lvm_scenario = osd_scenario in ['lvm', 'lvm-batch']
devices = ansible_vars.get("devices", [])
container_binary = ''
ceph_release_num = {
'jewel': 10,
'kraken': 11,
@ -98,6 +99,11 @@ def node(host, request):
cluster_name = ansible_vars.get("cluster", "ceph")
conf_path = "/etc/ceph/{}.conf".format(cluster_name)
if docker:
container_binary = "docker"
if docker and str_to_bool(os.environ.get('IS_PODMAN', False)): # noqa E501
container_binary = "podman"
data = dict(
address=address,
subnet=subnet,
@ -112,6 +118,7 @@ def node(host, request):
ceph_stable_release=ceph_stable_release,
ceph_release_num=ceph_release_num,
rolling_update=rolling_update,
container_binary=container_binary
)
return data

View File

@ -0,0 +1,8 @@
- hosts: all
gather_facts: true
become: true
tasks:
- name: install podman
package:
name: podman
state: present

View File

@ -0,0 +1 @@
../../../Vagrantfile

View File

@ -0,0 +1,37 @@
{
"ceph_conf_overrides": {
"global": {
"osd_pool_default_pg_num": 12,
"osd_pool_default_size": 1,
"mon_allow_pool_size_one": true,
"mon_warn_on_pool_no_redundancy": false
}
},
"cephfs_pools": [
{
"name": "cephfs_data",
"pg_num": 8,
"pgp_num": 8,
"rule_name": "replicated_rule",
"type": 1,
"erasure_profile": "",
"expected_num_objects": "",
"application": "cephfs",
"size": 3,
"min_size": 0
},
{
"name": "cephfs_metadata",
"pg_num": 8,
"pgp_num": 8,
"rule_name": "replicated_rule",
"type": 1,
"erasure_profile": "",
"expected_num_objects": "",
"application": "cephfs",
"size": 3,
"min_size": 0
}
],
"ceph_mon_docker_memory_limit": "2g"
}

View File

@ -0,0 +1,44 @@
---
# this is only here to let the CI tests know
# that this scenario is using docker
docker: True
container_binary: docker
containerized_deployment: True
monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
ceph_docker_on_openstack: False
public_network: "192.168.58.0/24"
cluster_network: "192.168.59.0/24"
rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
openstack_config: False
openstack_glance_pool:
name: "images"
pg_num: "{{ osd_pool_default_pg_num }}"
pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
erasure_profile: ""
expected_num_objects: ""
size: 1
openstack_cinder_pool:
name: "volumes"
pg_num: "{{ osd_pool_default_pg_num }}"
pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
erasure_profile: ""
expected_num_objects: ""
size: 1
openstack_pools:
- "{{ openstack_glance_pool }}"
- "{{ openstack_cinder_pool }}"
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10

View File

@ -0,0 +1,22 @@
---
user_config: True
copy_admin_key: True
test:
name: "test"
pg_num: "{{ osd_pool_default_pg_num }}"
pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
erasure_profile: ""
expected_num_objects: ""
test2:
name: "test2"
pg_num: "{{ osd_pool_default_pg_num }}"
pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
erasure_profile: ""
expected_num_objects: ""
pools:
- "{{ test }}"
- "{{ test2 }}"

View File

@ -0,0 +1,3 @@
---
gateway_ip_list: "{{ ansible_all_ipv4_addresses | ipaddr(public_network) | first }}"
generate_crt: True

View File

@ -0,0 +1,11 @@
---
create_crush_tree: False
crush_rule_config: False
crush_rule_hdd:
name: HDD
root: default
type: host
class: hdd
default: true
crush_rules:
- "{{ crush_rule_hdd }}"

View File

@ -0,0 +1,7 @@
---
osd_objectstore: "bluestore"
osd_scenario: lvm
devices:
- /dev/sda
- /dev/sdb
- /dev/sdc

View File

@ -0,0 +1,7 @@
---
copy_admin_key: True
rgw_create_pools:
foo:
pg_num: 16
bar:
pg_num: 16

View File

@ -0,0 +1,29 @@
[mons]
mon0
[osds]
osd0
[mgrs]
mon0
[mdss]
osd0
[rgws]
osd0
[nfss]
nfs0
[rbdmirrors]
rbd-mirror0
[iscsigws]
iscsi-gw0
[all:vars]
nfs_ganesha_stable=True
nfs_ganesha_dev=False
nfs_ganesha_stable_branch="V2.7-stable"
nfs_ganesha_flavor="ceph_master"

View File

@ -0,0 +1,32 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: True
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 1
nfs_vms: 1
rbd_mirror_vms: 1
client_vms: 0
iscsi_gw_vms: 1
mgr_vms: 0
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.58
cluster_subnet: 192.168.59
# MEMORY
# set 1024 for CentOS
memory: 1024
vagrant_box: centos/7
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true

View File

@ -21,13 +21,15 @@ class TestMDSs(object):
def test_mds_is_up(self, node, host):
hostname = node["vars"]["inventory_hostname"]
container_binary = node['container_binary']
if node['docker']:
docker_exec_cmd = 'docker exec ceph-mds-{hostname}'.format(hostname=hostname)
container_exec_cmd = '{container_binary} exec ceph-mds-{hostname}'.format( # noqa E501
hostname=hostname, container_binary=container_binary)
else:
docker_exec_cmd = ''
container_exec_cmd = ''
cmd = "sudo {docker_exec_cmd} ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
docker_exec_cmd=docker_exec_cmd,
cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
container_exec_cmd=container_exec_cmd,
cluster=node['cluster_name']
)
cluster_status = json.loads(host.check_output(cmd))

View File

@ -22,12 +22,13 @@ class TestMGRs(object):
def test_mgr_is_up(self, node, host):
hostname=node["vars"]["inventory_hostname"]
cluster=node["cluster_name"]
container_binary=node['container_binary']
if node['docker']:
docker_exec_cmd = 'docker exec ceph-mgr-{hostname}'.format(hostname=hostname)
container_exec_cmd = '{container_binary} exec ceph-mgr-{hostname}'.format(container_binary=container_binary, hostname=hostname)
else:
docker_exec_cmd = ''
cmd = "sudo {docker_exec_cmd} ceph --name mgr.{hostname} --keyring /var/lib/ceph/mgr/{cluster}-{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
docker_exec_cmd=docker_exec_cmd,
container_exec_cmd = ''
cmd = "sudo {container_exec_cmd} ceph --name mgr.{hostname} --keyring /var/lib/ceph/mgr/{cluster}-{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
container_exec_cmd=container_exec_cmd,
hostname=node["vars"]["inventory_hostname"],
cluster=cluster
)

View File

@ -28,10 +28,10 @@ class TestMons(object):
output = host.check_output(cmd)
assert output.strip().startswith("cluster")
def test_ceph_config_has_inital_members_line(self, node, File):
assert File(node["conf_path"]).contains("^mon initial members = .*$")
def test_ceph_config_has_inital_members_line(self, node, host):
assert host.file(node["conf_path"]).contains("^mon initial members = .*$")
def test_initial_members_line_has_correct_value(self, node, host, File):
def test_initial_members_line_has_correct_value(self, node, host):
mon_initial_members_line = host.check_output("grep 'mon initial members = ' /etc/ceph/{cluster}.conf".format(cluster=node['cluster_name']))
result = True
for host in node["vars"]["groups"]["mons"]:

View File

@ -26,12 +26,13 @@ class TestNFSs(object):
def test_nfs_is_up(self, node, host):
hostname = node["vars"]["inventory_hostname"]
cluster = node['cluster_name']
container_binary = node['container_binary']
if node['docker']:
docker_exec_cmd = 'docker exec ceph-nfs-{hostname}'.format(hostname=hostname)
container_exec_cmd = '{container_binary} exec ceph-nfs-{hostname}'.format(container_binary=container_binary, hostname=hostname)
else:
docker_exec_cmd = ''
cmd = "sudo {docker_exec_cmd} ceph --name client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
docker_exec_cmd=docker_exec_cmd,
container_exec_cmd = ''
cmd = "sudo {container_exec_cmd} ceph --name client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
container_exec_cmd=container_exec_cmd,
hostname=hostname,
cluster=cluster
)

View File

@ -12,9 +12,9 @@ class TestOSDs(object):
osds = cmd.stdout.rstrip("\n").split("\n")
return osds
def _get_docker_exec_cmd(self, host):
def _get_docker_exec_cmd(self, node, host):
osd_id = host.check_output(
"docker ps -q --filter='name=ceph-osd' | head -1")
"{container_binary} ps -q --filter='name=ceph-osd' | head -1".format(container_binary=node['container_binary']))
return osd_id
@ -86,8 +86,10 @@ class TestOSDs(object):
@pytest.mark.docker
def test_all_docker_osds_are_up_and_in(self, node, host):
cmd = "sudo docker exec {osd_id} ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(
osd_id=self._get_docker_exec_cmd(host),
container_binary= node['container_binary']
cmd = "sudo {container_binary} exec {osd_id} ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(
container_binary=container_binary,
osd_id=self._get_docker_exec_cmd(node, host),
cluster=node["cluster_name"]
)
output = json.loads(host.check_output(cmd))

View File

@ -30,15 +30,16 @@ class TestRbdMirrors(object):
def test_rbd_mirror_is_up(self, node, host):
hostname=node["vars"]["inventory_hostname"]
cluster=node["cluster_name"]
container_binary = node["container_binary"]
daemons = []
if node['docker']:
docker_exec_cmd = 'docker exec ceph-rbd-mirror-{hostname}'.format(hostname=hostname)
container_exec_cmd = '{container_binary} exec ceph-rbd-mirror-{hostname}'.format(container_binary=container_binary, hostname=hostname)
else:
docker_exec_cmd = ''
container_exec_cmd = ''
hostname = node["vars"]["inventory_hostname"]
cluster = node['cluster_name']
cmd = "sudo {docker_exec_cmd} ceph --name client.bootstrap-rbd --keyring /var/lib/ceph/bootstrap-rbd/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
docker_exec_cmd=docker_exec_cmd,
cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-rbd --keyring /var/lib/ceph/bootstrap-rbd/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
container_exec_cmd=container_exec_cmd,
hostname=hostname,
cluster=cluster
)

View File

@ -25,12 +25,13 @@ class TestRGWs(object):
def test_rgw_is_up(self, node, host):
hostname=node["vars"]["inventory_hostname"]
cluster=node["cluster_name"]
container_binary=node['container_binary']
if node['docker']:
docker_exec_cmd = 'docker exec ceph-rgw-{hostname}'.format(hostname=hostname)
container_exec_cmd = '{container_binary} exec ceph-rgw-{hostname}'.format(container_binary=container_binary, hostname=hostname)
else:
docker_exec_cmd = ''
cmd = "sudo {docker_exec_cmd} ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
docker_exec_cmd=docker_exec_cmd,
container_exec_cmd = ''
cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
container_exec_cmd=container_exec_cmd,
hostname=hostname,
cluster=cluster
)

View File

@ -37,7 +37,9 @@ class TestRGWs(object):
def test_docker_rgw_tuning_pools_are_set(self, node, host):
hostname = node["vars"]["inventory_hostname"]
cluster = node['cluster_name']
cmd = "sudo docker exec ceph-rgw-{hostname} ceph --cluster={cluster} -n client.rgw.{hostname} --connect-timeout 5 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring osd dump".format(
container_binary = node['container_binary']
cmd = "sudo {container_binary} exec ceph-rgw-{hostname} ceph --cluster={cluster} -n client.rgw.{hostname} --connect-timeout 5 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring osd dump".format(
container_binary=container_binary,
hostname=hostname,
cluster=cluster
)

View File

@ -22,8 +22,8 @@ class TestInstall(object):
class TestCephConf(object):
def test_ceph_config_has_mon_host_line(self, node, File):
assert File(node["conf_path"]).contains("^mon host = .*$")
def test_ceph_config_has_mon_host_line(self, node, host):
assert host.file(node["conf_path"]).contains("^mon host = .*$")
def test_mon_host_line_has_correct_value(self, node, host):
mon_host_line = host.check_output("grep 'mon host = ' /etc/ceph/{cluster}.conf".format(cluster=node['cluster_name']))

View File

@ -1,6 +1,6 @@
# These are Python requirements needed to run the functional tests
six==1.10.0
testinfra==1.19.0
testinfra==3.4.0
pytest-xdist==1.27.0
pytest==3.6.1
ansible~=2.6,<2.7

View File

@ -0,0 +1,68 @@
[tox]
envlist = centos-container-docker_to_podman
skipsdist = True
[testenv]
whitelist_externals =
vagrant
bash
pip
sleep
rm
cp
passenv=*
sitepackages=True
setenv=
ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
ANSIBLE_CALLBACK_WHITELIST = profile_tasks
ANSIBLE_KEEP_REMOTE_FILES = 1
ANSIBLE_CACHE_PLUGIN = memory
ANSIBLE_GATHERING = implicit
# only available for ansible >= 2.5
ANSIBLE_STDOUT_CALLBACK = yaml
# Set the vagrant box image to use
CEPH_ANSIBLE_VAGRANT_BOX = centos/7
deps= -r{toxinidir}/tests/requirements.txt
changedir= {toxinidir}/tests/functional/docker2podman
commands=
bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/setup.yml
ansible-playbook -vv -i {changedir}/hosts {toxinidir}/site-container.yml.sample --extra-vars "\
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} \
ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-luminous} \
"
pip uninstall -y ansible
pip install ansible==2.10.0a2
cp {toxinidir}/infrastructure-playbooks/docker-to-podman.yml {toxinidir}/docker-to-podman.yml
ansible-playbook -vv -i {changedir}/hosts {toxinidir}/docker-to-podman.yml --extra-vars "\
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
ansible_python_interpreter=/usr/bin/python2 \
ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} \
ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-luminous} \
"
# install podman
ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/docker2podman.yml -e ansible_python_interpreter=/usr/bin/python2
py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
# reboot machines
ansible-playbook -i {changedir}/hosts {toxinidir}/tests/functional/reboot.yml
# wait 120 sec and run tests (there's a chance nodes are still downloading container image after the reboot)
sleep 120
bash -c 'IS_PODMAN=TRUE py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests'
vagrant destroy -f

View File

@ -78,6 +78,6 @@ commands=
'dedicated_devices': [/dev/sdc,/dev/sdc], \
'osd_scenario': 'non-collocated' \}"
bash -c "CEPH_STABLE_RELEASE={env:UPDATE_CEPH_STABLE_RELEASE:luminous} ROLLING_UPDATE=TRUE py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests"
bash -c "CEPH_STABLE_RELEASE={env:UPDATE_CEPH_STABLE_RELEASE:luminous} ROLLING_UPDATE=TRUE py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
vagrant destroy --force

tox.ini
View File

@ -73,7 +73,7 @@ commands=
ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-luminous} \
"
# test that the cluster can be redeployed in a healthy state
py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
[purge-lvm]
commands=
@ -99,7 +99,7 @@ commands=
ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-luminous} \
"
# test that the cluster can be redeployed in a healthy state
py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
[shrink-mon]
commands=
@ -141,7 +141,7 @@ commands=
ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-luminous} \
"
py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-switch-to-containers {toxinidir}/tests/functional/tests
py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-switch-to-containers --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
[add-osds]
commands=
@ -157,7 +157,7 @@ commands=
ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-luminous} \
"
py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 {toxinidir}/tests/functional/tests
py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
[rgw-multisite]
commands=
@ -268,14 +268,14 @@ commands=
# wait 30sec for services to be ready
sleep 30
# test cluster state using ceph-ansible tests
py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
# reboot all vms
all_daemons: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml
# wait 30sec for services to be ready
# retest to ensure cluster came back up correctly after rebooting
all_daemons: py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
all_daemons: py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
# handlers/idempotency test
all_daemons,all_in_one: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-luminous} copy_admin_key={env:COPY_ADMIN_KEY:False} " --extra-vars @ceph-override.json