Mirror of https://github.com/ceph/ceph-ansible.git
Commit f146829e9e
@@ -5,24 +5,36 @@
   when:
     - ansible_os_family == 'Debian'

-- block:
-    - name: copy mon restart script
-      template:
-        src: restart_mon_daemon.sh.j2
-        dest: /tmp/restart_mon_daemon.sh
-        owner: root
-        group: root
-        mode: 0750
-      listen: "restart ceph mons"
+- name: copy mon restart script
+  template:
+    src: restart_mon_daemon.sh.j2
+    dest: /tmp/restart_mon_daemon.sh
+    owner: root
+    group: root
+    mode: 0750
+  listen: "restart ceph mons"
+  when:
+    - mon_group_name in group_names
+    - inventory_hostname in play_hosts

-- name: restart ceph mon daemon(s)
+- name: restart ceph mon daemon(s) - non container
   command: /tmp/restart_mon_daemon.sh
   listen: "restart ceph mons"
   when:
     # We do not want to run these checks on initial deployment (`socket.rc == 0`)
     - mon_group_name in group_names
+    - not containerized_deployment
     - mon_socket_stat.rc == 0

+- name: restart ceph mon daemon(s) - container
+  command: /tmp/restart_mon_daemon.sh
+  listen: "restart ceph mons"
+  when:
+    # We do not want to run these checks on initial deployment (`socket.rc == 0`)
+    - mon_group_name in group_names
+    - containerized_deployment
+    - ceph_mon_container_stat.stdout_lines|length != 0
+
 # This does not just restart OSDs but everything else too. Unfortunately
 # at this time the ansible role does not have an OSD id list to use
 # for restarting them specifically.
@@ -38,21 +50,7 @@
     - osd_group_name in group_names
     - inventory_hostname in play_hosts

-- name: restart containerized ceph osds daemon(s)
-  command: /tmp/restart_osd_daemon.sh
-  listen: "restart ceph osds"
-  with_items: "{{ socket_osd_container_stat.results | default([]) }}"
-  when:
-    # We do not want to run these checks on initial deployment (`socket_osd_container_stat.results[n].rc == 0`)
-    # except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
-    - osd_group_name in group_names
-    - containerized_deployment
-    - ((crush_location is defined and crush_location) or item.get('rc') == 0)
-    - handler_health_osd_check
-    # See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
-    - inventory_hostname in play_hosts
-
-- name: restart non-containerized ceph osds daemon(s)
+- name: restart ceph osds daemon(s) - non container
   command: /tmp/restart_osd_daemon.sh
   listen: "restart ceph osds"
   when:
@@ -66,27 +64,49 @@
     # See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
     - inventory_hostname in play_hosts

-- block:
-    - name: copy mds restart script
-      template:
-        src: restart_mds_daemon.sh.j2
-        dest: /tmp/restart_mds_daemon.sh
-        owner: root
-        group: root
-        mode: 0750
-      listen: "restart ceph mdss"
-  when:
-    - mds_group_name in group_names
+- name: restart ceph osds daemon(s) - container
+  command: /tmp/restart_osd_daemon.sh
+  listen: "restart ceph osds"
+  when:
+    # We do not want to run these checks on initial deployment (`socket_osd_container_stat.results[n].rc == 0`)
+    # except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
+    - osd_group_name in group_names
+    - containerized_deployment
+    - ((crush_location is defined and crush_location) or ceph_osd_container_stat.stdout_lines|length != 0)
+    - handler_health_osd_check
+    # See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
     - inventory_hostname in play_hosts

-- name: restart ceph mds daemon(s)
-  command: /tmp/restart_mds_daemon.sh
-  listen: "restart ceph mdss"
+- name: copy mds restart script
+  template:
+    src: restart_mds_daemon.sh.j2
+    dest: /tmp/restart_mds_daemon.sh
+    owner: root
+    group: root
+    mode: 0750
+  listen: "restart ceph mdss"
+  when:
+    - mds_group_name in group_names
+    - inventory_hostname in play_hosts
+
+- name: restart ceph mds daemon(s) - non container
+  command: /tmp/restart_mds_daemon.sh
+  listen: "restart ceph mdss"
   when:
     # We do not want to run these checks on initial deployment (`socket.rc == 0`)
     - mds_group_name in group_names
+    - not containerized_deployment
     - mds_socket_stat.rc == 0

+- name: restart ceph mds daemon(s) - container
+  command: /tmp/restart_mds_daemon.sh
+  listen: "restart ceph mdss"
+  when:
+    # We do not want to run these checks on initial deployment (`socket.rc == 0`)
+    - mds_group_name in group_names
+    - containerized_deployment
+    - ceph_mds_container_stat.stdout_lines|length != 0
+
 - name: copy rgw restart script
   template:
     src: restart_rgw_daemon.sh.j2
@@ -99,14 +119,24 @@
     - rgw_group_name in group_names
     - inventory_hostname in play_hosts

-- name: restart ceph rgw daemon(s)
+- name: restart ceph rgw daemon(s) - non container
   command: /tmp/restart_rgw_daemon.sh
   listen: "restart ceph rgws"
   when:
     # We do not want to run these checks on initial deployment (`socket.rc == 0`)
     - rgw_group_name in group_names
+    - not containerized_deployment
     - rgw_socket_stat.rc == 0

+- name: restart ceph rgw daemon(s) - container
+  command: /tmp/restart_rgw_daemon.sh
+  listen: "restart ceph rgws"
+  when:
+    # We do not want to run these checks on initial deployment (`socket.rc == 0`)
+    - rgw_group_name in group_names
+    - containerized_deployment
+    - ceph_rgw_container_stat.stdout_lines|length != 0
+
 - name: copy nfs restart script
   template:
     src: restart_nfs_daemon.sh.j2
@@ -119,14 +149,24 @@
     - nfs_group_name in group_names
     - inventory_hostname in play_hosts

-- name: restart ceph nfs daemon(s)
+- name: restart ceph nfs daemon(s) - non container
   command: /tmp/restart_nfs_daemon.sh
   listen: "restart ceph nfss"
   when:
     # We do not want to run these checks on initial deployment (`socket.rc == 0`)
     - nfs_group_name in group_names
+    - not containerized_deployment
     - nfs_socket_stat.rc == 0

+- name: restart ceph nfs daemon(s) - container
+  command: /tmp/restart_nfs_daemon.sh
+  listen: "restart ceph nfss"
+  when:
+    # We do not want to run these checks on initial deployment (`socket.rc == 0`)
+    - nfs_group_name in group_names
+    - containerized_deployment
+    - ceph_nfs_container_stat.stdout_lines|length != 0
+
 - name: copy rbd mirror restart script
   template:
     src: restart_rbd_mirror_daemon.sh.j2
@@ -139,14 +179,24 @@
     - rbdmirror_group_name in group_names
     - inventory_hostname in play_hosts

-- name: restart ceph rbd mirror daemon(s)
+- name: restart ceph rbd mirror daemon(s) - non container
   command: /tmp/restart_rbd_mirror_daemon.sh
   listen: "restart ceph rbdmirrors"
   when:
     # We do not want to run these checks on initial deployment (`socket.rc == 0`)
     - rbdmirror_group_name in group_names
+    - not containerized_deployment
     - rbd_mirror_socket_stat.rc == 0

+- name: restart ceph rbd mirror daemon(s) - container
+  command: /tmp/restart_rbd_mirror_daemon.sh
+  listen: "restart ceph rbdmirrors"
+  when:
+    # We do not want to run these checks on initial deployment (`socket.rc == 0`)
+    - rbdmirror_group_name in group_names
+    - containerized_deployment
+    - ceph_rbd_mirror_container_stat.stdout_lines|length != 0
+
 - name: copy mgr restart script
   template:
     src: restart_mgr_daemon.sh.j2
@@ -159,10 +209,20 @@
     - mgr_group_name in group_names
     - inventory_hostname in play_hosts

-- name: restart ceph mgr daemon(s)
+- name: restart ceph mgr daemon(s) - non container
   command: /tmp/restart_mgr_daemon.sh
   listen: "restart ceph mgrs"
   when:
     # We do not want to run these checks on initial deployment (`socket.rc == 0`)
     - mgr_group_name in group_names
+    - not containerized_deployment
     - mgr_socket_stat.rc == 0
+
+- name: restart ceph mgr daemon(s) - container
+  command: /tmp/restart_mgr_daemon.sh
+  listen: "restart ceph mgrs"
+  when:
+    # We do not want to run these checks on initial deployment (`socket.rc == 0`)
+    - mgr_group_name in group_names
+    - containerized_deployment
+    - ceph_mgr_container_stat.stdout_lines|length != 0
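All of the restart handlers above share one mechanism: several handlers subscribe to the same `listen` topic, and mutually exclusive `when` guards decide whether the container or the non-container variant actually fires. A minimal sketch of the pattern, with hypothetical `demo` names that are not part of this commit:

---
# One notification fans out to both listeners; the guards ensure
# that only the variant matching the deployment type runs.
- name: restart demo daemon - non container
  command: /tmp/restart_demo_daemon.sh
  listen: "restart demo daemons"
  when: not containerized_deployment

- name: restart demo daemon - container
  command: /tmp/restart_demo_daemon.sh
  listen: "restart demo daemons"
  when: containerized_deployment

A task triggers both listeners with a single `notify: "restart demo daemons"`; Ansible evaluates each handler's `when` at flush time, so only the variant matching the deployment type runs on a given host.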
@@ -1,275 +1,10 @@
 ---
-# These checks are used to avoid running handlers at initial deployment.
-- name: set_fact docker_exec_cmd mon
-  set_fact:
-    docker_exec_cmd: "docker exec ceph-mon-{{ ansible_hostname }}"
-  when:
-    - inventory_hostname in groups.get(mon_group_name, [])
-    - containerized_deployment
-
-- name: check for a ceph mon socket
-  shell: |
-    {{ docker_exec_cmd | default('') }} bash -c 'stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mon*.asok'
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: mon_socket_stat
-  when:
-    - inventory_hostname in groups.get(mon_group_name, [])
-
-- name: check if the ceph mon socket is in-use
-  shell: |
-    {{ docker_exec_cmd | default('') }} bash -c 'fuser --silent {{ mon_socket_stat.stdout }}'
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: mon_socket
-  when:
-    - inventory_hostname in groups.get(mon_group_name, [])
-    - mon_socket_stat.rc == 0
-
-- name: remove ceph mon socket if exists and not used by a process
-  file:
-    name: "{{ mon_socket_stat.stdout }}"
-    state: absent
-  when:
-    - inventory_hostname in groups.get(mon_group_name, [])
-    - not containerized_deployment
-    - mon_socket_stat.rc == 0
-    - mon_socket.rc != 0
-
-- name: check for a ceph osd socket
-  shell: |
-    stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-osd*.asok
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: osd_socket_stat
-  when:
-    - inventory_hostname in groups.get(osd_group_name, [])
-    - not containerized_deployment
-
-- name: check if the ceph osd socket is in-use
-  shell: |
-    fuser --silent {{ osd_socket_stat.stdout }}
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: osd_socket
-  when:
-    - inventory_hostname in groups.get(osd_group_name, [])
-    - not containerized_deployment
-    - osd_socket_stat.rc == 0
-
-- name: remove ceph osd socket if exists and not used by a process
-  file:
-    name: "{{ osd_socket_stat.stdout }}"
-    state: absent
-  when:
-    - inventory_hostname in groups.get(osd_group_name, [])
-    - not containerized_deployment
-    - osd_socket_stat.rc == 0
-    - osd_socket.rc != 0
-
-- name: set_fact docker_exec_cmd mds
-  set_fact:
-    docker_exec_cmd: "docker exec ceph-mds-{{ ansible_hostname }}"
-  when:
-    - inventory_hostname in groups.get(mds_group_name, [])
-    - containerized_deployment
-
-- name: check for a ceph mds socket
-  shell: |
-    {{ docker_exec_cmd | default('') }} bash -c 'stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mds*.asok'
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: mds_socket_stat
-  when:
-    - inventory_hostname in groups.get(mds_group_name, [])
-
-- name: check if the ceph mds socket is in-use
-  shell: |
-    {{ docker_exec_cmd | default('') }} bash -c 'fuser --silent {{ mds_socket_stat.stdout }}'
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: mds_socket
-  when:
-    - inventory_hostname in groups.get(mds_group_name, [])
-    - mds_socket_stat.rc == 0
-
-- name: remove ceph mds socket if exists and not used by a process
-  file:
-    name: "{{ mds_socket_stat.stdout }}"
-    state: absent
-  when:
-    - inventory_hostname in groups.get(mds_group_name, [])
-    - not containerized_deployment
-    - mds_socket_stat.rc == 0
-    - mds_socket.rc != 0
-
-- name: set_fact docker_exec_cmd rgw
-  set_fact:
-    docker_exec_cmd: "docker exec ceph-rgw-{{ ansible_hostname }}"
-  when:
-    - inventory_hostname in groups.get(rgw_group_name, [])
-    - containerized_deployment
-
-- name: check for a ceph rgw socket
-  shell: |
-    {{ docker_exec_cmd | default('') }} bash -c 'stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-client.rgw*.asok'
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: rgw_socket_stat
-  when:
-    - inventory_hostname in groups.get(rgw_group_name, [])
-
-- name: check if the ceph rgw socket is in-use
-  shell: |
-    {{ docker_exec_cmd | default('') }} bash -c 'fuser --silent {{ rgw_socket_stat.stdout }}'
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: rgw_socket
-  when:
-    - inventory_hostname in groups.get(rgw_group_name, [])
-    - rgw_socket_stat.rc == 0
-
-- name: remove ceph rgw socket if exists and not used by a process
-  file:
-    name: "{{ rgw_socket_stat.stdout }}"
-    state: absent
-  when:
-    - inventory_hostname in groups.get(rgw_group_name, [])
-    - not containerized_deployment
-    - rgw_socket_stat.rc == 0
-    - rgw_socket.rc != 0
-
-- name: set_fact docker_exec_cmd mgr
-  set_fact:
-    docker_exec_cmd: "docker exec ceph-mgr-{{ ansible_hostname }}"
-  when:
-    - inventory_hostname in groups.get(mgr_group_name, [])
-    - containerized_deployment
-
-- name: check for a ceph mgr socket
-  shell: |
-    {{ docker_exec_cmd | default('') }} bash -c 'stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mgr*.asok'
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: mgr_socket_stat
-  when:
-    - inventory_hostname in groups.get(mgr_group_name, [])
-
-- name: check if the ceph mgr socket is in-use
-  shell: |
-    {{ docker_exec_cmd | default('') }} bash -c 'fuser --silent {{ mgr_socket_stat.stdout }}'
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: mgr_socket
-  when:
-    - inventory_hostname in groups.get(mgr_group_name, [])
-    - mgr_socket_stat.rc == 0
-
-- name: remove ceph mgr socket if exists and not used by a process
-  file:
-    name: "{{ mgr_socket_stat.stdout }}"
-    state: absent
-  when:
-    - inventory_hostname in groups.get(mgr_group_name, [])
-    - not containerized_deployment
-    - mgr_socket_stat.rc == 0
-    - mgr_socket.rc != 0
-
-- name: set_fact docker_exec_cmd rbd mirror
-  set_fact:
-    docker_exec_cmd: "docker exec ceph-rbd-mirror-{{ ansible_hostname }}"
-  when:
-    - inventory_hostname in groups.get(rbdmirror_group_name, [])
-    - containerized_deployment
-
-- name: check for a ceph rbd mirror socket
-  shell: |
-    {{ docker_exec_cmd | default('') }} bash -c 'stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-client.rbd-mirror*.asok'
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: rbd_mirror_socket_stat
-  when:
-    - inventory_hostname in groups.get(rbdmirror_group_name, [])
-
-- name: check if the ceph rbd mirror socket is in-use
-  shell: |
-    {{ docker_exec_cmd | default('') }} bash -c 'fuser --silent {{ rbd_mirror_socket_stat.stdout }}'
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: rbd_mirror_socket
-  when:
-    - inventory_hostname in groups.get(rbdmirror_group_name, [])
-    - rbd_mirror_socket_stat.rc == 0
-
-- name: remove ceph rbd mirror socket if exists and not used by a process
-  file:
-    name: "{{ rbd_mirror_socket_stat.stdout }}"
-    state: absent
-  when:
-    - inventory_hostname in groups.get(rbdmirror_group_name, [])
-    - not containerized_deployment
-    - rbd_mirror_socket_stat.rc == 0
-    - rbd_mirror_socket.rc != 0
-
-- name: set_fact docker_exec_cmd nfs ganesha
-  set_fact:
-    docker_exec_cmd: "docker exec ceph-rbd-mirror-{{ ansible_hostname }}"
-  when:
-    - inventory_hostname in groups.get(nfs_group_name, [])
-    - containerized_deployment
-
-- name: check for a ceph nfs ganesha socket
-  shell: |
-    {{ docker_exec_cmd | default('') }} bash -c 'stat --printf=%n /var/run/ganesha.pid'
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: nfs_socket_stat
-  when:
-    - inventory_hostname in groups.get(nfs_group_name, [])
-
-- name: check if the ceph nfs ganesha socket is in-use
-  shell: |
-    {{ docker_exec_cmd | default('') }} bash -c 'fuser --silent {{ nfs_socket_stat.stdout }}'
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: nfs_socket
-  when:
-    - inventory_hostname in groups.get(nfs_group_name, [])
-    - nfs_socket_stat.rc == 0
-
-- name: remove ceph nfs ganesha socket if exists and not used by a process
-  file:
-    name: "{{ nfs_socket_stat.stdout }}"
-    state: absent
-  when:
-    - inventory_hostname in groups.get(nfs_group_name, [])
-    - not containerized_deployment
-    - nfs_socket_stat.rc == 0
-    - nfs_socket.rc != 0
-
-- name: check for a ceph socket in containerized deployment (osds)
-  shell: |
-    docker exec ceph-osd-"{{ ansible_hostname }}"-"{{ item | replace('/dev/', '') }}" bash -c 'stat --printf=%n /var/run/ceph/*.asok'
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: socket_osd_container_stat
-  with_items: "{{ devices }}"
-  when:
-    - containerized_deployment
-    - inventory_hostname in groups.get(osd_group_name, [])
+- name: include check_socket_container.yml
+  include: check_socket_container.yml
+  when:
+    - containerized_deployment
+
+- name: include check_socket_non_container.yml
+  include: check_socket_container.yml
+  when:
+    - containerized_deployment
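The two included files register the facts (`*_socket_stat`, `*_container_stat`, and friends) that the handlers' `when` clauses consume: `stat` exits 0 only when the admin socket already exists, and `fuser` exits 0 only when a process holds it, so plain `rc` comparisons distinguish a live daemon from a stale socket or a first deployment. A sketch of the register-and-gate pattern in isolation, using hypothetical `demo` names:

---
- name: check for a demo socket
  command: stat --printf=%n /var/run/demo.asok
  changed_when: false
  failed_when: false        # a missing socket is expected on first deploy
  register: demo_socket_stat

- name: act only when the socket already exists
  command: /usr/local/bin/handle_demo.sh
  when: demo_socket_stat.rc == 0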
@@ -0,0 +1,63 @@
+---
+- name: check for a mon container
+  command: "docker ps -q --filter='name=ceph-mon-{{ ansible_hostname }}'"
+  register: ceph_mon_container_stat
+  changed_when: false
+  failed_when: false
+  always_run: true
+  when:
+    - inventory_hostname in groups.get(mon_group_name, [])
+
+- name: check for an osd container
+  command: "docker ps -q --filter='name=ceph-osd-{{ ansible_hostname }}'"
+  register: ceph_osd_container_stat
+  changed_when: false
+  failed_when: false
+  always_run: true
+  when:
+    - inventory_hostname in groups.get(osd_group_name, [])
+
+- name: check for a mds container
+  command: "docker ps -q --filter='name=ceph-mds-{{ ansible_hostname }}'"
+  register: ceph_mds_container_stat
+  changed_when: false
+  failed_when: false
+  always_run: true
+  when:
+    - inventory_hostname in groups.get(mds_group_name, [])
+
+- name: check for a rgw container
+  command: "docker ps -q --filter='name=ceph-rgw-{{ ansible_hostname }}'"
+  register: ceph_rgw_container_stat
+  changed_when: false
+  failed_when: false
+  always_run: true
+  when:
+    - inventory_hostname in groups.get(rgw_group_name, [])
+
+- name: check for a mgr container
+  command: "docker ps -q --filter='name=ceph-mgr-{{ ansible_hostname }}'"
+  register: ceph_mgr_container_stat
+  changed_when: false
+  failed_when: false
+  always_run: true
+  when:
+    - inventory_hostname in groups.get(mgr_group_name, [])
+
+- name: check for a rbd mirror container
+  command: "docker ps -q --filter='name=ceph-rbd-mirror-{{ ansible_hostname }}'"
+  register: ceph_rbd_mirror_container_stat
+  changed_when: false
+  failed_when: false
+  always_run: true
+  when:
+    - inventory_hostname in groups.get(rbdmirror_group_name, [])
+
+- name: check for a nfs container
+  command: "docker ps -q --filter='name=ceph-nfs-{{ ansible_hostname }}'"
+  register: ceph_nfs_container_stat
+  changed_when: false
+  failed_when: false
+  always_run: true
+  when:
+    - inventory_hostname in groups.get(nfs_group_name, [])
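`docker ps -q` prints one container ID per line for each running container matching the filter, so `stdout_lines` is empty whenever no such container is up; the handlers test `stdout_lines|length != 0` to skip restarts on hosts where the daemon container was never started. A small sketch of that gate, with a hypothetical `demo` container name:

---
- name: check for a demo container
  command: "docker ps -q --filter='name=demo-{{ ansible_hostname }}'"
  register: demo_container_stat
  changed_when: false
  failed_when: false

- name: restart the demo container only if it is running
  command: docker restart demo-{{ ansible_hostname }}
  when: demo_container_stat.stdout_lines | length != 0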
@@ -0,0 +1,201 @@
+---
+- name: check for a ceph mon socket
+  shell: stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mon*.asok
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: mon_socket_stat
+  when:
+    - inventory_hostname in groups.get(mon_group_name, [])
+
+- name: check if the ceph mon socket is in-use
+  command: fuser --silent {{ mon_socket_stat.stdout }}
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: mon_socket
+  when:
+    - inventory_hostname in groups.get(mon_group_name, [])
+    - mon_socket_stat.rc == 0
+
+- name: remove ceph mon socket if exists and not used by a process
+  file:
+    name: "{{ mon_socket_stat.stdout }}"
+    state: absent
+  when:
+    - inventory_hostname in groups.get(mon_group_name, [])
+    - mon_socket_stat.rc == 0
+    - mon_socket.rc != 0
+
+- name: check for a ceph osd socket
+  shell: |
+    stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-osd*.asok
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: osd_socket_stat
+  when:
+    - inventory_hostname in groups.get(osd_group_name, [])
+
+- name: check if the ceph osd socket is in-use
+  command: fuser --silent {{ osd_socket_stat.stdout }}
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: osd_socket
+  when:
+    - inventory_hostname in groups.get(osd_group_name, [])
+    - osd_socket_stat.rc == 0
+
+- name: remove ceph osd socket if exists and not used by a process
+  file:
+    name: "{{ osd_socket_stat.stdout }}"
+    state: absent
+  when:
+    - inventory_hostname in groups.get(osd_group_name, [])
+    - osd_socket_stat.rc == 0
+    - osd_socket.rc != 0
+
+- name: check for a ceph mds socket
+  shell: |
+    stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mds*.asok
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: mds_socket_stat
+  when:
+    - inventory_hostname in groups.get(mds_group_name, [])
+
+- name: check if the ceph mds socket is in-use
+  command: fuser --silent {{ mds_socket_stat.stdout }}
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: mds_socket
+  when:
+    - inventory_hostname in groups.get(mds_group_name, [])
+    - mds_socket_stat.rc == 0
+
+- name: remove ceph mds socket if exists and not used by a process
+  file:
+    name: "{{ mds_socket_stat.stdout }}"
+    state: absent
+  when:
+    - inventory_hostname in groups.get(mds_group_name, [])
+    - mds_socket_stat.rc == 0
+    - mds_socket.rc != 0
+
+- name: check for a ceph rgw socket
+  shell: |
+    stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-client.rgw*.asok
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: rgw_socket_stat
+  when:
+    - inventory_hostname in groups.get(rgw_group_name, [])
+
+- name: check if the ceph rgw socket is in-use
+  command: fuser --silent {{ rgw_socket_stat.stdout }}
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: rgw_socket
+  when:
+    - inventory_hostname in groups.get(rgw_group_name, [])
+    - rgw_socket_stat.rc == 0
+
+- name: remove ceph rgw socket if exists and not used by a process
+  file:
+    name: "{{ rgw_socket_stat.stdout }}"
+    state: absent
+  when:
+    - inventory_hostname in groups.get(rgw_group_name, [])
+    - rgw_socket_stat.rc == 0
+    - rgw_socket.rc != 0
+
+- name: check for a ceph mgr socket
+  shell: |
+    stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mgr*.asok
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: mgr_socket_stat
+  when:
+    - inventory_hostname in groups.get(mgr_group_name, [])
+
+- name: check if the ceph mgr socket is in-use
+  command: fuser --silent {{ mgr_socket_stat.stdout }}
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: mgr_socket
+  when:
+    - inventory_hostname in groups.get(mgr_group_name, [])
+    - mgr_socket_stat.rc == 0
+
+- name: remove ceph mgr socket if exists and not used by a process
+  file:
+    name: "{{ mgr_socket_stat.stdout }}"
+    state: absent
+  when:
+    - inventory_hostname in groups.get(mgr_group_name, [])
+    - mgr_socket_stat.rc == 0
+    - mgr_socket.rc != 0
+
+- name: check for a ceph rbd mirror socket
+  shell: |
+    stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-client.rbd-mirror*.asok
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: rbd_mirror_socket_stat
+  when:
+    - inventory_hostname in groups.get(rbdmirror_group_name, [])
+
+- name: check if the ceph rbd mirror socket is in-use
+  command: fuser --silent {{ rbd_mirror_socket_stat.stdout }}
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: rbd_mirror_socket
+  when:
+    - inventory_hostname in groups.get(rbdmirror_group_name, [])
+    - rbd_mirror_socket_stat.rc == 0
+
+- name: remove ceph rbd mirror socket if exists and not used by a process
+  file:
+    name: "{{ rbd_mirror_socket_stat.stdout }}"
+    state: absent
+  when:
+    - inventory_hostname in groups.get(rbdmirror_group_name, [])
+    - rbd_mirror_socket_stat.rc == 0
+    - rbd_mirror_socket.rc != 0
+
+- name: check for a ceph nfs ganesha socket
+  command: stat --printf=%n /var/run/ganesha.pid
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: nfs_socket_stat
+  when:
+    - inventory_hostname in groups.get(nfs_group_name, [])
+
+- name: check if the ceph nfs ganesha socket is in-use
+  command: fuser --silent {{ nfs_socket_stat.stdout }}
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: nfs_socket
+  when:
+    - inventory_hostname in groups.get(nfs_group_name, [])
+    - nfs_socket_stat.rc == 0
+
+- name: remove ceph nfs ganesha socket if exists and not used by a process
+  file:
+    name: "{{ nfs_socket_stat.stdout }}"
+    state: absent
+  when:
+    - inventory_hostname in groups.get(nfs_group_name, [])
+    - nfs_socket_stat.rc == 0
+    - nfs_socket.rc != 0
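`always_run: true` forces these fact-gathering tasks to execute even under `--check`, so the registered variables are always defined when the handlers consult them; later Ansible releases deprecated it in favour of `check_mode: no`. A sketch of the modern spelling, assuming Ansible 2.2 or newer and a hypothetical `demo` socket path:

---
- name: gather a fact even in check mode
  command: stat --printf=%n /var/run/demo.asok
  changed_when: false
  failed_when: false
  check_mode: no            # replaces the deprecated always_run: true
  register: demo_socket_stat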
@@ -145,3 +145,4 @@
   devices: "{{ devices | reject('search','/dev/disk') | list | unique }}"
   when:
     - inventory_hostname in groups.get(osd_group_name, [])
+
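The `reject('search','/dev/disk')` filter drops `/dev/disk/by-*` style entries from the device list and `unique` removes duplicates before the fact is re-set. A standalone sketch of the same filter chain with made-up values:

---
- name: normalize a device list
  set_fact:
    devices: "{{ ['/dev/sda', '/dev/sda', '/dev/disk/by-id/ata-foo'] | reject('search','/dev/disk') | list | unique }}"
  # yields ['/dev/sda']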
@@ -4,6 +4,9 @@ RETRIES="{{ handler_health_mds_check_retries }}"
 DELAY="{{ handler_health_mds_check_delay }}"
 MDS_NAME="{{ ansible_hostname }}"
 SOCKET=/var/run/ceph/{{ cluster }}-mds.${MDS_NAME}.asok
+{% if containerized_deployment %}
+DOCKER_EXEC="docker exec ceph-mds-{{ ansible_hostname }}"
+{% endif %}

 # First, restart the daemon
 systemctl restart ceph-mds@${MDS_NAME}
@@ -11,7 +14,7 @@ systemctl restart ceph-mds@${MDS_NAME}
 COUNT=10
 # Wait and ensure the socket exists after restarting the daemds
 while [ $RETRIES -ne 0 ]; do
-  {{ docker_exec_cmd }} test -S $SOCKET && exit 0
+  $DOCKER_EXEC test -S $SOCKET && exit 0
   sleep $DELAY
   let RETRIES=RETRIES-1
 done
@@ -4,6 +4,9 @@ RETRIES="{{ handler_health_mgr_check_retries }}"
 DELAY="{{ handler_health_mgr_check_delay }}"
 MGR_NAME="{{ ansible_hostname }}"
 SOCKET=/var/run/ceph/{{ cluster }}-mgr.${MGR_NAME}.asok
+{% if containerized_deployment %}
+DOCKER_EXEC="docker exec ceph-mgr-{{ ansible_hostname }}"
+{% endif %}

 # First, restart the daemon
 systemctl restart ceph-mgr@${MGR_NAME}
@@ -11,7 +14,7 @@ systemctl restart ceph-mgr@${MGR_NAME}
 COUNT=10
 # Wait and ensure the socket exists after restarting the daemds
 while [ $RETRIES -ne 0 ]; do
-  {{ docker_exec_cmd }} test -S $SOCKET && exit 0
+  $DOCKER_EXEC test -S $SOCKET && exit 0
   sleep $DELAY
   let RETRIES=RETRIES-1
 done
@@ -4,11 +4,14 @@ RETRIES="{{ handler_health_mon_check_retries }}"
 DELAY="{{ handler_health_mon_check_delay }}"
 MONITOR_NAME="{{ monitor_name }}"
 SOCKET=/var/run/ceph/{{ cluster }}-mon.${MONITOR_NAME}.asok
+{% if containerized_deployment %}
+DOCKER_EXEC="docker exec ceph-mon-{{ ansible_hostname }}"
+{% endif %}


 check_quorum() {
 while [ $RETRIES -ne 0 ]; do
-  MEMBERS=$({{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s --format json | sed -r 's/.*"quorum_names":(\[[^]]+\]).*/\1/')
+  MEMBERS=$($DOCKER_EXEC ceph --cluster {{ cluster }} -s --format json | sed -r 's/.*"quorum_names":(\[[^]]+\]).*/\1/')
   test "${MEMBERS/$MONITOR_NAME}" != "$MEMBERS" && exit 0
   sleep $DELAY
   let RETRIES=RETRIES-1
@@ -16,7 +19,7 @@ done
 # If we reach this point, it means there is a problem with the quorum
 echo "Error with quorum."
 echo "cluster status:"
-{{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s
+$DOCKER_EXEC ceph --cluster {{ cluster }} -s
 exit 1
 }

@@ -26,7 +29,7 @@ systemctl restart ceph-mon@${MONITOR_NAME}
 COUNT=10
 # Wait and ensure the socket exists after restarting the daemon
 while [ $COUNT -ne 0 ]; do
-  {{ docker_exec_cmd }} test -S $SOCKET && check_quorum
+  $DOCKER_EXEC test -S $SOCKET && check_quorum
   sleep $DELAY
   let COUNT=COUNT-1
 done
@@ -4,6 +4,9 @@ RETRIES="{{ handler_health_nfs_check_retries }}"
 DELAY="{{ handler_health_nfs_check_delay }}"
 NFS_NAME="{{ ansible_hostname }}"
 PID=/var/run/ganesha.pid
+{% if containerized_deployment %}
+DOCKER_EXEC="docker exec ceph-nfs-{{ ansible_hostname }}"
+{% endif %}

 # First, restart the daemon
 {% if containerized_deployment -%}
@@ -11,7 +14,7 @@ systemctl restart ceph-nfs@${NFS_NAME}
 COUNT=10
 # Wait and ensure the pid exists after restarting the daemon
 while [ $RETRIES -ne 0 ]; do
-  {{ docker_exec_cmd }} test -f $PID && exit 0
+  $DOCKER_EXEC test -f $PID && exit 0
   sleep $DELAY
   let RETRIES=RETRIES-1
 done
@@ -4,6 +4,9 @@ RETRIES="{{ handler_health_rbd_mirror_check_retries }}"
 DELAY="{{ handler_health_rbd_mirror_check_delay }}"
 RBD_MIRROR_NAME="{{ ansible_hostname }}"
 SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.${RBD_MIRROR_NAME}.asok
+{% if containerized_deployment %}
+DOCKER_EXEC="docker exec ceph-rbd-mirror-{{ ansible_hostname }}"
+{% endif %}

 # First, restart the daemon
 systemctl restart ceph-rbd-mirror@rbd-mirror.${RBD_MIRROR_NAME}
@@ -11,7 +14,7 @@ systemctl restart ceph-rbd-mirror@rbd-mirror.${RBD_MIRROR_NAME}
 COUNT=10
 # Wait and ensure the socket exists after restarting the daemon
 while [ $RETRIES -ne 0 ]; do
-  {{ docker_exec_cmd }} test -S $SOCKET && exit 0
+  $DOCKER_EXEC test -S $SOCKET && exit 0
   sleep $DELAY
   let RETRIES=RETRIES-1
 done
@@ -5,6 +5,9 @@ DELAY="{{ handler_health_rgw_check_delay }}"
 RGW_NAME="{{ ansible_hostname }}"
 RGW_PORT="{{ radosgw_civetweb_port }}"
 SOCKET=/var/run/ceph/{{ cluster }}-client.rgw.${RGW_NAME}.asok
+{% if containerized_deployment %}
+DOCKER_EXEC="docker exec ceph-rgw-{{ ansible_hostname }}"
+{% endif %}

 {% if radosgw_address_block | length > 0 %}
 {% if ip_version == 'ipv4' -%}
@@ -28,9 +31,9 @@ RGW_IP={{ hostvars[inventory_hostname][interface][ip_version]['address'] }}
 {%- endif %}

 check_for_curl_or_wget() {
-  if {{ docker_exec_cmd }} command -v wget &>/dev/null; then
+  if $DOCKER_EXEC command -v wget &>/dev/null; then
     rgw_test_command="wget --quiet"
-  elif {{ docker_exec_cmd }} command -v curl &>/dev/null; then
+  elif $DOCKER_EXEC command -v curl &>/dev/null; then
     rgw_test_command="curl --fail --silent --output /dev/null"
   else
     echo "It seems that neither curl or wget are available on your system."
@@ -57,7 +60,7 @@ systemctl restart ceph-radosgw@rgw.${RGW_NAME}
 COUNT=10
 # Wait and ensure the socket exists after restarting the daemon
 while [ $COUNT -ne 0 ]; do
-  {{ docker_exec_cmd }} test -S $SOCKET && check_rest
+  $DOCKER_EXEC test -S $SOCKET && check_rest
   sleep $DELAY
   let COUNT=COUNT-1
 done
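Every restart script template above gains the same preamble: when `containerized_deployment` is true, Jinja2 renders a `DOCKER_EXEC` prefix, and on bare-metal hosts the variable expands to nothing so the very same probe runs directly on the host. The playbook side that renders and runs such a script could look like the following sketch (hypothetical `demo` names, not taken from the commit):

---
- name: copy demo restart script
  template:
    src: restart_demo_daemon.sh.j2   # contains the containerized_deployment preamble
    dest: /tmp/restart_demo_daemon.sh
    owner: root
    group: root
    mode: 0750

- name: restart demo daemon
  command: /tmp/restart_demo_daemon.sh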
@@ -1,4 +1,10 @@
 ---
+- name: set_fact docker_exec_cmd
+  set_fact:
+    docker_exec_cmd: "docker exec ceph-iscsi-gw-{{ ansible_hostname }}"
+  when:
+    - containerized_deployment
+
 - name: check_mandatory_vars.yml
   include: check_mandatory_vars.yml

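From here on, each role seeds `docker_exec_cmd` once at the top of its task list. Later tasks prepend it with a default filter, so the same command line works in both modes; on bare-metal hosts the variable is undefined and collapses to an empty string. A one-task sketch of the consumption side:

---
- name: run a ceph command in either deployment mode
  command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
  changed_when: false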
@@ -1,4 +1,8 @@
 ---
+- name: set_fact docker_exec_cmd mds
+  set_fact:
+    docker_exec_cmd: "docker exec ceph-mds-{{ ansible_hostname }}"
+
 - name: set_fact ceph_config_keys
   set_fact:
     ceph_config_keys:
@@ -1,4 +1,10 @@
 ---
+- name: set_fact docker_exec_cmd
+  set_fact:
+    docker_exec_cmd: "docker exec ceph-mds-{{ ansible_hostname }}"
+  when:
+    - containerized_deployment
+
 - name: non_containerized.yml
   include: non_containerized.yml
   when: not containerized_deployment
@@ -1,4 +1,10 @@
 ---
+- name: set_fact docker_exec_cmd
+  set_fact:
+    docker_exec_cmd: "docker exec ceph-mgr-{{ ansible_hostname }}"
+  when:
+    - containerized_deployment
+
 - name: include pre_requisite.yml
   include: pre_requisite.yml
   when: not containerized_deployment
@@ -1,4 +1,10 @@
 ---
+- name: set_fact docker_exec_cmd
+  set_fact:
+    docker_exec_cmd: "docker exec ceph-mon-{{ ansible_hostname }}"
+  when:
+    - containerized_deployment
+
 - name: include check_mandatory_vars.yml
   include: check_mandatory_vars.yml

@@ -7,6 +7,7 @@

 - name: create rgw nfs user
   command: "{{ docker_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user create --uid={{ ceph_nfs_rgw_user }} --display-name='RGW NFS User'"
+  run_once: true
   register: rgwuser
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
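Adding `run_once: true` next to `delegate_to` makes the user-creation command execute a single time on the first monitor rather than once per host in the play, which avoids re-running a non-idempotent `radosgw-admin user create` from every NFS node. A sketch of the combination with a hypothetical command:

---
- name: do cluster-wide setup exactly once
  command: /usr/bin/demo-cluster-setup
  run_once: true
  delegate_to: "{{ groups[mon_group_name][0] }}"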
@@ -1,4 +1,10 @@
 ---
+- name: set_fact docker_exec_cmd
+  set_fact:
+    docker_exec_cmd: "docker exec ceph-nfs-{{ ansible_hostname }}"
+  when:
+    - containerized_deployment
+
 - name: include pre_requisite_non_container.yml
   include: pre_requisite_non_container.yml
   when:
@@ -1,4 +1,10 @@
 ---
+- name: set_fact docker_exec_cmd
+  set_fact:
+    docker_exec_cmd: "docker exec ceph-osd-{{ ansible_hostname }}"
+  when:
+    - containerized_deployment
+
 - name: include check_mandatory_vars.yml
   include: check_mandatory_vars.yml

@@ -1,4 +1,10 @@
 ---
+- name: set_fact docker_exec_cmd
+  set_fact:
+    docker_exec_cmd: "docker exec ceph-rbd-mirror-{{ ansible_hostname }}"
+  when:
+    - containerized_deployment
+
 - name: include pre_requisite.yml
   include: pre_requisite.yml
   when:
@@ -1,4 +1,10 @@
 ---
+- name: set_fact docker_exec_cmd
+  set_fact:
+    docker_exec_cmd: "docker exec ceph-restapi-{{ ansible_hostname }}"
+  when:
+    - containerized_deployment
+
 - name: include pre_requisite.yml
   include: pre_requisite.yml
   when:
@@ -1,4 +1,10 @@
 ---
+- name: set_fact docker_exec_cmd
+  set_fact:
+    docker_exec_cmd: "docker exec ceph-rgw-{{ ansible_hostname }}"
+  when:
+    - containerized_deployment
+
 - name: include pre_requisite.yml
   include: pre_requisite.yml
   when:
@@ -0,0 +1 @@
+../../../../../Vagrantfile
@@ -0,0 +1,32 @@
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+containerized_deployment: True
+cluster: test
+monitor_interface: eth1
+radosgw_interface: eth1
+ceph_mon_docker_subnet: "{{ public_network }}"
+journal_size: 100
+ceph_docker_on_openstack: False
+public_network: "192.168.15.0/24"
+cluster_network: "192.168.16.0/24"
+osd_scenario: collocated
+ceph_rgw_civetweb_port: 8080
+osd_objectstore: filestore
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
+devices:
+  - /dev/sda
+  - /dev/sdb
+ceph_osd_docker_run_script_path: /var/tmp
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+ceph_conf_overrides:
+  global:
+    osd_pool_default_pg_num: 8
+    osd_pool_default_size: 1
+user_config: True
+keys:
+  - { name: client.test, key: "AQAin8tUoMPDGRAACcfAQHbq4eTuUoTCZdW1Uw==", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=test", mode: "0600", acls: [] }
+  - { name: client.test2, key: "AQAin8tUAJkGGhAA8WZ8Lz5c7IkT8QZ5s7bI1A==", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=test2", mode: "0600", acls: [] }
@@ -0,0 +1,27 @@
+[mons]
+mon0
+mon1
+mon2
+
+[osds]
+osd0
+
+[mdss]
+mds0
+rgw0
+
+[rgws]
+rgw0
+mds0
+
+[mgrs]
+mon0
+osd0
+
+[rbdmirrors]
+rgw0
+mds0
+
+[nfss]
+rgw0
+mds0
@@ -0,0 +1,63 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 1
+mds_vms: 1
+rgw_vms: 1
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# Deploy RESTAPI on each of the Monitors
+restapi: true
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.15
+cluster_subnet: 192.168.16
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sda', '/dev/sdb' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+#   - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+#   - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+client_vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
@@ -30,6 +30,7 @@ class TestMDSs(object):
     @pytest.mark.docker
     def test_docker_mds_is_up(self, node, host):
         hostname = node["vars"]["inventory_hostname"]
+        hostname = node["groups"]["mons"][0]["inventory_hostname"]
         cmd = "sudo docker exec ceph-mds-{hostname} ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
             hostname=node["vars"]["inventory_hostname"],
             cluster=node["cluster_name"]
tox.ini
4
tox.ini
|
@ -1,6 +1,6 @@
|
||||||
[tox]
|
[tox]
|
||||||
envlist = {dev,jewel,luminous,rhcs}-{ansible2.2,ansible2.3}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster,purge_dmcrypt,docker_dedicated_journal,docker_dmcrypt_journal_collocation,update_dmcrypt,update_cluster,cluster,purge_docker_cluster,update_docker_cluster,switch_to_containers}
|
envlist = {dev,jewel,luminous,rhcs}-{ansible2.2,ansible2.3}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster,purge_dmcrypt,docker_dedicated_journal,docker_dmcrypt_journal_collocation,update_dmcrypt,update_cluster,cluster,purge_docker_cluster,update_docker_cluster,switch_to_containers}
|
||||||
{dev,luminous}-{ansible2.2,ansible2.3}-{bluestore_journal_collocation,bluestore_cluster,bluestore_dmcrypt_journal,bluestore_dmcrypt_journal_collocation,bluestore_docker_dedicated_journal,bluestore_docker_dmcrypt_journal_collocation,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,journal_collocation_auto,journal_collocation_auto_dmcrypt,shrink_mon_container,shrink_osd_container}
|
{dev,luminous}-{ansible2.2,ansible2.3}-{bluestore_journal_collocation,bluestore_cluster,bluestore_dmcrypt_journal,bluestore_dmcrypt_journal_collocation,bluestore_docker_dedicated_journal,bluestore_docker_dmcrypt_journal_collocation,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,journal_collocation_auto,journal_collocation_auto_dmcrypt,shrink_mon_container,shrink_osd_container,docker_cluster_collocation}
|
||||||
|
|
||||||
skipsdist = True
|
skipsdist = True
|
||||||
|
|
||||||
|
@ -126,6 +126,7 @@ setenv=
|
||||||
# only available for ansible >= 2.2
|
# only available for ansible >= 2.2
|
||||||
ANSIBLE_STDOUT_CALLBACK = debug
|
ANSIBLE_STDOUT_CALLBACK = debug
|
||||||
docker_cluster: PLAYBOOK = site-docker.yml.sample
|
docker_cluster: PLAYBOOK = site-docker.yml.sample
|
||||||
|
docker_cluster_collocation: PLAYBOOK = site-docker.yml.sample
|
||||||
update_docker_cluster: PLAYBOOK = site-docker.yml.sample
|
update_docker_cluster: PLAYBOOK = site-docker.yml.sample
|
||||||
purge_docker_cluster: PLAYBOOK = site-docker.yml.sample
|
purge_docker_cluster: PLAYBOOK = site-docker.yml.sample
|
||||||
purge_docker_cluster: PURGE_PLAYBOOK = purge-docker-cluster.yml
|
purge_docker_cluster: PURGE_PLAYBOOK = purge-docker-cluster.yml
|
||||||
|
@ -173,6 +174,7 @@ changedir=
|
||||||
cluster: {toxinidir}/tests/functional/centos/7/cluster
|
cluster: {toxinidir}/tests/functional/centos/7/cluster
|
||||||
# tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using docker
|
# tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using docker
|
||||||
docker_cluster: {toxinidir}/tests/functional/centos/7/docker
|
docker_cluster: {toxinidir}/tests/functional/centos/7/docker
|
||||||
|
docker_cluster_collocation: {toxinidir}/tests/functional/centos/7/docker-collocation
|
||||||
update_docker_cluster: {toxinidir}/tests/functional/centos/7/docker
|
update_docker_cluster: {toxinidir}/tests/functional/centos/7/docker
|
||||||
purge_docker_cluster: {toxinidir}/tests/functional/centos/7/docker
|
purge_docker_cluster: {toxinidir}/tests/functional/centos/7/docker
|
||||||
docker_dedicated_journal: {toxinidir}/tests/functional/centos/7/docker-ded-jrn
|
docker_dedicated_journal: {toxinidir}/tests/functional/centos/7/docker-ded-jrn
|
||||||
|
|