rbd: fix restart script for jewel

In Jewel, we don't use the bootstrap-rbd keyring for rbd-mirror nodes,
which results in a socket path/name that differs depending on which
Ceph release you are deploying.
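
For example, assuming the default cluster name "ceph" (the actual prefix
comes from the {{ cluster }} variable) and using <hostname> as a stand-in
for ansible_hostname, the rbd-mirror admin socket ends up at:

  Jewel:               /var/run/ceph/ceph-client.admin.asok
  Luminous and later:  /var/run/ceph/ceph-client.rbd-mirror.<hostname>.asok

since on Jewel the playbook runs the daemon with the admin keyring (no
bootstrap-rbd), while from Luminous onwards it runs under its own
client.rbd-mirror.<hostname> identity.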

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/2073/head
Guillaume Abrioux 2017-10-17 18:28:06 +02:00
parent 897136b368
commit c2850b11be
2 changed files with 7 additions and 2 deletions


@@ -51,7 +51,8 @@
 - restart ceph osds
 - restart ceph mdss
 - restart ceph rgws
-- restart ceph nfss
+- restart ceph mgrs
+- restart ceph rbdmirrors
 when:
 - not containerized_deployment|bool
@@ -118,8 +119,8 @@
 - restart ceph osds
 - restart ceph mdss
 - restart ceph rgws
-- restart ceph rbdmirrors
 - restart ceph mgrs
+- restart ceph rbdmirrors
 - name: set fsid fact when generate_fsid = true
   set_fact:


@@ -3,7 +3,11 @@
 RETRIES="{{ handler_health_rbd_mirror_check_retries }}"
 DELAY="{{ handler_health_rbd_mirror_check_delay }}"
 RBD_MIRROR_NAME="{{ ansible_hostname }}"
+{% if ceph_release_num[ceph_release] < ceph_release_num['luminous'] %}
+SOCKET=/var/run/ceph/{{ cluster }}-client.admin.asok
+{% else %}
 SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.${RBD_MIRROR_NAME}.asok
+{% endif %}
 {% if containerized_deployment %}
 DOCKER_EXEC="docker exec ceph-rbd-mirror-{{ ansible_hostname }}"
 {% endif %}
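
As an illustration, here is roughly what the top of the rendered restart
script would look like on a non-containerized Jewel node. The cluster name
"ceph", the hostname "rbd-mirror0", and the retry/delay numbers are made-up
example values; the real ones come from {{ cluster }}, ansible_hostname and
the handler_health_rbd_mirror_check_* variables:

  RETRIES="5"
  DELAY="10"
  RBD_MIRROR_NAME="rbd-mirror0"
  SOCKET=/var/run/ceph/ceph-client.admin.asok

On Luminous or later the same template keeps the per-daemon socket instead:

  SOCKET=/var/run/ceph/ceph-client.rbd-mirror.${RBD_MIRROR_NAME}.asok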