ceph-ansible/roles/ceph-defaults/handlers/main.yml

---
- name: update apt cache
  apt:
    update_cache: yes
  when:
    - ansible_os_family == 'Debian'
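
# Handlers only run when a task notifies them. As a hypothetical illustration,
# a task elsewhere that adds an apt source would trigger the handler above:
#
#   - name: add ceph apt repository                       # hypothetical task
#     apt_repository:
#       repo: deb https://download.ceph.com/debian-luminous stretch main
#       state: present
#     notify: update apt cache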

- block:
    - name: copy mon restart script
      template:
        src: restart_mon_daemon.sh.j2
        dest: /tmp/restart_mon_daemon.sh
        owner: root
        group: root
        mode: 0750
      listen: "restart ceph mons"

    - name: restart ceph mon daemon(s)
      command: /tmp/restart_mon_daemon.sh
      listen: "restart ceph mons"
  when:
    # We do not want to run these checks on initial deployment (`mon_socket_stat.rc == 0`)
    - mon_socket_stat.rc == 0
    - mon_group_name in group_names
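
# Both handlers in the block above subscribe to the same `listen` topic, so a
# single notification runs them in order: first copy the script, then execute
# it. A hypothetical notifying task (it would live in another role, not here):
#
#   - name: deploy ceph configuration                     # hypothetical task
#     template:
#       src: ceph.conf.j2
#       dest: /etc/ceph/ceph.conf
#     notify: "restart ceph mons"
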
# This does not just restart OSDs but everything else too. Unfortunately
# at this time the ansible role does not have an OSD id list to use
# for restarting them specifically.
- name: copy osd restart script
  template:
    src: restart_osd_daemon.sh.j2
    dest: /tmp/restart_osd_daemon.sh
    owner: root
    group: root
    mode: 0750
  listen: "restart ceph osds"
  when:
    - inventory_hostname in play_hosts
    - osd_group_name in group_names

- name: restart containerized ceph osd daemon(s)
  command: /tmp/restart_osd_daemon.sh
  listen: "restart ceph osds"
  with_items: "{{ socket_osd_container_stat.results | default([]) }}"
  when:
    # We do not want to run these checks on initial deployment (`socket_osd_container_stat.results[n].rc == 0`),
    # except when a crush location is specified: ceph-disk starts the osds before the osd crush location is set.
    - containerized_deployment
    - ((crush_location is defined and crush_location) or item.get('rc') == 0)
    - handler_health_osd_check
    # See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
    - inventory_hostname in play_hosts
    - osd_group_name in group_names
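
# `socket_osd_container_stat` is assumed to be registered earlier in the play,
# outside this file, by probing each OSD container for an admin socket, so
# every entry in `results` carries the probe's return code that
# item.get('rc') reads above. A hypothetical sketch of such a probe:
#
#   - name: check for a ceph socket in the osd containers  # hypothetical
#     command: docker exec ceph-osd-{{ ansible_hostname }} stat /var/run/ceph/ceph-osd.{{ item }}.asok
#     with_items: "{{ osd_ids | default([]) }}"            # hypothetical variable
#     changed_when: false
#     failed_when: false
#     register: socket_osd_container_stat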

- name: restart non-containerized ceph osd daemon(s)
  command: /tmp/restart_osd_daemon.sh
  listen: "restart ceph osds"
  when:
    - not containerized_deployment
    # We do not want to run these checks on initial deployment (`osd_socket_stat.rc == 0`),
    # except when a crush location is specified: ceph-disk starts the osds before the osd crush location is set.
    - ((crush_location is defined and crush_location) or osd_socket_stat.rc == 0)
    - ceph_current_fsid.rc == 0
    - handler_health_osd_check
    # See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
    - inventory_hostname in play_hosts
    - osd_group_name in group_names
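
# `osd_socket_stat` and `ceph_current_fsid` are likewise assumed to be
# registered earlier in the play, outside this file. A hypothetical sketch of
# those probes (task names and exact commands are illustrative only):
#
#   - name: check for a ceph osd socket                    # hypothetical
#     shell: stat /var/run/ceph/*osd*.asok > /dev/null 2>&1
#     changed_when: false
#     failed_when: false
#     register: osd_socket_stat
#
#   - name: read the current cluster fsid                  # hypothetical
#     command: ceph --cluster {{ cluster | default('ceph') }} fsid
#     changed_when: false
#     failed_when: false
#     register: ceph_current_fsid
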
- name: copy mds restart script
  template:
    src: restart_mds_daemon.sh.j2
    dest: /tmp/restart_mds_daemon.sh
    owner: root
    group: root
    mode: 0750
  listen: "restart ceph mdss"
  when:
    - inventory_hostname in play_hosts
    - mds_group_name in group_names

- name: restart ceph mds daemon(s)
  command: /tmp/restart_mds_daemon.sh
  listen: "restart ceph mdss"
  when:
    # We do not want to run these checks on initial deployment (`mds_socket_stat.rc == 0`)
    - mds_socket_stat.rc == 0
    - mds_group_name in group_names

- name: copy rgw restart script
  template:
    src: restart_rgw_daemon.sh.j2
    dest: /tmp/restart_rgw_daemon.sh
    owner: root
    group: root
    mode: 0750
  listen: "restart ceph rgws"
  when:
    - inventory_hostname in play_hosts
    - rgw_group_name in group_names

- name: restart ceph rgw daemon(s)
  command: /tmp/restart_rgw_daemon.sh
  listen: "restart ceph rgws"
  when:
    # We do not want to run these checks on initial deployment (`rgw_socket_stat.rc == 0`)
    - rgw_socket_stat.rc == 0
    - rgw_group_name in group_names

- name: restart ceph nfss
  service:
    name: nfs-ganesha
    state: restarted
  when:
    - nfs_group_name in group_names
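
# Unlike the script-based handlers above, this handler has no `listen` topic,
# so tasks notify it by its name. A hypothetical notifying task (not part of
# this file) would look like:
#
#   - name: deploy ganesha configuration                  # hypothetical task
#     template:
#       src: ganesha.conf.j2
#       dest: /etc/ganesha/ganesha.conf
#     notify: restart ceph nfss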