# mirror of https://github.com/ceph/ceph-ansible.git
---
- name: update apt cache
  apt:
    update_cache: yes
  when: ansible_os_family == 'Debian'

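# How these handlers are triggered: tasks elsewhere in the roles notify the
# `listen` topics declared below rather than individual task names. A minimal
# sketch, with a hypothetical notifying task (not part of this file):
#
#   - name: generate ceph configuration file
#     template:
#       src: ceph.conf.j2
#       dest: /etc/ceph/ceph.conf
#     notify: "restart ceph mons"
#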
- block:
  - name: copy mon restart script
    template:
      src: restart_mon_daemon.sh.j2
      dest: /tmp/restart_mon_daemon.sh
      owner: root
      group: root
      mode: 0750
    listen: "restart ceph mons"

  - name: restart ceph mon daemon(s)
    command: /tmp/restart_mon_daemon.sh
    listen: "restart ceph mons"
  when:
    # We do not want to run these checks on initial deployment (`socket.rc == 0`)
    - mon_socket.rc == 0
    - mon_group_name in group_names

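# Assumption for readers: `mon_socket` is registered earlier in the role by a
# check against the monitor admin socket, so a non-zero rc means the daemon
# has never been started (initial deployment) and the restart above is skipped.
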
# This does not just restart OSDs but everything else too. Unfortunately
# at this time the ansible role does not have an OSD id list to use
# for restarting them specifically.
- name: copy osd restart script
  template:
    src: restart_osd_daemon.sh.j2
    dest: /tmp/restart_osd_daemon.sh
    owner: root
    group: root
    mode: 0750
  listen: "restart ceph osds"
  when:
    - inventory_hostname in play_hosts
    - osd_group_name in group_names

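# Two restart variants follow: the containerized one loops over per-OSD
# container socket checks, the non-containerized one gates on a single
# `osd_socket` check plus an existing cluster fsid.
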
- name: restart containerized ceph osds daemon(s)
  command: /tmp/restart_osd_daemon.sh
  listen: "restart ceph osds"
  with_items: "{{ socket_osd_container.results | default([]) }}"
  when:
    # We do not want to run these checks on initial deployment (`socket_osd_container.results[n].rc == 0`),
    # except when a crush location is specified: ceph-disk starts the OSDs before their crush location is set
    - containerized_deployment
    - ((crush_location is defined and crush_location) or item.get('rc') == 0)
    - handler_health_osd_check
    # See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
    - inventory_hostname in play_hosts
    - osd_group_name in group_names

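# Illustrative shape of one `socket_osd_container.results` entry as consumed
# by `item.get('rc')` in the loop above (field values are hypothetical):
#   { "item": "/dev/sdb", "rc": 0 }
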
- name: restart non-containerized ceph osds daemon(s)
  command: /tmp/restart_osd_daemon.sh
  listen: "restart ceph osds"
  when:
    # We do not want to run these checks on initial deployment (`osd_socket.rc == 0`),
    # except when a crush location is specified: ceph-disk starts the OSDs before their crush location is set
    - ((crush_location is defined and crush_location) or osd_socket.rc == 0)
    - ceph_current_fsid.rc == 0
    - handler_health_osd_check
    # See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
    - inventory_hostname in play_hosts
    - osd_group_name in group_names

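# Assumption for readers: `ceph_current_fsid` is registered by an earlier task
# that queries the cluster fsid, so rc == 0 means a cluster already exists,
# which is a further guard against restarting during the first deployment.
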
- name: copy mds restart script
  template:
    src: restart_mds_daemon.sh.j2
    dest: /tmp/restart_mds_daemon.sh
    owner: root
    group: root
    mode: 0750
  listen: "restart ceph mdss"
  when:
    - inventory_hostname in play_hosts
    - mds_group_name in group_names

- name: debug socket mds
  debug:
    msg: "{{ mds_socket }}"
  listen: "restart ceph mdss"
  when:
    - mds_group_name in group_names

- name: restart ceph mds daemon(s)
  command: /tmp/restart_mds_daemon.sh
  listen: "restart ceph mdss"
  when:
    # We do not want to run these checks on initial deployment (`socket.rc == 0`)
    - mds_socket.rc == 0
    - mds_group_name in group_names

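# The rgw handlers below follow the same pattern as the mon and mds ones:
# copy a templated restart script, then run it only when the daemon's admin
# socket check succeeded (i.e. not on initial deployment).
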
- name: copy rgw restart script
  template:
    src: restart_rgw_daemon.sh.j2
    dest: /tmp/restart_rgw_daemon.sh
    owner: root
    group: root
    mode: 0750
  listen: "restart ceph rgws"
  when:
    - inventory_hostname in play_hosts
    - rgw_group_name in group_names

- name: restart ceph rgw daemon(s)
  command: /tmp/restart_rgw_daemon.sh
  listen: "restart ceph rgws"
  when:
    # We do not want to run these checks on initial deployment (`socket.rc == 0`)
    - rgw_socket.rc == 0
    - rgw_group_name in group_names

- name: restart ceph nfss
  service:
    name: nfs-ganesha
    state: restarted
  when:
    - nfs_group_name in group_names
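
# Illustrative usage note: handlers run at the end of the play by default; a
# playbook can force them to run earlier with Ansible's built-in
#   - meta: flush_handlers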