---
- name: update apt cache
  apt:
    update_cache: yes
  when: ansible_os_family == 'Debian'

- block:
    - name: copy mon restart script
      template:
        src: restart_mon_daemon.sh.j2
        dest: /tmp/restart_mon_daemon.sh
        owner: root
        group: root
        mode: 0750
      listen: "restart ceph mons"

    - name: restart ceph mon daemon(s)
      command: /tmp/restart_mon_daemon.sh
      listen: "restart ceph mons"
  when:
    # We do not want to run these checks on initial deployment (`socket.rc == 0`)
    - socket.rc == 0
    - mon_group_name in group_names
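
# Illustrative note (not part of the upstream file): both handlers in the block
# above subscribe to the same topic through `listen: "restart ceph mons"`, so a
# single notification fires the whole chain. A hypothetical task elsewhere in
# the role would trigger them with something like:
#
#   - name: deploy ceph configuration
#     template:
#       src: ceph.conf.j2
#       dest: /etc/ceph/ceph.conf
#     notify: restart ceph mons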

# This does not restart just the OSDs, but everything else too. Unfortunately,
# at this time the Ansible role does not have a list of OSD ids to use for
# restarting them specifically.
- name: copy osd restart script
  template:
    src: restart_osd_daemon.sh.j2
    dest: /tmp/restart_osd_daemon.sh
    owner: root
    group: root
    mode: 0750
  listen: "restart ceph osds"
  when:
    - inventory_hostname in play_hosts
    - osd_group_name in group_names

- name: restart containerized ceph osds daemon(s)
  command: /tmp/restart_osd_daemon.sh
  listen: "restart ceph osds"
  with_items: "{{ socket_osd_container.results }}"
  when:
    # We do not want to run these checks on initial deployment (`socket_osd_container.results[n].rc == 0`),
    # except when a crush location is specified: ceph-disk will start the OSDs before the OSD crush location is set.
    - containerized_deployment
    - ((crush_location is defined and crush_location) or item.get('rc') == 0)
    - handler_health_osd_check
    # See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
    - inventory_hostname in play_hosts
    - osd_group_name in group_names

- name: restart non-containerized ceph osds daemon(s)
  command: /tmp/restart_osd_daemon.sh
  listen: "restart ceph osds"
  when:
    # We do not want to run these checks on initial deployment (`socket.rc == 0`),
    # except when a crush location is specified: ceph-disk will start the OSDs before the OSD crush location is set.
    - ((crush_location is defined and crush_location) or socket.rc == 0)
    - ceph_current_fsid.rc == 0
    - handler_health_osd_check
    # See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
    - inventory_hostname in play_hosts
    - osd_group_name in group_names
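
# Illustrative note (assumption, not from the upstream file): `socket`,
# `socket_osd_container` and `ceph_current_fsid` are expected to be registered
# by earlier check tasks in the role. A registered command result exposes an
# `rc` field, so such a check could look roughly like:
#
#   - name: check for a ceph osd socket
#     shell: stat /var/run/ceph/*.asok > /dev/null 2>&1
#     changed_when: false
#     failed_when: false
#     register: socket
#
# With that shape, rc == 0 in the conditions above means a daemon was already
# running, which is why the restarts are skipped on initial deployment.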

- name: restart ceph mdss
  service:
    name: ceph-mds@{{ mds_name }}
    state: restarted
  # serial: 1 would be the proper solution here, but that can only be set at the play level;
  # upstream issue: https://github.com/ansible/ansible/issues/12170
  run_once: true
  with_items: "{{ groups.get(mds_group_name, []) }}"
  delegate_to: "{{ item }}"
  when:
    - mds_group_name in group_names
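
# Illustrative note (not part of the upstream file): combining run_once,
# with_items and delegate_to makes a single host walk the mds group and restart
# the daemons one node at a time, approximating what a play-level `serial: 1`
# would give, e.g. in a hypothetical standalone play:
#
#   - hosts: mdss
#     serial: 1
#     tasks:
#       - name: restart ceph mds
#         service:
#           name: ceph-mds@{{ ansible_hostname }}
#           state: restarted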

- name: restart ceph rgws
  service:
    name: ceph-radosgw@rgw.{{ ansible_hostname }}
    state: restarted
  # serial: 1 would be the proper solution here, but that can only be set at the play level;
  # upstream issue: https://github.com/ansible/ansible/issues/12170
  run_once: true
  with_items: "{{ groups.get(rgw_group_name, []) }}"
  delegate_to: "{{ item }}"
  when:
    - rgw_group_name in group_names

- name: restart ceph nfss
  service:
    name: nfs-ganesha
    state: restarted
  when:
    - nfs_group_name in group_names