rolling_update: unmask monitor service after a failure

If for some reason the playbook fails after the service has been
stopped, disabled, and masked, but before it is restarted, enabled, and
unmasked, the playbook leaves the service masked, which can confuse
users and forces them to unmask the unit manually.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1917680

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 07029e1bf1)
pull/6397/head
Guillaume Abrioux 2021-03-18 09:08:51 +01:00
parent fcd9544048
commit 1fd0661d3e
1 changed file with 137 additions and 119 deletions
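
The fix wraps the mon upgrade tasks in a block with a rescue section, so an aborted run no longer leaves the unit masked. A minimal, self-contained sketch of that pattern, assuming a "mons" host group and illustrative task names not taken from the diff:

- hosts: mons
  become: true
  tasks:
    - name: upgrade ceph mon cluster
      block:
        - name: stop, disable and mask the mon service before upgrade
          systemd:
            name: "ceph-mon@{{ ansible_facts['hostname'] }}"
            state: stopped
            enabled: no
            masked: yes
        # ... upgrade tasks that may fail go here ...
        - name: restart, enable and unmask the mon service after upgrade
          systemd:
            name: "ceph-mon@{{ ansible_facts['hostname'] }}"
            state: started
            enabled: yes
            masked: no
      rescue:
        # runs only when a task in the block above fails, so the
        # unit never stays masked after an interrupted playbook run
        - name: unmask the mon service
          systemd:
            name: "ceph-mon@{{ ansible_facts['hostname'] }}"
            enabled: yes
            masked: no

Because rescue runs only on failure, the unmask tasks add no overhead to a successful run.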


@@ -128,6 +128,8 @@
serial: 1
become: True
tasks:
- name: upgrade ceph mon cluster
block:
- name: remove ceph aliases
file:
path: /etc/profile.d/ceph-aliases.sh
@@ -266,6 +268,22 @@
delay: "{{ health_mon_check_delay }}"
when: containerized_deployment | bool
rescue:
- name: unmask the mon service
systemd:
name: ceph-mon@{{ item }}
enabled: yes
masked: no
with_items:
- "{{ ansible_facts['hostname'] }}"
- "{{ ansible_facts['fqdn'] }}"
- name: unmask the mgr service
systemd:
name: ceph-mgr@{{ ansible_facts['hostname'] }}
masked: no
when: inventory_hostname in groups[mgr_group_name] | default([])
or groups[mgr_group_name] | default([]) | length == 0
- name: reset mon_host
hosts: "{{ mon_group_name|default('mons') }}"