mirror of https://github.com/ceph/ceph-ansible.git
shrink-mds: fix condition on fs deletion
the ceph status registered in `ceph_status` is taken after the mds has
been shrunk, so it reports `fsmap.up` = 0 once the last mds is removed;
this means the previous condition (`== 1`) was wrong.
Also add a condition on `fsmap.up:standby` so we don't try to delete
the fs when a standby mds is going to rejoin the cluster.
Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1787543
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 3d0898aa5d)
pull/4947/head
parent 09a71e4a8c
commit 0db611ebf8
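For context, the changed `when` conditions read a `ceph_status` fact registered earlier in the playbook, after the mds daemon has been stopped. A minimal sketch of such a task follows; the task name and the exact `ceph -s -f json` invocation are assumptions for illustration, not copied verbatim from the playbook:

    # hypothetical sketch: register the cluster status as JSON after shrinking the mds
    - name: get ceph status after shrinking the mds
      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
      register: ceph_status
      delegate_to: "{{ groups[mon_group_name][0] }}"

Because this status is gathered after the daemon has gone down, `fsmap.up` is already 0 when the last active mds was removed, which is why the old `== 1` check could never match.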
@@ -149,7 +149,9 @@
 - name: delete the filesystem when killing last mds
   command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs rm --yes-i-really-mean-it {{ cephfs }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
-  when: (ceph_status.stdout | from_json)['fsmap']['up'] | int == 1
+  when:
+    - (ceph_status.stdout | from_json)['fsmap']['up'] | int == 0
+    - (ceph_status.stdout | from_json)['fsmap']['up:standby'] | int == 0

 - name: purge mds store
   file:
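To illustrate why both counters are checked, here is an illustrative `fsmap` fragment (made-up values, other fields omitted) of the kind parsed from `ceph_status.stdout` after the last active mds has been stopped while a standby is still registered:

    "fsmap": {
        "epoch": 42,
        "up": 0,
        "up:standby": 1
    }

With `up:standby` = 1 a standby mds is about to rejoin and take over the rank, so the filesystem must not be removed; only when both counters are 0 does the playbook run `ceph fs rm`.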