From 3d0898aa5db7b264d17a6948747a55b0834629e2 Mon Sep 17 00:00:00 2001
From: Guillaume Abrioux
Date: Wed, 15 Jan 2020 07:17:08 +0100
Subject: [PATCH] shrink-mds: fix condition on fs deletion

The ceph status registered in `ceph_status` reports `fsmap.up` = 0 when
the last mds is removed, because the status is gathered after the mds
has been shrunk, so the existing condition (`== 1`) is wrong.

Also add a condition so we don't try to delete the fs if a standby node
is going to rejoin the cluster.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1787543

Signed-off-by: Guillaume Abrioux
---
 infrastructure-playbooks/shrink-mds.yml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/infrastructure-playbooks/shrink-mds.yml b/infrastructure-playbooks/shrink-mds.yml
index 5c974af91..282aeb5e4 100644
--- a/infrastructure-playbooks/shrink-mds.yml
+++ b/infrastructure-playbooks/shrink-mds.yml
@@ -149,7 +149,9 @@
     - name: delete the filesystem when killing last mds
       command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs rm --yes-i-really-mean-it {{ cephfs }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
-      when: (ceph_status.stdout | from_json)['fsmap']['up'] | int == 1
+      when:
+        - (ceph_status.stdout | from_json)['fsmap']['up'] | int == 0
+        - (ceph_status.stdout | from_json)['fsmap']['up:standby'] | int == 0

     - name: purge mds store
       file:
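
For context, the two `when` conditions parse the `fsmap` section of the JSON
ceph status registered earlier in the playbook as `ceph_status`. A minimal
sketch of that section for a cluster whose last active mds has just been
removed and which has no standby left; the `up` and `up:standby` keys are the
ones the conditions read, the other fields and all values are illustrative and
vary by Ceph release:

    {
      "fsmap": {
        "epoch": 12,
        "id": 1,
        "up": 0,
        "in": 0,
        "max": 1,
        "by_rank": [],
        "up:standby": 0
      }
    }

With this patch the filesystem is only removed when both counters are 0: no
mds rank is still up, and no standby daemon remains that could rejoin the
cluster and serve the filesystem again.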