From ca728dcd7076d7710dc48bd178f17bf64d51a92d Mon Sep 17 00:00:00 2001
From: Guillaume Abrioux
Date: Mon, 9 Dec 2019 15:52:26 +0100
Subject: [PATCH] shrink-osd: support fqdn in inventory

When using fqdn in the inventory, this playbook fails because some
tasks use the result of ceph osd tree (which returns short hostnames)
to look up data in hostvars[].

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1779021

Signed-off-by: Guillaume Abrioux
(cherry picked from commit 6d9ca6b05b52694dec53ce61fdc16bb83c93979d)
---
 .../shrink-osd-ceph-disk.yml            | 24 ++++++++++++-------
 infrastructure-playbooks/shrink-osd.yml | 12 ++++++++--
 2 files changed, 26 insertions(+), 10 deletions(-)

diff --git a/infrastructure-playbooks/shrink-osd-ceph-disk.yml b/infrastructure-playbooks/shrink-osd-ceph-disk.yml
index 8059e2e8e..8bf1dba3f 100644
--- a/infrastructure-playbooks/shrink-osd-ceph-disk.yml
+++ b/infrastructure-playbooks/shrink-osd-ceph-disk.yml
@@ -171,6 +171,14 @@
         - "{{ osd_to_kill_disks.results }}"
         - "{{ osd_hosts }}"
 
+    - name: set_fact _osd_hosts
+      set_fact:
+        _osd_hosts: "{{ _osd_hosts | default([]) + [hostvars[item.0]['inventory_hostname']] }}"
+      with_nested:
+        - "{{ groups.get(osd_group_name) }}"
+        - "{{ osd_hosts }}"
+      when: hostvars[item.0]['ansible_hostname'] == item.1
+
     - name: zap ceph osd disks
       shell: |
         docker run --rm \
@@ -184,7 +192,7 @@
       delegate_to: "{{ item.1 }}"
       with_together:
         - "{{ resolved_parent_device.results }}"
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
       when:
         - containerized_deployment
 
@@ -193,7 +201,7 @@
       delegate_to: "{{ item.1 }}"
       with_together:
         - "{{ resolved_parent_device.results }}"
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
       when:
         - containerized_deployment
 
@@ -216,7 +224,7 @@
             fi
           done
       with_together:
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
         - "{{ osd_to_kill_disks_dedicated.results }}"
       delegate_to: "{{ item.0 }}"
       when:
@@ -228,7 +236,7 @@
      run_once: true
      with_together:
        - "{{ osd_to_kill.split(',') }}"
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
      delegate_to: "{{ item.1 }}"
      when:
        - not containerized_deployment
@@ -238,7 +246,7 @@
      run_once: true
      with_together:
        - "{{ osd_to_kill.split(',') }}"
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
      delegate_to: "{{ item.1 }}"
      when:
        - not containerized_deployment
@@ -248,7 +256,7 @@
      delegate_to: "{{ item.1 }}"
      with_together:
        - "{{ resolved_parent_device.results }}"
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
 
    - name: remove osd(s) from crush_map when ceph-disk destroy fail
      command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd crush remove osd.{{ item }}"
@@ -282,7 +290,7 @@
            fi
          done
      with_together:
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
        - "{{ osd_to_kill_disks_dedicated_non_container.results }}"
      delegate_to: "{{ item.0 }}"
      when:
@@ -295,7 +303,7 @@
        state: absent
      with_together:
        - "{{ osd_to_kill.split(',') }}"
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
      delegate_to: "{{ item.1 }}"
 
    - name: show ceph health
diff --git a/infrastructure-playbooks/shrink-osd.yml b/infrastructure-playbooks/shrink-osd.yml
index e5d8e8d5b..3cac706df 100644
--- a/infrastructure-playbooks/shrink-osd.yml
+++ b/infrastructure-playbooks/shrink-osd.yml
@@ -84,6 +84,14 @@
         osd_hosts: "{{ osd_hosts | default([]) + [ [ (item.stdout | from_json).crush_location.host, (item.stdout | from_json).osd_fsid ] ] }}"
       with_items: "{{ find_osd_hosts.results }}"
 
+    - name: set_fact _osd_hosts
+      set_fact:
+        _osd_hosts: "{{ _osd_hosts | default([]) + [ [ item.0, item.2 ] ] }}"
+      with_nested:
+        - "{{ groups.get(osd_group_name) }}"
+        - "{{ osd_hosts }}"
+      when: hostvars[item.0]['ansible_hostname'] == item.1
+
     - name: mark osd(s) out of the cluster
       command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ item }}"
       run_once: true
@@ -95,7 +103,7 @@
         name: ceph-osd@{{ item.0 }}
         state: stopped
         enabled: no
-      loop: "{{ osd_to_kill.split(',')|zip(osd_hosts)|list }}"
+      loop: "{{ osd_to_kill.split(',')|zip(_osd_hosts)|list }}"
       delegate_to: "{{ item.1.0 }}"
 
     - name: zap osd devices
@@ -106,7 +114,7 @@
         CEPH_VOLUME_DEBUG: 1
         CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
       delegate_to: "{{ item.0 }}"
-      loop: "{{ osd_hosts }}"
+      loop: "{{ _osd_hosts }}"
 
     - name: purge osd(s) from the cluster
       command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it"
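
For reference, the new "set_fact _osd_hosts" tasks only map the short
hostname reported by ceph osd tree / ceph osd find back to the matching
inventory hostname (possibly an FQDN) by comparing it against each
host's gathered ansible_hostname fact. A minimal standalone sketch of
that lookup is shown below; it assumes facts have already been gathered,
and the group name "osds" and short name "osd0" are illustrative
placeholders, not values used by these playbooks:

    # Illustrative sketch only: find the inventory host whose gathered
    # ansible_hostname (short name) matches a name reported by Ceph.
    # "osds" and "osd0" are placeholder values.
    - name: map a short hostname back to its inventory hostname
      set_fact:
        matching_inventory_host: "{{ item }}"
      loop: "{{ groups['osds'] }}"
      when: hostvars[item]['ansible_hostname'] == 'osd0'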