shrink-osd: support fqdn in inventory

When the inventory uses FQDNs, this playbook fails because some tasks use
the result of ceph osd tree (which returns short hostnames) to look up
data in hostvars[].
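
As an illustration (the host name below is hypothetical, and the comments are
explanatory additions): with a host declared as osd0.example.com in the
inventory, ceph osd tree reports it as osd0, so expressions such as
hostvars['osd0'] or delegate_to: osd0 refer to a host that does not exist in
the inventory. A minimal sketch of the mapping applied by this change, which
matches the short name against each host's ansible_hostname fact and keeps the
inventory_hostname instead:

    - name: set_fact _osd_hosts
      set_fact:
        _osd_hosts: "{{ _osd_hosts | default([]) + [hostvars[item.0]['inventory_hostname']] }}"
      with_nested:
        - "{{ groups.get(osd_group_name) }}"   # inventory names (possibly FQDNs)
        - "{{ osd_hosts }}"                    # short hostnames reported by ceph osd tree
      when: hostvars[item.0]['ansible_hostname'] == item.1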

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1779021

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 6d9ca6b05b)
pull/4786/head
Guillaume Abrioux 2019-12-09 15:52:26 +01:00
parent 193ce4f572
commit ca728dcd70
2 changed files with 26 additions and 10 deletions

@@ -171,6 +171,14 @@
         - "{{ osd_to_kill_disks.results }}"
         - "{{ osd_hosts }}"
 
+    - name: set_fact _osd_hosts
+      set_fact:
+        _osd_hosts: "{{ _osd_hosts | default([]) + [hostvars[item.0]['inventory_hostname']] }}"
+      with_nested:
+        - "{{ groups.get(osd_group_name) }}"
+        - "{{ osd_hosts }}"
+      when: hostvars[item.0]['ansible_hostname'] == item.1
+
     - name: zap ceph osd disks
       shell: |
         docker run --rm \
@@ -184,7 +192,7 @@
       delegate_to: "{{ item.1 }}"
       with_together:
         - "{{ resolved_parent_device.results }}"
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
       when:
         - containerized_deployment
@@ -193,7 +201,7 @@
       delegate_to: "{{ item.1 }}"
       with_together:
         - "{{ resolved_parent_device.results }}"
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
       when:
         - containerized_deployment
@@ -216,7 +224,7 @@
           fi
         done
       with_together:
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
         - "{{ osd_to_kill_disks_dedicated.results }}"
       delegate_to: "{{ item.0 }}"
       when:
@@ -228,7 +236,7 @@
       run_once: true
       with_together:
         - "{{ osd_to_kill.split(',') }}"
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
       delegate_to: "{{ item.1 }}"
       when:
         - not containerized_deployment
@@ -238,7 +246,7 @@
       run_once: true
       with_together:
         - "{{ osd_to_kill.split(',') }}"
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
       delegate_to: "{{ item.1 }}"
       when:
         - not containerized_deployment
@@ -248,7 +256,7 @@
       delegate_to: "{{ item.1 }}"
       with_together:
         - "{{ resolved_parent_device.results }}"
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
 
     - name: remove osd(s) from crush_map when ceph-disk destroy fail
       command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd crush remove osd.{{ item }}"
@@ -282,7 +290,7 @@
           fi
         done
       with_together:
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
         - "{{ osd_to_kill_disks_dedicated_non_container.results }}"
       delegate_to: "{{ item.0 }}"
       when:
@@ -295,7 +303,7 @@
         state: absent
       with_together:
         - "{{ osd_to_kill.split(',') }}"
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
       delegate_to: "{{ item.1 }}"
 
     - name: show ceph health

@@ -84,6 +84,14 @@
         osd_hosts: "{{ osd_hosts | default([]) + [ [ (item.stdout | from_json).crush_location.host, (item.stdout | from_json).osd_fsid ] ] }}"
       with_items: "{{ find_osd_hosts.results }}"
 
+    - name: set_fact _osd_hosts
+      set_fact:
+        _osd_hosts: "{{ _osd_hosts | default([]) + [ [ item.0, item.2 ] ] }}"
+      with_nested:
+        - "{{ groups.get(osd_group_name) }}"
+        - "{{ osd_hosts }}"
+      when: hostvars[item.0]['ansible_hostname'] == item.1
+
     - name: mark osd(s) out of the cluster
       command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ item }}"
       run_once: true
@@ -95,7 +103,7 @@
         name: ceph-osd@{{ item.0 }}
         state: stopped
         enabled: no
-      loop: "{{ osd_to_kill.split(',')|zip(osd_hosts)|list }}"
+      loop: "{{ osd_to_kill.split(',')|zip(_osd_hosts)|list }}"
       delegate_to: "{{ item.1.0 }}"
 
     - name: zap osd devices
@@ -106,7 +114,7 @@
         CEPH_VOLUME_DEBUG: 1
         CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
       delegate_to: "{{ item.0 }}"
-      loop: "{{ osd_hosts }}"
+      loop: "{{ _osd_hosts }}"
 
     - name: purge osd(s) from the cluster
       command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it"