shrink_osd: use cv zap by fsid to remove parts/lvs

Fixes:
  https://bugzilla.redhat.com/show_bug.cgi?id=1569413
  https://bugzilla.redhat.com/show_bug.cgi?id=1572933

Signed-off-by: Noah Watkins <noahwatkins@gmail.com>
pull/3535/head
Noah Watkins 2019-01-17 15:08:19 -08:00 committed by Sébastien Han
parent 8a5530ee98
commit 9a43674d2e
2 changed files with 8 additions and 36 deletions
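
For context: instead of first listing LVM volumes on every OSD host and then zapping each device path, the playbook now records each OSD's FSID alongside its host and asks ceph-volume to zap by that FSID, which removes the matching partitions/LVs in one step. Below is a minimal standalone sketch of the same idea; the inventory group, the made-up FSID, and the use of the raw ceph-volume command (rather than the ceph_volume module the patch uses) are illustrative assumptions, not part of this commit.

# Illustrative sketch only (not part of this commit): zap everything that
# belongs to a single OSD by its FSID. The patch does the equivalent through
# the ceph_volume module (action: "zap", osd_fsid: ...).
- hosts: osds                # hypothetical inventory group
  become: true
  vars:
    osd_fsid_to_zap: 7c9eaf0a-0000-0000-0000-000000000000   # made-up FSID
  tasks:
    - name: zap osd partitions/lvs by fsid (sketch)
      command: "ceph-volume lvm zap --destroy --osd-fsid {{ osd_fsid_to_zap }}"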

shrink-osd.yml

@@ -84,37 +84,9 @@
 
   - name: set_fact osd_hosts
     set_fact:
-      osd_hosts: "{{ osd_hosts | default([]) + [ (item.stdout | from_json).crush_location.host ] }}"
+      osd_hosts: "{{ osd_hosts | default([]) + [ [ (item.stdout | from_json).crush_location.host, (item.stdout | from_json).osd_fsid ] ] }}"
     with_items: "{{ find_osd_hosts.results }}"
 
-  - name: find lvm osd volumes on each host
-    ceph_volume:
-      action: "list"
-    environment:
-      CEPH_VOLUME_DEBUG: 1
-      CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
-      CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-    with_items: "{{ osd_hosts }}"
-    delegate_to: "{{ item }}"
-    register: osd_volumes
-
-  - name: filter osd volumes to kill by osd - non container
-    set_fact:
-      osd_volumes_to_kill_non_container: "{{ osd_volumes_to_kill_non_container | default([]) + [ (item.1.stdout|from_json)[item.0] ] }}"
-    with_together:
-      - "{{ osd_to_kill.split(',') }}"
-      - "{{ osd_volumes.results }}"
-
-  - name: generate (host / volume) pairs to zap - non container
-    set_fact:
-      osd_host_volumes_to_kill_non_container: "{%- set _val = namespace(devs=[]) -%}
-        {%- for host in osd_hosts -%}
-        {%- for dev in osd_volumes_to_kill_non_container[loop.index-1] -%}
-        {%- set _val.devs = _val.devs + [{\"host\": host, \"path\": dev.path}] -%}
-        {%- endfor -%}
-        {%- endfor -%}
-        {{ _val.devs }}"
-
   - name: mark osd(s) out of the cluster
     command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ item }}"
     run_once: true
@@ -126,21 +98,19 @@
       name: ceph-osd@{{ item.0 }}
       state: stopped
       enabled: no
-    with_together:
-      - "{{ osd_to_kill.split(',') }}"
-      - "{{ osd_hosts }}"
-    delegate_to: "{{ item.1 }}"
+    loop: "{{ osd_to_kill.split(',')|zip(osd_hosts)|list }}"
+    delegate_to: "{{ item.1.0 }}"
 
   - name: zap osd devices
     ceph_volume:
       action: "zap"
-      data: "{{ item.path }}"
+      osd_fsid: "{{ item.1 }}"
     environment:
       CEPH_VOLUME_DEBUG: 1
       CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
       CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-    delegate_to: "{{ item.host }}"
-    with_items: "{{ osd_host_volumes_to_kill_non_container }}"
+    delegate_to: "{{ item.0 }}"
+    loop: "{{ osd_hosts }}"
 
   - name: purge osd(s) from the cluster
     command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it"

tox.ini

@@ -130,6 +130,8 @@ commands=
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/shrink-osd.yml --extra-vars "\
ireallymeanit=yes \
osd_to_kill=0 \
ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
"