---
# This playbook shrinks Ceph OSDs that have been created with ceph-volume.
# It can remove any number of OSD(s) from the cluster and ALL THEIR DATA.
#
# Use it like this:
# ansible-playbook lvm-shrink-osd.yml -e osd_to_kill=0,2,6
#     Prompts for confirmation to shrink, defaults to no and
#     doesn't shrink the cluster. yes shrinks the cluster.
#
# ansible-playbook -e ireallymeanit=yes|no lvm-shrink-osd.yml
#     Overrides the prompt using the -e option. Can be used in
#     automation scripts to avoid the interactive prompt.

- name: gather facts and check the init system
  hosts:
    - "{{ mon_group_name|default('mons') }}"
    - "{{ osd_group_name|default('osds') }}"
  become: true
  tasks:
    - debug:
        msg: gather facts on all Ceph hosts for later reference

- name: confirm whether user really meant to remove osd(s) from the cluster
  hosts:
    - localhost
  become: true
  vars_prompt:
    - name: ireallymeanit
      prompt: Are you sure you want to shrink the cluster?
      default: 'no'
      private: no
  vars:
    mon_group_name: mons
    osd_group_name: osds
  pre_tasks:
    - name: exit playbook, if user did not mean to shrink cluster
      fail:
        msg: "Exiting shrink-osd playbook, no OSD(s) were removed.
           To shrink the cluster, either say 'yes' on the prompt or
           use `-e ireallymeanit=yes` on the command line when
           invoking the playbook."
      when: ireallymeanit != 'yes'

    - name: exit playbook, if no osd(s) was/were given
      fail:
        msg: "osd_to_kill must be declared.
          Exiting shrink-osd playbook, no OSD(s) were removed.
          On the command line when invoking the playbook, you can use
          the -e osd_to_kill=0,1,2,3 argument."
      when: osd_to_kill is not defined

  roles:
    - ceph-defaults

  post_tasks:
    - name: set_fact docker_exec_cmd build docker exec command (containerized)
      set_fact:
        docker_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
      when: containerized_deployment

    - name: exit playbook, if unable to connect to the cluster
      command: "{{ docker_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
      register: ceph_health
      until: ceph_health.stdout.find("HEALTH") > -1
      delegate_to: "{{ groups[mon_group_name][0] }}"
      retries: 5
      delay: 2

    - name: find the host(s) where the osd(s) is/are running on
      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd find {{ item }}"
      with_items: "{{ osd_to_kill.split(',') }}"
      delegate_to: "{{ groups[mon_group_name][0] }}"
      register: find_osd_hosts

    - name: set_fact osd_hosts
      set_fact:
        osd_hosts: "{{ osd_hosts | default([]) + [ [ (item.stdout | from_json).crush_location.host, (item.stdout | from_json).osd_fsid ] ] }}"
      with_items: "{{ find_osd_hosts.results }}"

    - name: mark osd(s) out of the cluster
      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd out {{ item }}"
      run_once: true
      delegate_to: "{{ groups[mon_group_name][0] }}"
      with_items: "{{ osd_to_kill.split(',') }}"

    - name: stop osd(s) service
      service:
        name: ceph-osd@{{ item.0 }}
        state: stopped
        enabled: no
      loop: "{{ osd_to_kill.split(',')|zip(osd_hosts)|list }}"
      delegate_to: "{{ item.1.0 }}"

    - name: zap osd devices
      ceph_volume:
        action: "zap"
        osd_fsid: "{{ item.1 }}"
      environment:
        CEPH_VOLUME_DEBUG: 1
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
      delegate_to: "{{ item.0 }}"
      loop: "{{ osd_hosts }}"

    - name: purge osd(s) from the cluster
      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it"
      run_once: true
      delegate_to: "{{ groups[mon_group_name][0] }}"
      with_items: "{{ osd_to_kill.split(',') }}"

    - name: show ceph health
      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
      delegate_to: "{{ groups[mon_group_name][0] }}"

    - name: show ceph osd tree
      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd tree"
      delegate_to: "{{ groups[mon_group_name][0] }}"
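
    # Optional post-purge sanity check (a sketch, not part of the original
    # playbook): it fails if any of the removed OSD ids still appear in the
    # CRUSH tree, which would indicate the purge did not fully take effect.
    # It assumes the same variables (osd_to_kill, docker_exec_cmd, cluster)
    # used by the tasks above.
    - name: fail if any removed osd id is still present in the osd tree
      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd tree -f json"
      register: osd_tree_json
      delegate_to: "{{ groups[mon_group_name][0] }}"
      failed_when: >
        (osd_to_kill.split(',') | map('int') | list) |
        intersect((osd_tree_json.stdout | from_json).nodes |
                  selectattr('type', 'equalto', 'osd') |
                  map(attribute='id') | list) | length > 0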