---
# This playbook shrinks Ceph OSDs that have been created with ceph-volume.
# It can remove any number of OSD(s) from the cluster and ALL THEIR DATA.
#
# Use it like this:
# ansible-playbook shrink-osd.yml -e osd_to_kill=0,2,6
#     Prompts for confirmation to shrink, defaults to no and
#     doesn't shrink the cluster. Answering yes shrinks the cluster.
#
# ansible-playbook -e ireallymeanit=yes|no shrink-osd.yml
#     Overrides the prompt using the -e option. Can be used in
#     automation scripts to avoid the interactive prompt.
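#
# For example, a fully non-interactive run can combine both extra vars:
# ansible-playbook shrink-osd.yml -e ireallymeanit=yes -e osd_to_kill=0,2,6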
- name: gather facts and check the init system
  hosts:
    - "{{ mon_group_name|default('mons') }}"
    - "{{ osd_group_name|default('osds') }}"
  become: True
  tasks:
    - debug: msg="gather facts on all Ceph hosts for following reference"

- name: confirm whether user really meant to remove osd(s) from the cluster
  hosts: "{{ groups[mon_group_name][0] }}"
  become: true
  vars_prompt:
    - name: ireallymeanit
      prompt: Are you sure you want to shrink the cluster?
      default: 'no'
      private: no
  vars:
    mon_group_name: mons
    osd_group_name: osds
  pre_tasks:
    - name: exit playbook, if user did not mean to shrink cluster
      fail:
        msg: "Exiting shrink-osd playbook, no osd(s) was/were removed.
           To shrink the cluster, either say 'yes' on the prompt or
           use `-e ireallymeanit=yes` on the command line when
           invoking the playbook"
      when: ireallymeanit != 'yes'

    - name: exit playbook, if no osd(s) was/were given
      fail:
        msg: "osd_to_kill must be declared.
           Exiting shrink-osd playbook, no OSD(s) was/were removed.
           On the command line when invoking the playbook, you can use
           the -e osd_to_kill=0,1,2,3 argument."
      when: osd_to_kill is not defined

    - name: check the osd ids passed have the correct format
      fail:
        msg: "The id {{ item }} has the wrong format, please pass the number only"
      with_items: "{{ osd_to_kill.split(',') }}"
      when: not item is regex("^\d+$")
  tasks:
    - import_role:
        name: ceph-defaults

    - import_role:
        name: ceph-facts
        tasks_from: container_binary

  post_tasks:
    - name: set_fact container_exec_cmd build docker exec command (containerized)
      set_fact:
        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
      when: containerized_deployment | bool

    - name: exit playbook, if can not connect to the cluster
      command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
      register: ceph_health
      changed_when: false
      until: ceph_health.stdout.find("HEALTH") > -1
      retries: 5
      delay: 2

    - name: find the host(s) where the osd(s) is/are running on
      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd find {{ item }}"
      changed_when: false
      with_items: "{{ osd_to_kill.split(',') }}"
      register: find_osd_hosts
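    # The facts built below collect, for every OSD to remove, a
    # [inventory host, osd fsid, osd id] triple (_osd_hosts); later tasks
    # reference those fields as item.0, item.1 and item.2 respectively.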
    - name: set_fact osd_hosts
      set_fact:
        osd_hosts: "{{ osd_hosts | default([]) + [ [ (item.stdout | from_json).crush_location.host, (item.stdout | from_json).osd_fsid, item.item ] ] }}"
      with_items: "{{ find_osd_hosts.results }}"

    - name: set_fact _osd_hosts
      set_fact:
        _osd_hosts: "{{ _osd_hosts | default([]) + [ [ item.0, item.2, item.3 ] ] }}"
      with_nested:
        - "{{ groups.get(osd_group_name) }}"
        - "{{ osd_hosts }}"
      when: hostvars[item.0]['ansible_facts']['hostname'] == item.1

    - name: set_fact host_list
      set_fact:
        host_list: "{{ host_list | default([]) | union([item.0]) }}"
      loop: "{{ _osd_hosts }}"
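    # _lvm_list merges the `ceph-volume lvm list` output of every involved
    # host into one dict keyed by OSD id; OSDs missing from it are treated
    # as non-LVM OSDs and handled through the `ceph-volume simple` /
    # /etc/ceph/osd JSON path further below.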
    - name: get ceph-volume lvm list data
      ceph_volume:
        cluster: "{{ cluster }}"
        action: list
      environment:
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
      register: _lvm_list_data
      delegate_to: "{{ item }}"
      loop: "{{ host_list }}"

    - name: set_fact _lvm_list
      set_fact:
        _lvm_list: "{{ _lvm_list | default({}) | combine(item.stdout | from_json) }}"
      with_items: "{{ _lvm_list_data.results }}"
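    # For OSDs not present in _lvm_list, the tasks below refresh and read
    # the /etc/ceph/osd/{osd_id}-*.json files produced by
    # `ceph-volume simple scan` and load their content into
    # ceph_osd_data_json, keyed by OSD id.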
    - name: refresh /etc/ceph/osd files non containerized_deployment
      ceph_volume_simple_scan:
        cluster: "{{ cluster }}"
        force: true
      delegate_to: "{{ item }}"
      loop: "{{ host_list }}"
      when: not containerized_deployment | bool

    - name: refresh /etc/ceph/osd files containerized_deployment
      command: "{{ container_binary }} exec ceph-osd-{{ item.2 }} ceph-volume simple scan --force /var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
      changed_when: false
      delegate_to: "{{ item.0 }}"
      loop: "{{ _osd_hosts }}"
      when: containerized_deployment | bool

    - name: find /etc/ceph/osd files
      find:
        paths: /etc/ceph/osd
        pattern: "{{ item.2 }}-*"
      register: ceph_osd_data
      delegate_to: "{{ item.0 }}"
      loop: "{{ _osd_hosts }}"
      when: item.2 not in _lvm_list.keys()

    - name: slurp ceph osd files content
      slurp:
        src: "{{ item['files'][0]['path'] }}"
      delegate_to: "{{ item.item.0 }}"
      register: ceph_osd_files_content
      loop: "{{ ceph_osd_data.results }}"
      when:
        - item.skipped is undefined
        - item.matched > 0

    - name: set_fact ceph_osd_files_json
      set_fact:
        ceph_osd_data_json: "{{ ceph_osd_data_json | default({}) | combine({ item.item.item.2: item.content | b64decode | from_json}) }}"
      with_items: "{{ ceph_osd_files_content.results }}"
      when: item.skipped is undefined
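    # Everything below this point is destructive: the selected OSDs are
    # marked out, their services stopped, their devices unmounted and
    # zapped, and finally the OSDs are purged from the cluster.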
    - name: mark osd(s) out of the cluster
      ceph_osd:
        ids: "{{ osd_to_kill.split(',') }}"
        cluster: "{{ cluster }}"
        state: out
      environment:
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
      run_once: true

    - name: stop osd(s) service
      service:
        name: ceph-osd@{{ item.2 }}
        state: stopped
        enabled: no
      loop: "{{ _osd_hosts }}"
      delegate_to: "{{ item.0 }}"

    - name: umount osd lockbox
      mount:
        path: "/var/lib/ceph/osd-lockbox/{{ ceph_osd_data_json[item.2]['data']['uuid'] }}"
        state: absent
      loop: "{{ _osd_hosts }}"
      delegate_to: "{{ item.0 }}"
      when:
        - not containerized_deployment | bool
        - item.2 not in _lvm_list.keys()
        - ceph_osd_data_json[item.2]['encrypted'] | default(False) | bool
        - ceph_osd_data_json[item.2]['data']['uuid'] is defined

    - name: umount osd data
      mount:
        path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
        state: absent
      loop: "{{ _osd_hosts }}"
      delegate_to: "{{ item.0 }}"
      when: not containerized_deployment | bool

    - name: get parent device for data partition
      command: lsblk --noheadings --output PKNAME --nodeps "{{ ceph_osd_data_json[item.2]['data']['path'] }}"
      register: parent_device_data_part
      loop: "{{ _osd_hosts }}"
      delegate_to: "{{ item.0 }}"
      when:
        - item.2 not in _lvm_list.keys()
        - ceph_osd_data_json[item.2]['data']['path'] is defined

    - name: add pkname information in ceph_osd_data_json
      set_fact:
        ceph_osd_data_json: "{{ ceph_osd_data_json | default({}) | combine({item.item[2]: {'pkname_data': '/dev/' + item.stdout }}, recursive=True) }}"
      loop: "{{ parent_device_data_part.results }}"
      when: item.skipped is undefined
    - name: close dmcrypt devices if needed
      command: "cryptsetup close {{ ceph_osd_data_json[item.2][item.3]['uuid'] }}"
      with_nested:
        - "{{ _osd_hosts }}"
        - [ 'block_dmcrypt', 'block.db_dmcrypt', 'block.wal_dmcrypt', 'data', 'journal_dmcrypt' ]
      delegate_to: "{{ item.0 }}"
      failed_when: false
      register: result
      until: result is succeeded
      when:
        - item.2 not in _lvm_list.keys()
        - ceph_osd_data_json[item.2]['encrypted'] | default(False) | bool
        - ceph_osd_data_json[item.2][item.3] is defined
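    # Two zap paths: OSDs absent from _lvm_list are zapped partition by
    # partition using the paths recorded in ceph_osd_data_json, while OSDs
    # known to ceph-volume lvm are zapped by their osd_fsid.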
    - name: use ceph-volume lvm zap to destroy all partitions
      ceph_volume:
        cluster: "{{ cluster }}"
        action: zap
        destroy: true
        data: "{{ ceph_osd_data_json[item.2]['pkname_data'] if item.3 == 'data' else ceph_osd_data_json[item.2][item.3]['path'] }}"
      environment:
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
      with_nested:
        - "{{ _osd_hosts }}"
        - [ 'block', 'block.db', 'block.wal', 'journal', 'data' ]
      delegate_to: "{{ item.0 }}"
      failed_when: false
      register: result
      when:
        - item.2 not in _lvm_list.keys()
        - ceph_osd_data_json[item.2][item.3] is defined

    - name: zap osd devices
      ceph_volume:
        action: "zap"
        osd_fsid: "{{ item.1 }}"
      environment:
        CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
      delegate_to: "{{ item.0 }}"
      loop: "{{ _osd_hosts }}"
      when: item.2 in _lvm_list.keys()
    - name: ensure osds are marked down
      ceph_osd:
        ids: "{{ osd_to_kill.split(',') }}"
        cluster: "{{ cluster }}"
        state: down
      environment:
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
      run_once: true
      delegate_to: "{{ groups[mon_group_name][0] }}"

    - name: purge osd(s) from the cluster
      ceph_osd:
        ids: "{{ item }}"
        cluster: "{{ cluster }}"
        state: purge
      environment:
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
      run_once: true
      with_items: "{{ osd_to_kill.split(',') }}"

    - name: remove osd data dir
      file:
        path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
        state: absent
      loop: "{{ _osd_hosts }}"
      delegate_to: "{{ item.0 }}"

    - name: show ceph health
      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
      changed_when: false

    - name: show ceph osd tree
      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd tree"
      changed_when: false