---
# This playbook shrinks Ceph OSDs.
# It can remove any number of OSD(s) from the cluster and ALL THEIR DATA
#
# Use it like this:
# ansible-playbook shrink-osd.yml -e osd_to_kill=0,2,6
#     Prompts for confirmation to shrink, defaults to no and
#     doesn't shrink the cluster. yes shrinks the cluster.
#
# ansible-playbook -e ireallymeanit=yes|no shrink-osd.yml
#     Overrides the prompt using -e option. Can be used in
#     automation scripts to avoid interactive prompt.
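#
# A non-interactive run combining both documented options might look like this
# (the OSD IDs are illustrative):
# ansible-playbook shrink-osd.yml -e ireallymeanit=yes -e osd_to_kill=1,5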

- name: gather facts and check the init system

  hosts:
    - "{{ mon_group_name|default('mons') }}"
    - "{{ osd_group_name|default('osds') }}"

  become: True

  tasks:
    - debug: msg="gather facts on all Ceph hosts for later reference"

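# The second play runs from localhost: it asks for confirmation, validates the
# input, then delegates the actual shrink commands to the monitor and OSD nodes.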
- name: confirm whether user really meant to remove osd(s) from the cluster

  hosts:
    - localhost

  become: true

  vars_prompt:
    - name: ireallymeanit
      prompt: Are you sure you want to shrink the cluster?
      default: 'no'
      private: no

  vars:
    mon_group_name: mons
    osd_group_name: osds

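  # Safety checks: stop immediately unless the operator confirmed the prompt
  # and provided the osd_to_kill variable.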
  pre_tasks:
    - name: exit playbook, if user did not mean to shrink cluster
      fail:
        msg: "Exiting shrink-osd playbook, no osd(s) was/were removed.
           To shrink the cluster, either say 'yes' on the prompt or
           use `-e ireallymeanit=yes` on the command line when
           invoking the playbook"
      when: ireallymeanit != 'yes'

    - name: exit playbook, if no osd(s) was/were given
      fail:
        msg: "osd_to_kill must be declared.
           Exiting shrink-osd playbook, no osd(s) was/were removed.
           On the command line when invoking the playbook, you can use
           the -e osd_to_kill=0,1,2,3 argument."
      when: osd_to_kill is not defined

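  # ceph-defaults supplies the default variables used below (for example the
  # cluster name referenced as {{ cluster }}).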
  roles:
    - ceph-defaults

  post_tasks:

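    # Fail fast if the first monitor cannot report cluster health within a few
    # seconds; the rest of the playbook depends on a reachable cluster.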
    - name: exit playbook, if unable to connect to the cluster
      command: timeout 5 ceph --cluster {{ cluster }} health
      register: ceph_health
      until: ceph_health.stdout.find("HEALTH") > -1
      delegate_to: "{{ groups[mon_group_name][0] }}"
      retries: 5
      delay: 2

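    # `ceph osd find` prints JSON that includes each OSD's CRUSH location;
    # it is parsed below to learn which host every OSD lives on.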
    - name: find the host(s) where the osd(s) is/are running
      command: ceph --cluster {{ cluster }} osd find {{ item }}
      with_items: "{{ osd_to_kill.split(',') }}"
      delegate_to: "{{ groups[mon_group_name][0] }}"
      register: find_osd_hosts

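    # Build one hostname per OSD, in osd_to_kill order, so with_together can
    # pair OSD IDs with their hosts in the loops below.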
    - set_fact:
        osd_hosts: "{{ osd_hosts | default([]) + [(item.stdout | from_json).crush_location.host] }}"
      with_items: "{{ find_osd_hosts.results }}"

    - name: check if ceph admin key exists on the osd nodes
      stat:
        path: "/etc/ceph/{{ cluster }}.client.admin.keyring"
      register: ceph_admin_key
      with_items: "{{ osd_hosts }}"
      delegate_to: "{{ item }}"
      failed_when: false

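    # The ceph-disk commands below run on the OSD nodes and expect the admin
    # keyring to be there; abort if any node is missing it.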
    - fail:
        msg: "The Ceph admin key is not present on the OSD node, please add it and remove it after the playbook is done."
      with_items: "{{ ceph_admin_key.results }}"
      when:
        - not item.stat.exists

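    # Each OSD is deactivated and then destroyed with ceph-disk on its host.
    # Whenever a ceph-disk step fails, the equivalent ceph CLI commands
    # (osd out, crush remove, auth del, osd rm) are run on the first monitor
    # instead.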
    - name: deactivating osd(s)
      command: ceph-disk deactivate --cluster {{ cluster }} --deactivate-by-id {{ item.0 }} --mark-out
      register: deactivate
      ignore_errors: yes
      with_together:
        - "{{ osd_to_kill.split(',') }}"
        - "{{ osd_hosts }}"
      delegate_to: "{{ item.1 }}"

    - name: set osd(s) out when ceph-disk deactivate fails
      command: ceph --cluster {{ cluster }} osd out osd.{{ item.0 }}
      delegate_to: "{{ groups[mon_group_name][0] }}"
      with_together:
        - "{{ osd_to_kill.split(',') }}"
        - "{{ deactivate.results }}"
      when:
        - item.1.stderr|length > 0

    - name: destroying osd(s)
      command: ceph-disk destroy --cluster {{ cluster }} --destroy-by-id {{ item.0 }} --zap
      register: destroy
      ignore_errors: yes
      with_together:
        - "{{ osd_to_kill.split(',') }}"
        - "{{ osd_hosts }}"
      delegate_to: "{{ item.1 }}"

    - name: remove osd(s) from the crush map when ceph-disk destroy fails
      command: ceph --cluster {{ cluster }} osd crush remove osd.{{ item.0 }}
      delegate_to: "{{ groups[mon_group_name][0] }}"
      with_together:
        - "{{ osd_to_kill.split(',') }}"
        - "{{ destroy.results }}"
      when:
        - item.1.stderr|length > 0

    - name: delete osd(s) auth key when ceph-disk destroy fails
      command: ceph --cluster {{ cluster }} auth del osd.{{ item.0 }}
      delegate_to: "{{ groups[mon_group_name][0] }}"
      with_together:
        - "{{ osd_to_kill.split(',') }}"
        - "{{ destroy.results }}"
      when:
        - item.1.stderr|length > 0

    - name: deallocate osd(s) id when ceph-disk destroy fails
      command: ceph --cluster {{ cluster }} osd rm {{ item.0 }}
      delegate_to: "{{ groups[mon_group_name][0] }}"
      with_together:
        - "{{ osd_to_kill.split(',') }}"
        - "{{ destroy.results }}"
      when:
        - item.1.stderr|length > 0

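    # Finally, display the cluster status and OSD tree so the operator can
    # confirm the OSDs are gone.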
    - name: show ceph health
      command: ceph --cluster {{ cluster }} -s
      delegate_to: "{{ groups[mon_group_name][0] }}"

    - name: show ceph osd tree
      command: ceph --cluster {{ cluster }} osd tree
      delegate_to: "{{ groups[mon_group_name][0] }}"