---
# This playbook shrinks the Ceph monitors from your cluster
# It can remove any number of monitor(s) from the cluster and ALL THEIR DATA
#
# Use it like this:
# ansible-playbook shrink-mon.yml -e mon_host=ceph-mon01,ceph-mon02
#     Prompts for confirmation to shrink; defaults to 'no' and
#     doesn't shrink the cluster. 'yes' shrinks the cluster.
#
# ansible-playbook -e ireallymeanit=yes|no shrink-mon.yml
#     Overrides the prompt using the -e option. Can be used in
#     automation scripts to avoid the interactive prompt.

- name: confirm whether user really meant to remove monitor(s) from the ceph cluster
  hosts:
    - localhost
  gather_facts: false
  become: true

  vars_prompt:
    - name: ireallymeanit
      prompt: Are you sure you want to shrink the cluster?
      default: 'no'
      private: no

  tasks:
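    # Note: when ireallymeanit is passed with -e on the command line, Ansible
    # skips the vars_prompt above entirely (extra vars take precedence), which
    # is what makes the non-interactive invocation shown in the header work.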

    - include_vars: roles/ceph-common/defaults/main.yml
    - include_vars: group_vars/all.yml
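
    # These files supply defaults used below, notably "cluster" (the cluster
    # name) and "mon_group_name" (the inventory group that holds the
    # monitors); in stock ceph-ansible these default to "ceph" and "mons".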

    - name: exit playbook, if only one monitor is present in cluster
      fail:
        msg: "You are about to shrink the only monitor present in the cluster.
              If you really want to do that, please use the purge-cluster playbook."
      when: groups[mon_group_name] | length | int == 1
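
    # Removing the last monitor would leave the cluster without any quorum at
    # all; that is a teardown rather than a shrink, which is why the
    # purge-cluster playbook is the right tool for it.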

    - name: exit playbook, if user did not mean to shrink cluster
      fail:
        msg: "Exiting shrink-mon playbook, no monitor(s) was/were removed.
              To shrink the cluster, either say 'yes' on the prompt or
              use `-e ireallymeanit=yes` on the command line when
              invoking the playbook"
      when: ireallymeanit != 'yes'

    - name: exit playbook, if no monitor(s) was/were given
      fail:
        msg: "mon_host must be declared.
              Exiting shrink-mon playbook, no monitor(s) was/were removed.
              On the command line when invoking the playbook, you can use
              the -e mon_host=ceph-mon01,ceph-mon02 argument."
      when: mon_host is not defined
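
    # mon_host is a plain comma-separated string; the tasks below split it
    # with mon_host.split(',') to loop over each monitor name individually.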

    - name: test if ceph command exists
      command: command -v ceph
      changed_when: false
      failed_when: false
      register: ceph_command
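
    # "command -v ceph" exits 0 and prints the binary's path when ceph is in
    # the PATH, and exits non-zero otherwise; failed_when: false keeps the
    # play going so the next task can report the problem with a clear message.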

    - name: exit playbook, if ceph command does not exist
      fail:
        msg: "The ceph command is not available, please install it :("
      run_once: true
      when:
        - ceph_command.rc != 0

    - name: exit playbook, if cluster files do not exist
      stat:
        path: "{{ item }}"
      register: ceph_conf_key
      with_items:
        - /etc/ceph/{{ cluster }}.conf
        - /etc/ceph/{{ cluster }}.client.admin.keyring
      failed_when: false

    - fail:
        msg: "{{ item.item }} is not present in /etc/ceph"
      with_items: "{{ ceph_conf_key.results }}"
      when:
        - not item.stat.exists
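
    # Both the cluster configuration file and the client.admin keyring must be
    # present on the host running this play, since the ceph CLI calls below
    # rely on them to find and authenticate against the cluster.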

    - name: exit playbook, if unable to connect to the cluster
      command: timeout 5 ceph --cluster {{ cluster }} health
      register: ceph_health
      until: ceph_health.stdout.find("HEALTH") > -1
      retries: 5
      delay: 2
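
    # "ceph health" prints a HEALTH_* status (HEALTH_OK, HEALTH_WARN, ...)
    # whenever the monitors answer; with retries: 5 and delay: 2 the cluster
    # gets several attempts, each capped at 5 seconds by timeout, to respond.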

    - name: verify given monitors are reachable
      command: ping -c 1 {{ item }}
      with_items: "{{ mon_host.split(',') }}"
      register: mon_reachable
      failed_when: false

    - fail:
        msg: "One or more monitors are not reachable, please check your /etc/hosts or your DNS"
      with_items: "{{ mon_reachable.results }}"
      when:
        - item.rc != 0

    - name: stop monitor service
      service:
        name: ceph-mon@{{ item }}
        state: stopped
        enabled: no
      with_items: "{{ mon_host.split(',') }}"
      delegate_to: "{{ item }}"
      failed_when: false
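
    # delegate_to runs each iteration on the monitor host itself;
    # ceph-mon@<name> targets the instance of Ceph's ceph-mon@.service
    # systemd template unit named after that monitor.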

    - name: purge monitor store
      file:
        path: /var/lib/ceph/mon/{{ cluster }}-{{ item }}
        state: absent
      with_items: "{{ mon_host.split(',') }}"
      delegate_to: "{{ item }}"
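
    # This permanently deletes the monitor's data directory (including its
    # store.db); there is no way to undo the shrink past this point.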
|
|
|
|
- name: remove monitor from the quorum
|
|
|
|
command: ceph --cluster {{ cluster }} mon remove {{ item }}
|
|
|
|
failed_when: false
|
|
|
|
with_items: "{{mon_host.split(',')}}"
|
|
|
|
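
    # "ceph mon remove" drops the monitor from the monmap, after which the
    # surviving monitors re-form quorum without it.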

    # NOTE (leseb): sorry for the 'sleep' command
    # but it will take a couple of seconds for other monitors
    # to notice that one member has left.
    # 'sleep 5' is not that bad and should be sufficient
    - name: verify the monitor is out of the cluster
      shell: "sleep 5 && ceph --cluster {{ cluster }} -s | grep monmap | sed 's/.*quorum//' | egrep -sq {{ item }}"
      with_items: "{{ mon_host.split(',') }}"
      failed_when: false
      register: ceph_health_mon
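
    # The pipeline isolates the quorum list from the "ceph -s" monmap line;
    # egrep -sq exits 0 when the monitor name still appears there, so a
    # non-zero rc below means the monitor really left the cluster.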

    - name: please remove the monitor from your ceph configuration file
      debug:
        msg: "The monitor(s) has/have been successfully removed from the cluster.
              Please remove the monitor(s) entry(ies) from the rest of your ceph configuration files, cluster wide."
      run_once: true
      with_items: "{{ ceph_health_mon.results }}"
      when:
        - item.rc != 0

    - name: fail if monitor(s) still appear(s) to be part of the cluster
      fail:
        msg: "Monitor(s) appear(s) to still be part of the cluster, please check what happened."
      run_once: true
      with_items: "{{ ceph_health_mon.results }}"
      when:
        - item.rc == 0
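
    # As a manual double check afterwards, something like
    # "ceph --cluster <name> mon stat" (or "ceph -s") should list only the
    # surviving monitors in the quorum.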