ceph-ansible/infrastructure-playbooks/shrink-rgw.yml

---
# This playbook removes a single Ceph RGW instance from your cluster.
#
# Use it like this:
# ansible-playbook shrink-rgw.yml -e rgw_to_kill=ceph-rgw01
#     Prompts for confirmation to shrink, defaults to 'no' and
#     does not shrink the cluster. Answering 'yes' shrinks the cluster.
#
# ansible-playbook -e ireallymeanit=yes|no shrink-rgw.yml
#     Overrides the prompt using the -e option. Can be used in
#     automation scripts to avoid the interactive prompt.
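#
# A fully non-interactive run combines both extra vars, for example (the
# "hosts" inventory path below is only a placeholder for your own setup):
# ansible-playbook -i hosts -e rgw_to_kill=ceph.rgw0 -e ireallymeanit=yes shrink-rgw.yml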

- name: confirm whether user really meant to remove rgw from the ceph cluster
  hosts: localhost
  become: false
  gather_facts: false
  vars_prompt:
    - name: ireallymeanit
      prompt: Are you sure you want to shrink the cluster?
      default: 'no'
      private: no
  tasks:
    - name: exit playbook, if no rgw was given
      when: rgw_to_kill is not defined or rgw_to_kill | length == 0
      fail:
        msg: >
          rgw_to_kill must be declared.
          Exiting shrink-cluster playbook, no RGW was removed. On the command
          line when invoking the playbook, you can use the
          "-e rgw_to_kill=ceph.rgw0" argument. You can only remove a single
          RGW each time the playbook runs.

    - name: exit playbook, if user did not mean to shrink cluster
      when: ireallymeanit != 'yes'
      fail:
        msg: >
          Exiting shrink-rgw playbook, no RGW was removed. To shrink the
          cluster, either say 'yes' at the prompt or use
          '-e ireallymeanit=yes' on the command line when invoking the playbook
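
# The next play gathers facts explicitly via the setup module, skipping the
# facter and ohai subsets; only the first monitor and the RGW hosts are
# touched by it.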
- name: gather facts on mons and rgws
  hosts:
    - "{{ mon_group_name | default('mons') }}[0]"
    - "{{ rgw_group_name | default('rgws') }}"
  become: true
  gather_facts: false
  tasks:
    - setup:
        gather_subset:
          - 'all'
          - '!facter'
          - '!ohai'
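
# The play below runs every cluster command from the first monitor. In
# containerized deployments, container_exec_cmd prefixes each ceph command so
# that it is executed inside that monitor's container.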
- hosts: mons[0]
  become: true
  gather_facts: false
  pre_tasks:
    - import_role:
        name: ceph-defaults

    - import_role:
        name: ceph-facts
        tasks_from: container_binary

    - name: set_fact container_exec_cmd for mon0
      set_fact:
        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
      when: containerized_deployment | bool
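
    # Fail early if the cluster cannot be reached: the health call is capped
    # at 5 seconds and retried a few times before giving up.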
    - name: exit playbook, if we cannot connect to the cluster
      command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
      register: ceph_health
      until: ceph_health is succeeded
      retries: 5
      delay: 2

    - name: get rgw instances
      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
      register: rgw_instances
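
    # "ceph service dump -f json" lists every running RGW daemon under
    # services.rgw.daemons; the check below requires rgw_to_kill to be one of
    # those keys before anything gets removed.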
    - name: exit playbook, if the rgw_to_kill doesn't exist
      when: rgw_to_kill not in (rgw_instances.stdout | from_json).services.rgw.daemons.keys() | list
      fail:
        msg: >
          It seems that the rgw instance given is not part of the ceph cluster.
          Please make sure it is.
          The rgw instance format is {hostname}.rgw{instance number}.

  tasks:
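    # The inventory host running the doomed instance is found by matching the
    # hostname part of rgw_to_kill (everything before the first dot) against
    # the ansible_hostname fact of each host in the rgws group.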
    - name: get rgw host running the rgw instance to kill
      set_fact:
        rgw_host: '{{ item }}'
      with_items: '{{ groups[rgw_group_name] }}'
      when: hostvars[item]['ansible_hostname'] == rgw_to_kill.split('.')[0]
    - name: stop rgw service
      service:
        name: ceph-radosgw@rgw.{{ rgw_to_kill }}
        state: stopped
        enabled: no
      delegate_to: "{{ rgw_host }}"
      failed_when: false

    - name: ensure that the rgw is stopped
      command: "systemctl is-active ceph-radosgw@rgw.{{ rgw_to_kill }}"
      register: rgw_to_kill_status
      failed_when: rgw_to_kill_status.rc == 0
      # retry until the unit actually reports inactive; without an explicit
      # "until" the retries/delay settings would not take effect
      until: rgw_to_kill_status.rc != 0
      delegate_to: "{{ rgw_host }}"
      retries: 5
      delay: 2
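
    # The daemon may still be listed in the service map right after it stops,
    # so the dump below is re-checked until rgw_to_kill no longer appears
    # under services.rgw.daemons.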
    - name: exit if rgw_to_kill is reported in ceph status
      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
      register: ceph_status
      failed_when:
        - (ceph_status.stdout | from_json).services.rgw is defined
        - rgw_to_kill in (ceph_status.stdout | from_json).services.rgw.daemons.keys() | list
      until:
        - (ceph_status.stdout | from_json).services.rgw is defined
        - rgw_to_kill not in (ceph_status.stdout | from_json).services.rgw.daemons.keys() | list
      retries: 3
      delay: 3
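
    # Once the daemon is gone from the cluster, its data directory
    # (/var/lib/ceph/radosgw/<cluster>-rgw.<instance>) is removed from the
    # host that was running it.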
    - name: purge directories related to rgw
      file:
        path: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_to_kill }}
        state: absent
      delegate_to: "{{ rgw_host }}"

  post_tasks:
    - name: show ceph health
      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"