ceph-ansible/infrastructure-playbooks/shrink-osd.yml


---
# This playbook shrinks Ceph OSDs that have been created with ceph-volume.
# It can remove any number of OSD(s) from the cluster and ALL THEIR DATA.
#
# Use it like this:
# ansible-playbook shrink-osd.yml -e osd_to_kill=0,2,6
#     Prompts for confirmation to shrink, defaults to no and
#     doesn't shrink the cluster. yes shrinks the cluster.
#
# ansible-playbook -e ireallymeanit=yes|no shrink-osd.yml
#     Overrides the prompt using -e option. Can be used in
#     automation scripts to avoid interactive prompt.
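#
# Example non-interactive run, combining both options (the OSD ids are
# illustrative):
# ansible-playbook shrink-osd.yml -e ireallymeanit=yes -e osd_to_kill=0,2,6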
- name: Gather facts and check the init system
  hosts:
    - mons
    - osds
  become: true
  tasks:
    - name: Gather facts on all Ceph hosts for following reference
      ansible.builtin.debug:
        msg: "gather facts on all Ceph hosts for following reference"
- name: Confirm whether user really meant to remove osd(s) from the cluster
  hosts: mons[0]
  become: true
  vars_prompt:
    - name: ireallymeanit  # noqa: name[casing]
      prompt: Are you sure you want to shrink the cluster?
      default: 'no'
      private: false
  vars:
    mon_group_name: mons
    osd_group_name: osds
  pre_tasks:
    - name: Exit playbook, if user did not mean to shrink cluster
      ansible.builtin.fail:
        msg: "Exiting shrink-osd playbook, no osd(s) was/were removed.
          To shrink the cluster, either say 'yes' on the prompt
          or use `-e ireallymeanit=yes` on the command line when
          invoking the playbook"
      when: ireallymeanit != 'yes'
    - name: Exit playbook, if no osd(s) was/were given
      ansible.builtin.fail:
        msg: "osd_to_kill must be declared.
          Exiting shrink-osd playbook, no OSD(s) was/were removed.
          On the command line when invoking the playbook, you can use
          the -e osd_to_kill=0,1,2,3 argument."
      when: osd_to_kill is not defined
    - name: Check the osd ids passed have the correct format
      ansible.builtin.fail:
        msg: "The id {{ item }} has the wrong format, please pass the number only"
      with_items: "{{ osd_to_kill.split(',') }}"
      when: not item is regex("^\d+$")
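    # Illustrative values: `-e osd_to_kill=0,2,6` passes the format check
    # above, while `-e osd_to_kill=osd.0` or `-e osd_to_kill=0 2 6` fails it,
    # since each comma-separated field must match ^\d+$.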
  tasks:
    - name: Import ceph-defaults role
      ansible.builtin.import_role:
        name: ceph-defaults
    - name: Import ceph-facts role
      ansible.builtin.import_role:
        name: ceph-facts
        tasks_from: container_binary
  post_tasks:
    - name: Set_fact container_exec_cmd build docker exec command (containerized)
      ansible.builtin.set_fact:
        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
      when: containerized_deployment | bool
    - name: Exit playbook, if unable to connect to the cluster
      ansible.builtin.command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
      register: ceph_health
      changed_when: false
      until: ceph_health.stdout.find("HEALTH") > -1
      retries: 5
      delay: 2
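    # `ceph health` prints a status starting with "HEALTH" (e.g. HEALTH_OK or
    # HEALTH_WARN), so the task above retries up to 5 times, 2 seconds apart,
    # and fails the play if the monitor never answers.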
    - name: Find the host(s) where the osd(s) is/are running
      ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd find {{ item }}"
      changed_when: false
      with_items: "{{ osd_to_kill.split(',') }}"
      register: find_osd_hosts
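    # The JSON printed by `ceph osd find <id>` contains, among other fields,
    # the two keys consumed below, roughly (values illustrative):
    #   {"osd": 0, "osd_fsid": "...", "crush_location": {"host": "osd-node-1"}}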
    - name: Set_fact osd_hosts
      ansible.builtin.set_fact:
        osd_hosts: "{{ osd_hosts | default([]) + [[(item.stdout | from_json).crush_location.host, (item.stdout | from_json).osd_fsid, item.item]] }}"
      with_items: "{{ find_osd_hosts.results }}"
    - name: Set_fact _osd_hosts
      ansible.builtin.set_fact:
        _osd_hosts: "{{ _osd_hosts | default([]) + [[item.0, item.2, item.3]] }}"
      with_nested:
        - "{{ groups.get(osd_group_name) }}"
        - "{{ osd_hosts }}"
      when: hostvars[item.0]['ansible_facts']['hostname'] == item.1
    - name: Set_fact host_list
      ansible.builtin.set_fact:
        host_list: "{{ host_list | default([]) | union([item.0]) }}"
      loop: "{{ _osd_hosts }}"
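    # From here on, each element of _osd_hosts is a flat triple
    #   [<inventory hostname>, <osd fsid>, <osd id>]
    # addressed below as item.0, item.1 and item.2, and host_list is the
    # deduplicated set of hostnames taken from those triples.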
    - name: Get ceph-volume lvm list data
      ceph_volume:
        cluster: "{{ cluster }}"
        action: list
      environment:
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
      register: _lvm_list_data
      delegate_to: "{{ item }}"
      loop: "{{ host_list }}"
    - name: Set_fact _lvm_list
      ansible.builtin.set_fact:
        _lvm_list: "{{ _lvm_list | default({}) | combine(item.stdout | from_json) }}"
      with_items: "{{ _lvm_list_data.results }}"
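    # The JSON from `ceph-volume lvm list` is keyed by OSD id, so once the
    # per-host results are merged, `item.2 in _lvm_list.keys()` tells the
    # following tasks whether an OSD was deployed with ceph-volume lvm;
    # OSDs absent from the map go through the `ceph-volume simple` path.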
    - name: Refresh /etc/ceph/osd files non containerized_deployment
      ceph_volume_simple_scan:
        cluster: "{{ cluster }}"
        force: true
      delegate_to: "{{ item }}"
      loop: "{{ host_list }}"
      when: not containerized_deployment | bool
    - name: Get osd unit status
      ansible.builtin.systemd:
        name: ceph-osd@{{ item.2 }}
      register: osd_status
      delegate_to: "{{ item.0 }}"
      loop: "{{ _osd_hosts }}"
      when: containerized_deployment | bool
    - name: Refresh /etc/ceph/osd files containerized_deployment
      ansible.builtin.command: "{{ container_binary }} exec ceph-osd-{{ item.2 }} ceph-volume simple scan --force /var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
      changed_when: false
      delegate_to: "{{ item.0 }}"
      loop: "{{ _osd_hosts }}"
      when:
        - containerized_deployment | bool
        - item.2 not in _lvm_list.keys()
        - osd_status.results[0].status.ActiveState == 'active'
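    # When the OSD container is down there is no running container to exec
    # into, so the block below spins up a one-shot container to activate the
    # OSD under /var/lib/ceph/tmp, re-runs `ceph-volume simple scan` there,
    # and then cleans up the temporary mount and folder.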
    - name: Refresh /etc/ceph/osd files containerized_deployment when OSD container is down
      when:
        - containerized_deployment | bool
        - osd_status.results[0].status.ActiveState != 'active'
      block:
        - name: Create tmp osd folder
          ansible.builtin.file:
            path: /var/lib/ceph/tmp/{{ cluster }}-{{ item.2 }}
            state: directory
            mode: '0755'
          delegate_to: "{{ item.0 }}"
          when: item.2 not in _lvm_list.keys()
          loop: "{{ _osd_hosts }}"
        - name: Activate OSD
          ansible.builtin.command: |
            {{ container_binary }} run -ti --pids-limit=-1 --rm --net=host --privileged=true --pid=host --ipc=host --cpus=1
            -v /dev:/dev -v /etc/localtime:/etc/localtime:ro
            -v /var/lib/ceph/tmp/:/var/lib/ceph/osd:z,rshared
            -v /etc/ceph:/etc/ceph:z -v /var/run/ceph:/var/run/ceph:z
            -v /var/run/udev/:/var/run/udev/ -v /var/log/ceph:/var/log/ceph:z
            -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=0 -e CLUSTER=ceph -e DEBUG=verbose
            -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=134217728 -v /run/lvm/:/run/lvm/
            -e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE -e CONTAINER_IMAGE={{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag }}
            -e OSD_ID={{ item.2 }}
            --entrypoint=ceph-volume
            {{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag }}
            simple activate {{ item.2 }} {{ item.1 }} --no-systemd
          changed_when: false
          delegate_to: "{{ item.0 }}"
          when: item.2 not in _lvm_list.keys()
          loop: "{{ _osd_hosts }}"
        - name: Simple scan
          ansible.builtin.command: |
            {{ container_binary }} run -ti --pids-limit=-1 --rm --net=host --privileged=true --pid=host --ipc=host --cpus=1
            -v /dev:/dev -v /etc/localtime:/etc/localtime:ro
            -v /var/lib/ceph/tmp/:/var/lib/ceph/osd:z,rshared
            -v /etc/ceph:/etc/ceph:z -v /var/run/ceph:/var/run/ceph:z
            -v /var/run/udev/:/var/run/udev/ -v /var/log/ceph:/var/log/ceph:z
            -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=0 -e CLUSTER=ceph -e DEBUG=verbose
            -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=134217728 -v /run/lvm/:/run/lvm/
            -e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE -e CONTAINER_IMAGE={{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag }}
            -e OSD_ID={{ item.2 }}
            --entrypoint=ceph-volume
            {{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag }}
            simple scan --force /var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}
          changed_when: false
          delegate_to: "{{ item.0 }}"
          when: item.2 not in _lvm_list.keys()
          loop: "{{ _osd_hosts }}"
        - name: Umount OSD temp folder
          ansible.posix.mount:
            path: /var/lib/ceph/tmp/{{ cluster }}-{{ item.2 }}
            state: unmounted
          delegate_to: "{{ item.0 }}"
          when: item.2 not in _lvm_list.keys()
          loop: "{{ _osd_hosts }}"
        - name: Remove OSD temp folder
          ansible.builtin.file:
            path: /var/lib/ceph/tmp/{{ cluster }}-{{ item.2 }}
            state: absent
          delegate_to: "{{ item.0 }}"
          when: item.2 not in _lvm_list.keys()
          loop: "{{ _osd_hosts }}"
    - name: Find /etc/ceph/osd files
      ansible.builtin.find:
        paths: /etc/ceph/osd
        pattern: "{{ item.2 }}-*"
      register: ceph_osd_data
      delegate_to: "{{ item.0 }}"
      loop: "{{ _osd_hosts }}"
      when: item.2 not in _lvm_list.keys()
    - name: Slurp ceph osd files content
      ansible.builtin.slurp:
        src: "{{ item['files'][0]['path'] }}"
      delegate_to: "{{ item.item.0 }}"
      register: ceph_osd_files_content
      loop: "{{ ceph_osd_data.results }}"
      when:
        - item.skipped is undefined
        - item.matched > 0
    - name: Set_fact ceph_osd_files_json
      ansible.builtin.set_fact:
        ceph_osd_data_json: "{{ ceph_osd_data_json | default({}) | combine({item.item.item.2: item.content | b64decode | from_json}) }}"
      with_items: "{{ ceph_osd_files_content.results }}"
      when: item.skipped is undefined
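    # ceph_osd_data_json now maps each non-lvm OSD id to the parsed content
    # of its /etc/ceph/osd/<id>-*.json file written by `ceph-volume simple
    # scan`, e.g. the device paths and the `encrypted` flag consumed below.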
    - name: Mark osd(s) out of the cluster
      ceph_osd:
        ids: "{{ osd_to_kill.split(',') }}"
        cluster: "{{ cluster }}"
        state: out
      environment:
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
      run_once: true
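    # Marking the OSDs "out" (the equivalent of `ceph osd out <id>`) lets the
    # cluster rebalance their placement groups away before anything is
    # stopped or destroyed.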
    - name: Stop osd(s) service
      ansible.builtin.service:
        name: ceph-osd@{{ item.2 }}
        state: stopped
        enabled: false
      loop: "{{ _osd_hosts }}"
      delegate_to: "{{ item.0 }}"
    - name: Umount osd lockbox
      ansible.posix.mount:
        path: "/var/lib/ceph/osd-lockbox/{{ ceph_osd_data_json[item.2]['data']['uuid'] }}"
        state: absent
      loop: "{{ _osd_hosts }}"
      delegate_to: "{{ item.0 }}"
      when:
        - not containerized_deployment | bool
        - item.2 not in _lvm_list.keys()
        - ceph_osd_data_json[item.2]['encrypted'] | default(False) | bool
        - ceph_osd_data_json[item.2]['data']['uuid'] is defined
    - name: Umount osd data
      ansible.posix.mount:
        path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
        state: absent
      loop: "{{ _osd_hosts }}"
      delegate_to: "{{ item.0 }}"
      when: not containerized_deployment | bool
    - name: Get parent device for data partition
      ansible.builtin.command: lsblk --noheadings --output PKNAME --nodeps "{{ ceph_osd_data_json[item.2]['data']['path'] }}"
      register: parent_device_data_part
      loop: "{{ _osd_hosts }}"
      delegate_to: "{{ item.0 }}"
      changed_when: false
      when:
        - item.2 not in _lvm_list.keys()
        - ceph_osd_data_json[item.2]['data']['path'] is defined
    - name: Add pkname information in ceph_osd_data_json
      ansible.builtin.set_fact:
        ceph_osd_data_json: "{{ ceph_osd_data_json | default({}) | combine({item.item[2]: {'pkname_data': '/dev/' + item.stdout}}, recursive=True) }}"
      loop: "{{ parent_device_data_part.results }}"
      when: item.skipped is undefined
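    # Illustrative example: for a data partition /dev/sdb1, lsblk prints the
    # parent kernel name "sdb", so pkname_data becomes /dev/sdb and the whole
    # parent disk is what gets zapped further down.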
    - name: Close dmcrypt devices if needed
      ansible.builtin.command: "cryptsetup close {{ ceph_osd_data_json[item.2][item.3]['uuid'] }}"
      with_nested:
        - "{{ _osd_hosts }}"
        - ['block_dmcrypt', 'block.db_dmcrypt', 'block.wal_dmcrypt', 'data', 'journal_dmcrypt']
      delegate_to: "{{ item.0 }}"
      failed_when: false
      register: result
      until: result is succeeded
      changed_when: false
      when:
        - item.2 not in _lvm_list.keys()
        - ceph_osd_data_json[item.2]['encrypted'] | default(False) | bool
        - ceph_osd_data_json[item.2][item.3] is defined
    - name: Use ceph-volume lvm zap to destroy all partitions
      ceph_volume:
        cluster: "{{ cluster }}"
        action: zap
        destroy: true
        data: "{{ ceph_osd_data_json[item.2]['pkname_data'] if item.3 == 'data' else ceph_osd_data_json[item.2][item.3]['path'] }}"
      environment:
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
      with_nested:
        - "{{ _osd_hosts }}"
        - ['block', 'block.db', 'block.wal', 'journal', 'data']
      delegate_to: "{{ item.0 }}"
      failed_when: false
      register: result
      when:
        - item.2 not in _lvm_list.keys()
        - ceph_osd_data_json[item.2][item.3] is defined
    - name: Zap osd devices
      ceph_volume:
        action: "zap"
        osd_fsid: "{{ item.1 }}"
      environment:
        CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
      delegate_to: "{{ item.0 }}"
      loop: "{{ _osd_hosts }}"
      when: item.2 in _lvm_list.keys()
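    # Roughly equivalent CLI for the task above (illustrative):
    #   ceph-volume lvm zap --destroy --osd-fsid <fsid>
    # i.e. ceph-volume lvm OSDs are zapped by fsid, while the previous task
    # zapped legacy (non-lvm) OSDs device by device.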
    - name: Ensure osds are marked down
      ceph_osd:
        ids: "{{ osd_to_kill.split(',') }}"
        cluster: "{{ cluster }}"
        state: down
      environment:
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
      run_once: true
      delegate_to: "{{ groups[mon_group_name][0] }}"
    - name: Purge osd(s) from the cluster
      ceph_osd:
        ids: "{{ item }}"
        cluster: "{{ cluster }}"
        state: purge
      environment:
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
      run_once: true
      with_items: "{{ osd_to_kill.split(',') }}"
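    # `state: purge` is the equivalent of `ceph osd purge <id>
    # --yes-i-really-mean-it`: it removes the OSD from the CRUSH map and
    # deletes its authentication key and OSD map entry in one step.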
    - name: Remove osd data dir
      ansible.builtin.file:
        path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
        state: absent
      loop: "{{ _osd_hosts }}"
      delegate_to: "{{ item.0 }}"
    - name: Show ceph health
      ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
      changed_when: false
    - name: Show ceph osd tree
      ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd tree"
      changed_when: false