# This playbook migrates an OSD from filestore to bluestore backend.
#
# Use it like this:
#
# ansible-playbook infrastructure-playbooks/filestore-to-bluestore.yml --limit <osd-node-to-migrate>
# *ALL* OSDs on the targeted nodes will be shrunk and redeployed using the bluestore backend with ceph-volume.

- hosts: "{{ osd_group_name }}"
  become: true
  serial: 1
  vars:
    delegate_facts_host: true
  tasks:
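    # Fact gathering is delegated to every monitor so that tasks delegated
    # to the first monitor below can rely on its facts.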
    - name: gather and delegate facts
      setup:
      delegate_to: "{{ item }}"
      delegate_facts: True
      with_items: "{{ groups[mon_group_name] }}"
      run_once: true
      when: delegate_facts_host | bool

    - import_role:
        name: ceph-defaults

    - name: set_fact current_objectstore
      set_fact:
        current_objectstore: '{{ osd_objectstore }}'

    - name: warn user about osd already using bluestore
      debug:
        msg: 'WARNING: {{ inventory_hostname }} is already using bluestore. Skipping all tasks.'
      when: current_objectstore == 'bluestore'

    - name: shrink and redeploy filestore osds
      when: current_objectstore == 'filestore'
      block:
        - import_role:
            name: ceph-facts

        - name: get ceph osd tree data
          command: "{{ container_exec_cmd }} ceph osd tree -f json"
          delegate_to: "{{ groups[mon_group_name][0] }}"
          register: osd_tree
          run_once: true

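        # In a containerized deployment, ceph-volume is run through a
        # one-shot privileged container; otherwise the binary installed on
        # the host is called directly.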
        - name: set_fact container_run_cmd
          set_fact:
            container_run_cmd: "{{ container_binary + ' run --rm --privileged=true --ulimit nofile=1024:4096 --net=host --pid=host --ipc=host -v /dev:/dev -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph -v /var/run:/var/run --entrypoint=' if containerized_deployment else '' }}ceph-volume {{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else '' }}"

        - name: get ceph-volume lvm inventory data
          command: "{{ container_run_cmd }} --cluster {{ cluster }} inventory --format json"
          register: ceph_volume_inventory

        - name: set_fact inventory
          set_fact:
            inventory: "{{ ceph_volume_inventory.stdout | from_json }}"

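        # Devices still holding ceph-disk prepared OSDs show up in the
        # inventory as unavailable with the rejection reason
        # 'Used by ceph-disk'.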
        - name: set_fact ceph_disk_osds
          set_fact:
            ceph_disk_osds_devices: "{{ ceph_disk_osds_devices | default([]) + [item.path] }}"
          with_items: "{{ inventory }}"
          when:
            - not item.available | bool
            - "'Used by ceph-disk' in item.rejected_reasons"

        - name: ceph-disk prepared OSDs related tasks
          when: ceph_disk_osds_devices | default([]) | length > 0
          block:
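            # ceph-disk created a GPT data partition labelled 'ceph data';
            # only devices whose first partition carries that label are
            # handled here.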
            - name: get partlabel
              command: blkid "{{ item + 'p1' if item is match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$') else item + '1' }}" -s PARTLABEL -o value
              register: partlabel
              with_items: "{{ ceph_disk_osds_devices | default([]) }}"

            - name: get simple scan data
              command: "{{ container_run_cmd }} --cluster {{ cluster }} simple scan {{ item.item + 'p1' if item.item is match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$') else item.item + '1' }} --stdout"
              register: simple_scan
              with_items: "{{ partlabel.results | default([]) }}"
              when: item.stdout == 'ceph data'
              ignore_errors: true

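            # The simple scan JSON exposes the OSD id ('whoami'), the data
            # and journal uuids/paths and the encryption flag used by the
            # tasks below.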
            - name: mark out osds
              command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ (item.0.stdout | from_json).whoami }}"
              with_together:
                - "{{ simple_scan.results }}"
                - "{{ partlabel.results }}"
              delegate_to: "{{ groups[mon_group_name][0] }}"
              run_once: true
              when: item.1.stdout == 'ceph data'

            - name: stop and disable old osd services
              service:
                name: "ceph-osd@{{ (item.0.stdout | from_json).whoami }}"
                state: stopped
                enabled: no
              with_together:
                - "{{ simple_scan.results }}"
                - "{{ partlabel.results }}"
              when: item.1.stdout == 'ceph data'

            - name: umount osd data
              mount:
                path: "/var/lib/ceph/osd/{{ cluster }}-{{ (item.0.stdout | from_json).whoami }}"
                state: unmounted
              with_together:
                - "{{ simple_scan.results }}"
                - "{{ partlabel.results }}"
              when: item.1.stdout == 'ceph data'

            - name: umount osd lockbox
              mount:
                path: "/var/lib/ceph/osd-lockbox/{{ (item.0.stdout | from_json).data.uuid }}"
                state: unmounted
              with_together:
                - "{{ simple_scan.results }}"
                - "{{ partlabel.results }}"
              when:
                - item.1.stdout == 'ceph data'
                - (item.0.stdout | from_json).encrypted | default(False) | bool

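            # dmcrypt OSDs keep their data and journal devices mapped through
            # dm-crypt at this point; close the mappings before zapping.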
            - name: ensure dmcrypt for data device is closed
              command: cryptsetup close "{{ (item.0.stdout | from_json).data.uuid }}"
              with_together:
                - "{{ simple_scan.results }}"
                - "{{ partlabel.results }}"
              failed_when: false
              changed_when: false
              when:
                - item.1.stdout == 'ceph data'
                - (item.0.stdout | from_json).encrypted | default(False) | bool

            - name: ensure dmcrypt for journal device is closed
              command: cryptsetup close "{{ (item.0.stdout | from_json).journal.uuid }}"
              with_together:
                - "{{ simple_scan.results }}"
                - "{{ partlabel.results }}"
              failed_when: false
              changed_when: false
              when:
                - item.1.stdout == 'ceph data'
                - (item.0.stdout | from_json).encrypted | default(False) | bool

            - name: zap data devices
              command: "{{ container_run_cmd }} --cluster {{ cluster }} lvm zap --destroy {{ (item.0.stdout | from_json).data.path }}"
              with_together:
                - "{{ simple_scan.results }}"
                - "{{ partlabel.results }}"
              when: item.1.stdout == 'ceph data'

            - name: zap journal devices
              command: "{{ container_run_cmd }} --cluster {{ cluster }} lvm zap --destroy {{ (item.0.stdout | from_json).journal.path }}"
              with_together:
                - "{{ simple_scan.results }}"
                - "{{ partlabel.results }}"
              when:
                - item.1.stdout == 'ceph data'
                - (item.0.stdout | from_json).journal.path is defined

        - name: get ceph-volume lvm list data
          command: "{{ container_run_cmd }} --cluster {{ cluster }} lvm list --format json"
          register: ceph_volume_lvm_list

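        # 'lvm list' returns a dict keyed by OSD id; flatten it into a single
        # list of logical volumes for the loops below.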
        - name: set_fact _lvm_list
          set_fact:
            _lvm_list: "{{ _lvm_list | default([]) + item.value }}"
          with_dict: "{{ (ceph_volume_lvm_list.stdout | from_json) }}"

        - name: ceph-volume prepared OSDs related tasks
          block:
            - name: mark out osds
              command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ item }}"
              with_items: "{{ (ceph_volume_lvm_list.stdout | from_json).keys() | list }}"
              delegate_to: "{{ groups[mon_group_name][0] }}"
              run_once: true

            - name: stop and disable old osd services
              service:
                name: "ceph-osd@{{ item }}"
                state: stopped
                enabled: no
              with_items: "{{ (ceph_volume_lvm_list.stdout | from_json).keys() | list }}"

            - name: mark down osds
              command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd down {{ item }}"
              with_items: "{{ (ceph_volume_lvm_list.stdout | from_json).keys() | list }}"
              delegate_to: "{{ groups[mon_group_name][0] }}"
              run_once: true

            - name: ensure all dmcrypt for data and journal are closed
              command: cryptsetup close "{{ item['lv_uuid'] }}"
              with_items: "{{ _lvm_list }}"
              changed_when: false
              failed_when: false
              when: item['tags'].get('ceph.encrypted', 0) | int == 1

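            # LVs created by 'ceph-volume lvm batch' (osd-data-* inside a
            # ceph-* VG) get destroy=true; pre-provisioned LVs are only
            # zapped and kept.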
            - name: set_fact osd_fsid_list
              set_fact:
                osd_fsid_list: "{{ osd_fsid_list | default([]) + [{'osd_fsid': item.tags['ceph.osd_fsid'], 'destroy': (item.lv_name.startswith('osd-data-') and item.vg_name.startswith('ceph-')) | ternary(true, false)}] }}"
              with_items: "{{ _lvm_list }}"
              when: item.type == 'data'

            - name: zap ceph-volume prepared OSDs
              ceph_volume:
                action: "zap"
                osd_fsid: "{{ item.osd_fsid }}"
                destroy: "{{ item.destroy }}"
              environment:
                CEPH_VOLUME_DEBUG: 1
                CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
                CEPH_CONTAINER_BINARY: "{{ container_binary }}"
              loop: "{{ osd_fsid_list }}"
              when: osd_fsid_list is defined

            - name: ensure all dm are closed
              command: dmsetup remove "{{ item['lv_path'] }}"
              with_items: "{{ _lvm_list }}"
              changed_when: false
              failed_when: false
              when:
                - item['lv_path'] is defined
                # Do not close mappers for non 'lvm batch' devices
                - devices | default([]) | length > 0

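            # pvremove wipes the LVM metadata from the underlying device;
            # only devices fully provisioned by 'lvm batch' are reset here.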
            - name: ensure all pv are removed
              command: "pvremove --yes {{ item.devices[0] }}"
              with_items: "{{ _lvm_list }}"
              failed_when: false
              when:
                - item.type == 'data'
                - item.lv_name.startswith('osd-data-') | bool
                - item.vg_name.startswith('ceph-') | bool
          when: _lvm_list is defined

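        # The OSD ids to purge are taken from this host's entry in the osd
        # tree gathered earlier.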
        - name: set_fact osd_ids
          set_fact:
            osd_ids: "{{ osd_ids | default([]) + [item] }}"
          with_items:
            - "{{ ((osd_tree.stdout | from_json).nodes | selectattr('name', 'match', inventory_hostname) | map(attribute='children') | list) }}"

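        # 'ceph osd purge' removes the OSDs from the CRUSH map and deletes
        # their auth keys in one command.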
        - name: purge osd(s) from the cluster
          command: >
            {{ container_exec_cmd }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it
          run_once: true
          delegate_to: "{{ groups[mon_group_name][0] }}"
          with_items: "{{ osd_ids }}"

        - name: purge /var/lib/ceph/osd directories
          file:
            path: "/var/lib/ceph/osd/{{ cluster }}-{{ item }}"
            state: absent
          with_items: "{{ osd_ids }}"

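        # Writing a fresh msdos label wipes the leftover GPT partition table
        # from the former OSD devices.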
        - name: remove gpt header
          command: parted -s "{{ item }}" mklabel msdos
          with_items: "{{ (devices + dedicated_devices + ceph_disk_osds_devices | default([])) | unique }}"

        - name: refresh ansible devices fact
          setup:
            filter: ansible_devices
          when: osd_auto_discovery | bool

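        # Redeploy the OSDs with bluestore by re-running the standard roles
        # with osd_objectstore forced to bluestore.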
        - name: force osd_objectstore to bluestore
          set_fact:
            osd_objectstore: bluestore

        - import_role:
            name: ceph-defaults
        - import_role:
            name: ceph-facts
        - import_role:
            name: ceph-handler
        - import_role:
            name: ceph-container-common
          when: containerized_deployment | bool
        - import_role:
            name: ceph-config
        - import_role:
            name: ceph-osd