fs2bs: skip migration when a mix of fs and bs is detected

Since the default of `osd_objectstore` has changed as of 3.2, some
deployments might have a mix of filestore and bluestore OSDs on the
same node. In some specific cases, a filestore OSD may even share a
journal/db device with a bluestore OSD. We shouldn't try to redeploy
in this context because ceph-volume will complain (either because
`lvm batch` doesn't accept partitions, or because of an existing GPT
header).
The safest option is to skip the migration on such a node when the mix
is detected, or to force the redeployment of all OSDs, including those
already using bluestore (the option `force_filestore_to_bluestore=True`
has to be passed as an extra var).
If all OSDs on the node are using filestore, they will all be migrated
to bluestore.
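
For reference, the forced invocation (also documented in the playbook
header updated below) looks like:

  ansible-playbook infrastructure-playbooks/filestore-to-bluestore.yml \
      --limit <osd-node-to-migrate> -e force_filestore_to_bluestore=True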

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1875777

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit e66f12d138)
pull/6195/head
Guillaume Abrioux 2020-12-15 17:49:32 +01:00
parent 8d59a25a55
commit af95c34c6b
1 changed file with 61 additions and 25 deletions

--- a/infrastructure-playbooks/filestore-to-bluestore.yml
+++ b/infrastructure-playbooks/filestore-to-bluestore.yml
@@ -2,7 +2,10 @@
 #
 # Use it like this:
 # ansible-playbook infrastructure-playbooks/filestore-to-bluestore.yml --limit <osd-node-to-migrate>
-# *ALL* osds on nodes will be shrinked and redeployed using bluestore backend with ceph-volume
+# If all osds on the node are using filestore backend, then *ALL* of them will be shrinked and redeployed using bluestore backend with ceph-volume.
+#
+# If a mix of filestore and bluestore OSDs is detected on the node, the node will be skipped unless you pass `force_filestore_to_bluestore=True` as an extra var.
+# ie: ansible-playbook infrastructure-playbooks/filestore-to-bluestore.yml --limit <osd-node-to-migrate> -e force_filestore_to_bluestore=True
 
 - hosts: "{{ osd_group_name }}"
   become: true
@@ -25,19 +28,8 @@
     - import_role:
         name: ceph-defaults
 
-    - name: set_fact current_objectstore
-      set_fact:
-        current_objectstore: '{{ osd_objectstore }}'
-
-    - name: warn user about osd already using bluestore
-      debug:
-        msg: 'WARNING: {{ inventory_hostname }} is already using bluestore. Skipping all tasks.'
-      when: current_objectstore == 'bluestore'
-
-    - name: shrink and redeploy filestore osds
-      when: current_objectstore == 'filestore'
-      block:
-        - import_role:
-            name: ceph-facts
-            tasks_from: container_binary.yml
+    - name: import_role ceph-facts
+      import_role:
+        name: ceph-facts
+        tasks_from: container_binary.yml
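
The `container_binary.yml` tasks imported above are expected to set the
`container_binary` fact that `container_run_cmd` and `container_exec_cmd`
build on in the next hunk. A minimal stand-in for illustration only (an
assumption, not the actual ceph-facts task):

    - name: set container_binary (illustrative stand-in, not the real ceph-facts task)
      set_fact:
        # Assumed heuristic: podman on RHEL/CentOS 8+, docker otherwise.
        container_binary: "{{ 'podman' if (ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] | int >= 8) else 'docker' }}"
      when: containerized_deployment | bool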
@@ -46,13 +38,45 @@
         container_run_cmd: "{{ container_binary + ' run --rm --privileged=true --net=host --pid=host --ipc=host -v /dev:/dev -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph -v /var/run:/var/run --entrypoint=' if containerized_deployment | bool else '' }}ceph-volume {{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else '' }}"
         container_exec_cmd: "{{ container_binary + ' exec ceph-mon-' + hostvars[groups[mon_group_name][0]]['ansible_hostname'] if containerized_deployment | bool else '' }}"
 
     - name: get ceph osd tree data
-      command: "{{ container_exec_cmd }} ceph osd tree -f json"
+      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd tree -f json"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       register: osd_tree
+      changed_when: false
       run_once: true
 
+    - name: set_fact osd_ids
+      set_fact:
+        osd_ids: "{{ osd_ids | default([]) | union(item) }}"
+      with_items:
+        - "{{ ((osd_tree.stdout | default('{}') | trim | from_json).nodes | selectattr('name', 'match', '^' + inventory_hostname + '$') | map(attribute='children') | list) }}"
+
+    - name: get osd metadata
+      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd metadata osd.{{ item }} -f json"
+      register: osd_metadata
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+      run_once: true
+      changed_when: false
+      with_items: "{{ osd_ids }}"
+
+    - name: set_fact _osd_objectstore
+      set_fact:
+        _osd_objectstore: "{{ _osd_objectstore | default([]) | union([(item.stdout | from_json).osd_objectstore]) }}"
+      with_items: "{{ osd_metadata.results }}"
+
+    - name: set_fact skip_this_node
+      set_fact:
+        skip_this_node: "{{ ('filestore' in _osd_objectstore and 'bluestore' in _osd_objectstore and not force_filestore_to_bluestore | default(False)) or ('filestore' not in _osd_objectstore) }}"
+
+    - name: add node to skipped node list
+      set_fact:
+        skipped_nodes: "{{ skipped_nodes | default([]) | union([inventory_hostname]) }}"
+      when:
+        - skip_this_node | bool
+
+    - name: filestore to bluestore migration workflow
+      when: not skip_this_node | bool
+      block:
         - name: get ceph-volume lvm inventory data
           command: "{{ container_run_cmd }} --cluster {{ cluster }} inventory --format json"
           register: ceph_volume_inventory
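
To make the `skip_this_node` expression above easier to read, here is how
it evaluates for the possible contents of `_osd_objectstore` (a sketch;
`force_filestore_to_bluestore` defaults to False):

    # _osd_objectstore             force_filestore_to_bluestore   skip_this_node
    # ['filestore']                (any)                          false -> node gets migrated
    # ['filestore', 'bluestore']   false (default)                true  -> node is skipped
    # ['filestore', 'bluestore']   true                           false -> all OSDs redeployed with bluestore
    # ['bluestore']                (any)                          true  -> nothing to migrate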
@@ -357,3 +381,15 @@
             name: ceph-config
         - import_role:
             name: ceph-osd
+
+    - name: report any skipped node during this playbook
+      debug:
+        msg: |
+          "WARNING:"
+          "The following nodes were skipped because OSDs are either"
+          "all bluestore ones or there's a mix of filestore and bluestore OSDs"
+          "{{ ' '.join(skipped_nodes) }}"
+      when:
+        - inventory_hostname == ansible_play_hosts_all | last
+        - skipped_nodes is defined
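
For illustration, assuming a single skipped host named `osd-node-1` (a
hypothetical hostname), the literal block above would render as:

    "WARNING:"
    "The following nodes were skipped because OSDs are either"
    "all bluestore ones or there's a mix of filestore and bluestore OSDs"
    "osd-node-1"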