filestore-to-bluestore: skip bluestore osd nodes

If the OSD node is already using bluestore OSDs, then we should skip
all the remaining tasks to avoid purging OSDs for nothing.
Instead, we warn the user.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1790472

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
(cherry picked from commit 83c5a1d7a8)
pull/5038/head
Dimitri Savineau 2020-01-23 16:58:14 -05:00 committed by Guillaume Abrioux
parent 460d3557d7
commit 487be2675a
1 changed file with 234 additions and 222 deletions

@@ -21,245 +21,257 @@
     - import_role:
         name: ceph-defaults
-    - import_role:
-        name: ceph-facts
+
+    - name: set_fact current_objectstore
+      set_fact:
+        current_objectstore: '{{ osd_objectstore }}'
+
+    - name: warn user about osd already using bluestore
+      debug:
+        msg: 'WARNING: {{ inventory_hostname }} is already using bluestore. Skipping all tasks.'
+      when: current_objectstore == 'bluestore'
+
+    - name: shrink and redeploy filestore osds
+      when: current_objectstore == 'filestore'
+      block:
+        - import_role:
+            name: ceph-facts
 
-    - name: get ceph osd tree data
-      command: "{{ container_exec_cmd }} ceph osd tree -f json"
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-      register: osd_tree
-      run_once: true
+        - name: get ceph osd tree data
+          command: "{{ container_exec_cmd }} ceph osd tree -f json"
+          delegate_to: "{{ groups[mon_group_name][0] }}"
+          register: osd_tree
+          run_once: true
 
-    - name: set_fact container_run_cmd
-      set_fact:
-        container_run_cmd: "{{ container_binary + ' run --rm --privileged=true --ulimit nofile=1024:4096 --net=host --pid=host --ipc=host -v /dev:/dev -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph -v /var/run:/var/run --entrypoint=' if containerized_deployment else '' }}ceph-volume {{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else '' }}"
+        - name: set_fact container_run_cmd
+          set_fact:
+            container_run_cmd: "{{ container_binary + ' run --rm --privileged=true --ulimit nofile=1024:4096 --net=host --pid=host --ipc=host -v /dev:/dev -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph -v /var/run:/var/run --entrypoint=' if containerized_deployment else '' }}ceph-volume {{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else '' }}"
 
-    - name: get ceph-volume lvm inventory data
-      command: "{{ container_run_cmd }} --cluster {{ cluster }} inventory --format json"
-      register: ceph_volume_inventory
+        - name: get ceph-volume lvm inventory data
+          command: "{{ container_run_cmd }} --cluster {{ cluster }} inventory --format json"
+          register: ceph_volume_inventory
 
-    - name: set_fact inventory
-      set_fact:
-        inventory: "{{ ceph_volume_inventory.stdout | from_json }}"
+        - name: set_fact inventory
+          set_fact:
+            inventory: "{{ ceph_volume_inventory.stdout | from_json }}"
 
-    - name: set_fact ceph_disk_osds
-      set_fact:
-        ceph_disk_osds_devices: "{{ ceph_disk_osds_devices | default([]) + [item.path] }}"
-      with_items: "{{ inventory }}"
-      when:
-        - not item.available | bool
-        - "'Used by ceph-disk' in item.rejected_reasons"
+        - name: set_fact ceph_disk_osds
+          set_fact:
+            ceph_disk_osds_devices: "{{ ceph_disk_osds_devices | default([]) + [item.path] }}"
+          with_items: "{{ inventory }}"
+          when:
+            - not item.available | bool
+            - "'Used by ceph-disk' in item.rejected_reasons"
 
-    - name: ceph-disk prepared OSDs related tasks
-      when: ceph_disk_osds_devices | default([]) | length > 0
-      block:
-        - name: get partlabel
-          command: blkid "{{ item + 'p' if item is match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$') else item }}"1 -s PARTLABEL -o value
-          register: partlabel
-          with_items: "{{ ceph_disk_osds_devices | default([]) }}"
+        - name: ceph-disk prepared OSDs related tasks
+          when: ceph_disk_osds_devices | default([]) | length > 0
+          block:
+            - name: get partlabel
+              command: blkid "{{ item + 'p' if item is match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$') else item }}"1 -s PARTLABEL -o value
+              register: partlabel
+              with_items: "{{ ceph_disk_osds_devices | default([]) }}"
 
-        - name: get simple scan data
-          command: "{{ container_run_cmd }} --cluster {{ cluster }} simple scan {{ item.item + 'p1' if item.item is match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$') else item.item + '1' }} --stdout"
-          register: simple_scan
-          with_items: "{{ partlabel.results | default([]) }}"
-          when: item.stdout == 'ceph data'
-          ignore_errors: true
+            - name: get simple scan data
+              command: "{{ container_run_cmd }} --cluster {{ cluster }} simple scan {{ item.item + 'p1' if item.item is match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$') else item.item + '1' }} --stdout"
+              register: simple_scan
+              with_items: "{{ partlabel.results | default([]) }}"
+              when: item.stdout == 'ceph data'
+              ignore_errors: true
 
-        - name: mark out osds
-          command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ (item.0.stdout | from_json).whoami }}"
-          with_together:
-            - "{{ simple_scan.results }}"
-            - "{{ partlabel.results }}"
-          delegate_to: "{{ groups[mon_group_name][0] }}"
-          run_once: true
-          when: item.1.stdout == 'ceph data'
+            - name: mark out osds
+              command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ (item.0.stdout | from_json).whoami }}"
+              with_together:
+                - "{{ simple_scan.results }}"
+                - "{{ partlabel.results }}"
+              delegate_to: "{{ groups[mon_group_name][0] }}"
+              run_once: true
+              when: item.1.stdout == 'ceph data'
 
-        - name: stop and disable old osd services
-          service:
-            name: "ceph-osd@{{ (item.0.stdout | from_json).whoami }}"
-            state: stopped
-            enabled: no
-          with_together:
-            - "{{ simple_scan.results }}"
-            - "{{ partlabel.results }}"
-          when: item.1.stdout == 'ceph data'
+            - name: stop and disable old osd services
+              service:
+                name: "ceph-osd@{{ (item.0.stdout | from_json).whoami }}"
+                state: stopped
+                enabled: no
+              with_together:
+                - "{{ simple_scan.results }}"
+                - "{{ partlabel.results }}"
+              when: item.1.stdout == 'ceph data'
 
-        - name: umount osd data
-          mount:
-            path: "/var/lib/ceph/osd/{{ cluster }}-{{ (item.0.stdout | from_json).whoami }}"
-            state: unmounted
-          with_together:
-            - "{{ simple_scan.results }}"
-            - "{{ partlabel.results }}"
-          when: item.1.stdout == 'ceph data'
+            - name: umount osd data
+              mount:
+                path: "/var/lib/ceph/osd/{{ cluster }}-{{ (item.0.stdout | from_json).whoami }}"
+                state: unmounted
+              with_together:
+                - "{{ simple_scan.results }}"
+                - "{{ partlabel.results }}"
+              when: item.1.stdout == 'ceph data'
 
-        - name: umount osd lockbox
-          mount:
-            path: "/var/lib/ceph/osd-lockbox/{{ (item.0.stdout | from_json).data.uuid }}"
-            state: unmounted
-          with_together:
-            - "{{ simple_scan.results }}"
-            - "{{ partlabel.results }}"
-          when:
-            - item.1.stdout == 'ceph data'
-            - (item.0.stdout | from_json).encrypted | default(False) | bool
+            - name: umount osd lockbox
+              mount:
+                path: "/var/lib/ceph/osd-lockbox/{{ (item.0.stdout | from_json).data.uuid }}"
+                state: unmounted
+              with_together:
+                - "{{ simple_scan.results }}"
+                - "{{ partlabel.results }}"
+              when:
+                - item.1.stdout == 'ceph data'
+                - (item.0.stdout | from_json).encrypted | default(False) | bool
 
-        - name: ensure dmcrypt for data device is closed
-          command: cryptsetup close "{{ (item.0.stdout | from_json).data.uuid }}"
-          with_together:
-            - "{{ simple_scan.results }}"
-            - "{{ partlabel.results }}"
-          failed_when: false
-          changed_when: false
-          when:
-            - item.1.stdout == 'ceph data'
-            - (item.0.stdout | from_json).encrypted | default(False) | bool
+            - name: ensure dmcrypt for data device is closed
+              command: cryptsetup close "{{ (item.0.stdout | from_json).data.uuid }}"
+              with_together:
+                - "{{ simple_scan.results }}"
+                - "{{ partlabel.results }}"
+              failed_when: false
+              changed_when: false
+              when:
+                - item.1.stdout == 'ceph data'
+                - (item.0.stdout | from_json).encrypted | default(False) | bool
 
-        - name: ensure dmcrypt for journal device is closed
-          command: cryptsetup close "{{ (item.0.stdout | from_json).journal.uuid }}"
-          with_together:
-            - "{{ simple_scan.results }}"
-            - "{{ partlabel.results }}"
-          failed_when: false
-          changed_when: false
-          when:
-            - item.1.stdout == 'ceph data'
-            - (item.0.stdout | from_json).encrypted | default(False) | bool
+            - name: ensure dmcrypt for journal device is closed
+              command: cryptsetup close "{{ (item.0.stdout | from_json).journal.uuid }}"
+              with_together:
+                - "{{ simple_scan.results }}"
+                - "{{ partlabel.results }}"
+              failed_when: false
+              changed_when: false
+              when:
+                - item.1.stdout == 'ceph data'
+                - (item.0.stdout | from_json).encrypted | default(False) | bool
 
-        - name: zap data devices
-          command: "{{ container_run_cmd }} --cluster {{ cluster }} lvm zap --destroy {{ (item.0.stdout | from_json).data.path }}"
-          with_together:
-            - "{{ simple_scan.results }}"
-            - "{{ partlabel.results }}"
-          when: item.1.stdout == 'ceph data'
+            - name: zap data devices
+              command: "{{ container_run_cmd }} --cluster {{ cluster }} lvm zap --destroy {{ (item.0.stdout | from_json).data.path }}"
+              with_together:
+                - "{{ simple_scan.results }}"
+                - "{{ partlabel.results }}"
+              when: item.1.stdout == 'ceph data'
 
-        - name: zap journal devices
-          command: "{{ container_run_cmd }} --cluster {{ cluster }} lvm zap --destroy {{ (item.0.stdout | from_json).journal.path }}"
-          with_together:
-            - "{{ simple_scan.results }}"
-            - "{{ partlabel.results }}"
-          when:
-            - item.1.stdout == 'ceph data'
-            - (item.0.stdout | from_json).journal.path is defined
+            - name: zap journal devices
+              command: "{{ container_run_cmd }} --cluster {{ cluster }} lvm zap --destroy {{ (item.0.stdout | from_json).journal.path }}"
+              with_together:
+                - "{{ simple_scan.results }}"
+                - "{{ partlabel.results }}"
+              when:
+                - item.1.stdout == 'ceph data'
+                - (item.0.stdout | from_json).journal.path is defined
 
-    - name: get ceph-volume lvm list data
-      command: "{{ container_run_cmd }} --cluster {{ cluster }} lvm list --format json"
-      register: ceph_volume_lvm_list
+        - name: get ceph-volume lvm list data
+          command: "{{ container_run_cmd }} --cluster {{ cluster }} lvm list --format json"
+          register: ceph_volume_lvm_list
 
-    - name: set_fact _lvm_list
-      set_fact:
-        _lvm_list: "{{ _lvm_list | default([]) + item.value }}"
-      with_dict: "{{ (ceph_volume_lvm_list.stdout | from_json) }}"
+        - name: set_fact _lvm_list
+          set_fact:
+            _lvm_list: "{{ _lvm_list | default([]) + item.value }}"
+          with_dict: "{{ (ceph_volume_lvm_list.stdout | from_json) }}"
 
-    - name: ceph-volume prepared OSDs related tasks
-      block:
-        - name: mark out osds
-          command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ item }}"
-          with_items: "{{ (ceph_volume_lvm_list.stdout | from_json).keys() | list }}"
-          delegate_to: "{{ groups[mon_group_name][0] }}"
-          run_once: true
+        - name: ceph-volume prepared OSDs related tasks
+          block:
+            - name: mark out osds
+              command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ item }}"
+              with_items: "{{ (ceph_volume_lvm_list.stdout | from_json).keys() | list }}"
+              delegate_to: "{{ groups[mon_group_name][0] }}"
+              run_once: true
 
-        - name: stop and disable old osd services
-          service:
-            name: "ceph-osd@{{ item }}"
-            state: stopped
-            enabled: no
-          with_items: "{{ (ceph_volume_lvm_list.stdout | from_json).keys() | list }}"
+            - name: stop and disable old osd services
+              service:
+                name: "ceph-osd@{{ item }}"
+                state: stopped
+                enabled: no
+              with_items: "{{ (ceph_volume_lvm_list.stdout | from_json).keys() | list }}"
 
-        - name: mark down osds
-          command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd down {{ item }}"
-          with_items: "{{ (ceph_volume_lvm_list.stdout | from_json).keys() | list }}"
-          delegate_to: "{{ groups[mon_group_name][0] }}"
-          run_once: true
+            - name: mark down osds
+              command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd down {{ item }}"
+              with_items: "{{ (ceph_volume_lvm_list.stdout | from_json).keys() | list }}"
+              delegate_to: "{{ groups[mon_group_name][0] }}"
+              run_once: true
 
-        - name: ensure all dmcrypt for data and journal are closed
-          command: cryptsetup close "{{ item['lv_uuid'] }}"
-          with_items: "{{ _lvm_list }}"
-          changed_when: false
-          failed_when: false
-          when: item['tags'].get('ceph.encrypted', 0) | int == 1
+            - name: ensure all dmcrypt for data and journal are closed
+              command: cryptsetup close "{{ item['lv_uuid'] }}"
+              with_items: "{{ _lvm_list }}"
+              changed_when: false
+              failed_when: false
+              when: item['tags'].get('ceph.encrypted', 0) | int == 1
 
-        - name: set_fact osd_fsid_list
-          set_fact:
-            osd_fsid_list: "{{ osd_fsid_list | default([]) + [{'osd_fsid': item.tags['ceph.osd_fsid'], 'destroy': (item.lv_name.startswith('osd-data-') and item.vg_name.startswith('ceph-')) | ternary(true, false)}] }}"
-          with_items: "{{ _lvm_list }}"
-          when: item.type == 'data'
+            - name: set_fact osd_fsid_list
+              set_fact:
+                osd_fsid_list: "{{ osd_fsid_list | default([]) + [{'osd_fsid': item.tags['ceph.osd_fsid'], 'destroy': (item.lv_name.startswith('osd-data-') and item.vg_name.startswith('ceph-')) | ternary(true, false)}] }}"
+              with_items: "{{ _lvm_list }}"
+              when: item.type == 'data'
 
-        - name: zap ceph-volume prepared OSDs
-          ceph_volume:
-            action: "zap"
-            osd_fsid: "{{ item.osd_fsid }}"
-            destroy: "{{ item.destroy }}"
-          environment:
-            CEPH_VOLUME_DEBUG: 1
-            CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
-            CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-          loop: "{{ osd_fsid_list }}"
-          when: osd_fsid_list is defined
+            - name: zap ceph-volume prepared OSDs
+              ceph_volume:
+                action: "zap"
+                osd_fsid: "{{ item.osd_fsid }}"
+                destroy: "{{ item.destroy }}"
+              environment:
+                CEPH_VOLUME_DEBUG: 1
+                CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
+                CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+              loop: "{{ osd_fsid_list }}"
+              when: osd_fsid_list is defined
 
-        - name: ensure all dm are closed
-          command: dmsetup remove "{{ item['lv_path'] }}"
-          with_items: "{{ _lvm_list }}"
-          changed_when: false
-          failed_when: false
-          when:
-            - item['lv_path'] is defined
-            # Do not close mappers for non 'lvm batch' devices
-            - devices | default([]) | length > 0
+            - name: ensure all dm are closed
+              command: dmsetup remove "{{ item['lv_path'] }}"
+              with_items: "{{ _lvm_list }}"
+              changed_when: false
+              failed_when: false
+              when:
+                - item['lv_path'] is defined
+                # Do not close mappers for non 'lvm batch' devices
+                - devices | default([]) | length > 0
 
-        - name: ensure all pv are removed
-          command: "pvremove --yes {{ item.devices[0] }}"
-          with_items: "{{ _lvm_list }}"
-          failed_when: false
-          when:
-            - item.type == 'data'
-            - item.lv_name.startswith('osd-data-') | bool
-            - item.vg_name.startswith('ceph-') | bool
-      when: _lvm_list is defined
+            - name: ensure all pv are removed
+              command: "pvremove --yes {{ item.devices[0] }}"
+              with_items: "{{ _lvm_list }}"
+              failed_when: false
+              when:
+                - item.type == 'data'
+                - item.lv_name.startswith('osd-data-') | bool
+                - item.vg_name.startswith('ceph-') | bool
+          when: _lvm_list is defined
 
-    - name: set_fact osd_ids
-      set_fact:
-        osd_ids: "{{ osd_ids | default([]) + [item] }}"
-      with_items:
-        - "{{ ((osd_tree.stdout | from_json).nodes | selectattr('name', 'match', inventory_hostname) | map(attribute='children') | list) }}"
+        - name: set_fact osd_ids
+          set_fact:
+            osd_ids: "{{ osd_ids | default([]) + [item] }}"
+          with_items:
+            - "{{ ((osd_tree.stdout | from_json).nodes | selectattr('name', 'match', inventory_hostname) | map(attribute='children') | list) }}"
 
-    - name: purge osd(s) from the cluster
-      command: >
-        {{ container_exec_cmd }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it
-      run_once: true
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-      with_items: "{{ osd_ids }}"
+        - name: purge osd(s) from the cluster
+          command: >
+            {{ container_exec_cmd }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it
+          run_once: true
+          delegate_to: "{{ groups[mon_group_name][0] }}"
+          with_items: "{{ osd_ids }}"
 
-    - name: purge /var/lib/ceph/osd directories
-      file:
-        path: "/var/lib/ceph/osd/{{ cluster }}-{{ item }}"
-        state: absent
-      with_items: "{{ osd_ids }}"
+        - name: purge /var/lib/ceph/osd directories
+          file:
+            path: "/var/lib/ceph/osd/{{ cluster }}-{{ item }}"
+            state: absent
+          with_items: "{{ osd_ids }}"
 
-    - name: remove gpt header
-      command: parted -s "{{ item }}" mklabel msdos
-      with_items: "{{ (devices + dedicated_devices + ceph_disk_osds_devices | default([])) | unique }}"
+        - name: remove gpt header
+          command: parted -s "{{ item }}" mklabel msdos
+          with_items: "{{ (devices + dedicated_devices + ceph_disk_osds_devices | default([])) | unique }}"
 
-    - name: refresh ansible devices fact
-      setup:
-        filter: ansible_devices
-      when: osd_auto_discovery | bool
+        - name: refresh ansible devices fact
+          setup:
+            filter: ansible_devices
+          when: osd_auto_discovery | bool
 
-    - import_role:
-        name: ceph-defaults
-    - import_role:
-        name: ceph-facts
-    - import_role:
-        name: ceph-handler
-    - import_role:
-        name: ceph-container-common
-      when: containerized_deployment | bool
-    - import_role:
-        name: ceph-config
-      vars:
-        osd_objectstore: bluestore
-    - import_role:
-        name: ceph-osd
-      vars:
-        osd_objectstore: bluestore
+        - name: force osd_objectstore to bluestore
+          set_fact:
+            osd_objectstore: bluestore
+
+        - import_role:
+            name: ceph-defaults
+        - import_role:
+            name: ceph-facts
+        - import_role:
+            name: ceph-handler
+        - import_role:
+            name: ceph-container-common
+          when: containerized_deployment | bool
+        - import_role:
+            name: ceph-config
+        - import_role:
+            name: ceph-osd
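
For context, the guard this commit adds is a snapshot-and-gate pattern: record the configured objectstore once, warn on nodes that already run bluestore, and run the destructive shrink/redeploy work only on filestore nodes. Below is a minimal, self-contained sketch of that pattern; the play name, the osds host group, the hardcoded vars entry, and the placeholder task are illustrative assumptions (in ceph-ansible the real value comes from group_vars via the ceph-defaults role), not the playbook itself.

---
# objectstore-guard-sketch.yml (hypothetical file name)
# run with: ansible-playbook -i <inventory> objectstore-guard-sketch.yml
- name: objectstore guard sketch
  hosts: osds                          # assumed inventory group
  gather_facts: false
  vars:
    osd_objectstore: bluestore         # normally set in group_vars
  tasks:
    - name: set_fact current_objectstore
      set_fact:
        # snapshot the value before any later task can override it
        current_objectstore: '{{ osd_objectstore }}'

    - name: warn user about osd already using bluestore
      debug:
        msg: 'WARNING: {{ inventory_hostname }} is already using bluestore. Skipping all tasks.'
      when: current_objectstore == 'bluestore'

    - name: shrink and redeploy filestore osds
      when: current_objectstore == 'filestore'
      block:
        # the purge/zap/redeploy tasks live here in the real playbook;
        # a placeholder keeps this sketch runnable end to end
        - name: placeholder for the migration tasks
          debug:
            msg: 'only filestore nodes reach this point'

The snapshot matters because the block itself ends with "force osd_objectstore to bluestore": that set_fact replaces the per-import "vars: osd_objectstore: bluestore" overrides the old version passed to ceph-config and ceph-osd, so the redeploy roles always see bluestore while the skip decision is still driven by the value captured at the start of the play.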