filestore-to-bluestore: --destroy with raw devices

We still need --destroy when using a raw device, otherwise we won't be
able to recreate the LVM stack on that device with bluestore.

Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-bdc67a84-894a-4687-b43f-bcd76317580a /dev/sdd
 stderr: Physical volume '/dev/sdd' is already in volume group 'ceph-b7801d50-e827-4857-95ec-3291ad6f0151'
  Unable to add physical volume '/dev/sdd' to volume group 'ceph-b7801d50-e827-4857-95ec-3291ad6f0151'
  /dev/sdd: physical volume not initialized.
--> Was unable to complete a new OSD, will rollback changes
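
Note: without --destroy, "ceph-volume lvm zap" only wipes the logical
volumes; the PV label and VG membership stay on disk, which is exactly why
the vgcreate above fails. A minimal sketch of the manual equivalent of what
--destroy automates on a raw device (the device path is illustrative):

    # Zap the OSD data and tear down the whole LVM stack (lv/vg/pv)
    # so the disk can be re-prepared with bluestore.
    - name: zap a raw device and destroy its LVM stack
      command: ceph-volume lvm zap --destroy /dev/sdd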

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1792227

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
(cherry picked from commit f995b079a6)

@@ -179,15 +179,15 @@
     - name: set_fact osd_fsid_list
       set_fact:
-        osd_fsid_list: "{{ osd_fsid_list | default([]) + [item.tags['ceph.osd_fsid']] }}"
+        osd_fsid_list: "{{ osd_fsid_list | default([]) + [{'osd_fsid': item.tags['ceph.osd_fsid'], 'destroy': (item.lv_name.startswith('osd-data-') and item.vg_name.startswith('ceph-')) | ternary(true, false)}] }}"
       with_items: "{{ _lvm_list }}"
       when: item.type == 'data'

     - name: zap ceph-volume prepared OSDs
       ceph_volume:
         action: "zap"
-        osd_fsid: "{{ item }}"
-        destroy: False
+        osd_fsid: "{{ item.osd_fsid }}"
+        destroy: "{{ item.destroy }}"
       environment:
         CEPH_VOLUME_DEBUG: 1
         CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
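
Each osd_fsid_list entry is now a dict carrying the OSD fsid plus a destroy
flag, true only when the data LV and VG follow the osd-data-*/ceph-* naming
scheme that ceph-volume lvm batch uses when it creates the LVM stack on a
raw device itself. A sketch of one rendered entry (the fsid value is made up):

    - osd_fsid: 9f2c6a1e-1111-2222-3333-444444444444
      destroy: true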
@@ -203,6 +203,14 @@
         - item['lv_path'] is defined
       # Do not close mappers for non 'lvm batch' devices
       - devices | default([]) | length > 0

+    - name: ensure all pv are removed
+      command: "pvremove --yes {{ item.devices[0] }}"
+      with_items: "{{ _lvm_list }}"
+      when:
+        - item.type == 'data'
+        - item.lv_name.startswith('osd-data-') | bool
+        - item.vg_name.startswith('ceph-') | bool
+
   when: _lvm_list is defined

 - name: set_fact osd_ids
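
The extra pvremove pass relies on the fields that ceph-volume lvm list
reports for each data LV. A trimmed, hypothetical _lvm_list item for a raw
device, showing what the conditions above test and what item.devices[0]
resolves to:

    - type: data
      lv_name: osd-data-9f2c6a1e-1111-2222-3333-444444444444
      vg_name: ceph-b7801d50-e827-4857-95ec-3291ad6f0151
      devices:
        - /dev/sdd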
@@ -238,6 +246,10 @@
     - import_role:
         name: ceph-container-common
       when: containerized_deployment | bool
+    - import_role:
+        name: ceph-config
+      vars:
+        osd_objectstore: bluestore
     - import_role:
         name: ceph-osd
       vars:
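
Importing ceph-config with osd_objectstore pinned to bluestore re-renders
the configuration for the new objectstore even though the inventory still
says filestore, since vars attached to an import_role task take precedence
over host and group vars. A minimal, hypothetical standalone playbook
showing the same override:

    - hosts: osds
      tasks:
        - import_role:
            name: ceph-config
          vars:
            # task vars win over the inventory's osd_objectstore=filestore
            osd_objectstore: bluestore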

@@ -5,3 +5,5 @@ mon0
 osd0 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]"
 osd1 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]" dmcrypt=true
 osd2 osd_objectstore=filestore devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" journal_size=1024
+osd3 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]"
+osd4 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]" dmcrypt=true

@@ -5,7 +5,7 @@ docker: true

 # DEFINE THE NUMBER OF VMS TO RUN
 mon_vms: 1
-osd_vms: 3
+osd_vms: 5
 mds_vms: 0
 rgw_vms: 0
 nfs_vms: 0

@@ -5,3 +5,5 @@ mon0
 osd0 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]"
 osd1 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]" dmcrypt=true
 osd2 osd_objectstore=filestore devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" journal_size=1024
+osd3 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]"
+osd4 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]" dmcrypt=true

@@ -5,7 +5,7 @@ docker: false

 # DEFINE THE NUMBER OF VMS TO RUN
 mon_vms: 1
-osd_vms: 3
+osd_vms: 5
 mds_vms: 0
 rgw_vms: 0
 nfs_vms: 0

@@ -49,6 +49,7 @@
         unit: '%'
         label: gpt
         state: present
+      tags: partitions
     - name: partition /dev/sdc for journals
       parted:
         device: /dev/sdc
@@ -58,6 +59,7 @@
         unit: '%'
         state: present
         label: gpt
+      tags: partitions
     - name: create journals vg from /dev/sdc2
       lvg:
         vg: journals

@@ -43,7 +43,8 @@ commands=
   ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
-  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'all:!osd2'
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osd0:osd1'
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osd3:osd4' --tags partitions
   # deploy the cluster
   ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
@@ -54,7 +55,7 @@ commands=
   ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
   ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
   "
-  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/filestore-to-bluestore.yml --limit osd0,osd1,osd2
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/filestore-to-bluestore.yml --limit osds
   bash -c "CEPH_STABLE_RELEASE={env:UPDATE_CEPH_STABLE_RELEASE:nautilus} py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"