osd: remove ceph-disk support

We no longer support preparing OSDs with ceph-disk; only ceph-volume is supported. However, starting OSDs is still supported, so if you change a config option the handlers will still be able to restart all the OSDs via their respective systemd unit files.

Signed-off-by: Sébastien Han <seb@redhat.com>
Co-authored-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/3851/head
Sébastien Han 2018-10-02 23:54:57 +02:00 committed by Dimitri Savineau
parent d25af1b872
commit e2a5aa062e
5 changed files with 34 additions and 256 deletions
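The commit message above notes that only the start/restart path is kept. As an illustration of what restarting OSDs through their systemd unit files can look like, here is a minimal hypothetical handler; the real ceph-ansible handler goes through a restart script with extra safety checks, and the 'osd_ids' variable is assumed purely for this sketch:

- name: restart ceph osds                          # hypothetical sketch, not the handler shipped by ceph-ansible
  systemd:
    name: "ceph-osd@{{ item }}"                    # one systemd unit per OSD id
    state: restarted
  with_items: "{{ osd_ids | default([]) }}"        # 'osd_ids' is an assumed variable for illustration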


@@ -56,113 +56,13 @@ dummy:
#dmcrypt: False
# I. First scenario: collocated
#
# To enable this scenario do: osd_scenario: collocated
#
#
# If osd_objectstore: filestore is enabled both 'ceph data' and 'ceph journal' partitions
# will be stored on the same device.
#
# If osd_objectstore: bluestore is enabled 'ceph data', 'ceph block', 'ceph block.db', 'ceph block.wal' will be stored
# on the same device. The device will get 2 partitions:
# - One for 'data', called 'ceph data'
# - One for 'ceph block', 'ceph block.db', 'ceph block.wal' called 'ceph block'
#
# Example of what you will get:
# [root@ceph-osd0 ~]# blkid /dev/sda*
# /dev/sda: PTTYPE="gpt"
# /dev/sda1: UUID="9c43e346-dd6e-431f-92d8-cbed4ccb25f6" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="749c71c9-ed8f-4930-82a7-a48a3bcdb1c7"
# /dev/sda2: PARTLABEL="ceph block" PARTUUID="e6ca3e1d-4702-4569-abfa-e285de328e9d"
#
# Note: This scenario uses the ceph-disk tool to provision OSDs
#osd_scenario: dummy
#valid_osd_scenarios:
# - collocated
# - non-collocated
# - lvm
# II. Second scenario: non-collocated
#
# To enable this scenario do: osd_scenario: non-collocated
#
# If osd_objectstore: filestore is enabled 'ceph data' and 'ceph journal' partitions
# will be stored on different devices:
# - 'ceph data' will be stored on the device listed in 'devices'
# - 'ceph journal' will be stored on the device listed in 'dedicated_devices'
#
# Let's take an example, imagine 'devices' was declared like this:
#
# devices:
# - /dev/sda
# - /dev/sdb
# - /dev/sdc
# - /dev/sdd
#
# And 'dedicated_devices' was declared like this:
#
# dedicated_devices:
# - /dev/sdf
# - /dev/sdf
# - /dev/sdg
# - /dev/sdg
#
# This will result in the following mapping:
# - /dev/sda will have /dev/sdf1 as a journal
# - /dev/sdb will have /dev/sdf2 as a journal
# - /dev/sdc will have /dev/sdg1 as a journal
# - /dev/sdd will have /dev/sdg2 as a journal
#
#
# If osd_objectstore: bluestore is enabled, both 'ceph block.db' and 'ceph block.wal' partitions will be stored
# on a dedicated device.
#
# So the following will happen:
# - The devices listed in 'devices' will get 2 partitions, one for 'block' and one for 'data'.
# 'data' is only 100MB big and do not store any of your data, it's just a bunch of Ceph metadata.
# 'block' will store all your actual data.
# - The devices in 'dedicated_devices' will get 1 partition for RocksDB DB, called 'block.db'
# and one for RocksDB WAL, called 'block.wal'. To use a single partition for RocksDB and WAL together
# set bluestore_wal_devices to [] (supported only for non-containerized deployment).
#
# By default dedicated_devices will represent block.db
#
# Example of what you will get:
# [root@ceph-osd0 ~]# blkid /dev/sd*
# /dev/sda: PTTYPE="gpt"
# /dev/sda1: UUID="c6821801-2f21-4980-add0-b7fc8bd424d5" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="f2cc6fa8-5b41-4428-8d3f-6187453464d0"
# /dev/sda2: PARTLABEL="ceph block" PARTUUID="ea454807-983a-4cf2-899e-b2680643bc1c"
# /dev/sdb: PTTYPE="gpt"
# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="af5b2d74-4c08-42cf-be57-7248c739e217"
# /dev/sdb2: PARTLABEL="ceph block.wal" PARTUUID="af3f8327-9aa9-4c2b-a497-cf0fe96d126a"
#
# Note: This scenario uses the ceph-disk tool to provision OSDs
#dedicated_devices: []
# More device granularity for Bluestore
#
# ONLY if osd_objectstore: bluestore is enabled.
#
# By default, if 'bluestore_wal_devices' is empty, it will get the content of 'dedicated_devices'.
# If set, then you will have a dedicated partition on a specific device for block.wal.
#
# Set bluestore_wal_devices: [] to use the same partition for RocksDB and WAL (supported only for non-containerized deployment).
#
# Example of what you will get:
# [root@ceph-osd0 ~]# blkid /dev/sd*
# /dev/sda: PTTYPE="gpt"
# /dev/sda1: UUID="39241ae9-d119-4335-96b3-0898da8f45ce" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="961e7313-bdb7-49e7-9ae7-077d65c4c669"
# /dev/sda2: PARTLABEL="ceph block" PARTUUID="bff8e54e-b780-4ece-aa16-3b2f2b8eb699"
# /dev/sdb: PTTYPE="gpt"
# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="0734f6b6-cc94-49e9-93de-ba7e1d5b79e3"
# /dev/sdc: PTTYPE="gpt"
# /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
# Note: This option uses the ceph-disk tool
#bluestore_wal_devices: "{{ dedicated_devices }}"
# III. Use ceph-volume to create OSDs from logical volumes.
# Use 'osd_scenario: lvm' to enable this scenario.
# when using lvm, not collocated journals.
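With the collocated and non-collocated blocks removed above, the sample file only documents the lvm scenario. A rough sketch of what a minimal lvm-based configuration could look like (device names and logical volume names are illustrative, not taken from this change):

osd_scenario: lvm
osd_objectstore: bluestore
# simplest form: hand raw devices to ceph-volume and let it create the LVs
devices:
  - /dev/sdb
  - /dev/sdc
# or point the role at pre-created logical volumes instead of raw devices
# lvm_volumes:
#   - data: data-lv1
#     data_vg: data-vg1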


@@ -48,113 +48,13 @@ osd_auto_discovery: false
dmcrypt: False
# I. First scenario: collocated
#
# To enable this scenario do: osd_scenario: collocated
#
#
# If osd_objectstore: filestore is enabled both 'ceph data' and 'ceph journal' partitions
# will be stored on the same device.
#
# If osd_objectstore: bluestore is enabled 'ceph data', 'ceph block', 'ceph block.db', 'ceph block.wal' will be stored
# on the same device. The device will get 2 partitions:
# - One for 'data', called 'ceph data'
# - One for 'ceph block', 'ceph block.db', 'ceph block.wal' called 'ceph block'
#
# Example of what you will get:
# [root@ceph-osd0 ~]# blkid /dev/sda*
# /dev/sda: PTTYPE="gpt"
# /dev/sda1: UUID="9c43e346-dd6e-431f-92d8-cbed4ccb25f6" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="749c71c9-ed8f-4930-82a7-a48a3bcdb1c7"
# /dev/sda2: PARTLABEL="ceph block" PARTUUID="e6ca3e1d-4702-4569-abfa-e285de328e9d"
#
# Note: This scenario uses the ceph-disk tool to provision OSDs
osd_scenario: dummy
valid_osd_scenarios:
  - collocated
  - non-collocated
  - lvm
# II. Second scenario: non-collocated
#
# To enable this scenario do: osd_scenario: non-collocated
#
# If osd_objectstore: filestore is enabled 'ceph data' and 'ceph journal' partitions
# will be stored on different devices:
# - 'ceph data' will be stored on the device listed in 'devices'
# - 'ceph journal' will be stored on the device listed in 'dedicated_devices'
#
# Let's take an example, imagine 'devices' was declared like this:
#
# devices:
# - /dev/sda
# - /dev/sdb
# - /dev/sdc
# - /dev/sdd
#
# And 'dedicated_devices' was declared like this:
#
# dedicated_devices:
# - /dev/sdf
# - /dev/sdf
# - /dev/sdg
# - /dev/sdg
#
# This will result in the following mapping:
# - /dev/sda will have /dev/sdf1 as a journal
# - /dev/sdb will have /dev/sdf2 as a journal
# - /dev/sdc will have /dev/sdg1 as a journal
# - /dev/sdd will have /dev/sdg2 as a journal
#
#
# If osd_objectstore: bluestore is enabled, both 'ceph block.db' and 'ceph block.wal' partitions will be stored
# on a dedicated device.
#
# So the following will happen:
# - The devices listed in 'devices' will get 2 partitions, one for 'block' and one for 'data'.
# 'data' is only 100MB big and do not store any of your data, it's just a bunch of Ceph metadata.
# 'block' will store all your actual data.
# - The devices in 'dedicated_devices' will get 1 partition for RocksDB DB, called 'block.db'
# and one for RocksDB WAL, called 'block.wal'. To use a single partition for RocksDB and WAL together
# set bluestore_wal_devices to [] (supported only for non-containerized deployment).
#
# By default dedicated_devices will represent block.db
#
# Example of what you will get:
# [root@ceph-osd0 ~]# blkid /dev/sd*
# /dev/sda: PTTYPE="gpt"
# /dev/sda1: UUID="c6821801-2f21-4980-add0-b7fc8bd424d5" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="f2cc6fa8-5b41-4428-8d3f-6187453464d0"
# /dev/sda2: PARTLABEL="ceph block" PARTUUID="ea454807-983a-4cf2-899e-b2680643bc1c"
# /dev/sdb: PTTYPE="gpt"
# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="af5b2d74-4c08-42cf-be57-7248c739e217"
# /dev/sdb2: PARTLABEL="ceph block.wal" PARTUUID="af3f8327-9aa9-4c2b-a497-cf0fe96d126a"
#
# Note: This scenario uses the ceph-disk tool to provision OSDs
dedicated_devices: []
# More device granularity for Bluestore
#
# ONLY if osd_objectstore: bluestore is enabled.
#
# By default, if 'bluestore_wal_devices' is empty, it will get the content of 'dedicated_devices'.
# If set, then you will have a dedicated partition on a specific device for block.wal.
#
# Set bluestore_wal_devices: [] to use the same partition for RocksDB and WAL (supported only for non-containerized deployment).
#
# Example of what you will get:
# [root@ceph-osd0 ~]# blkid /dev/sd*
# /dev/sda: PTTYPE="gpt"
# /dev/sda1: UUID="39241ae9-d119-4335-96b3-0898da8f45ce" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="961e7313-bdb7-49e7-9ae7-077d65c4c669"
# /dev/sda2: PARTLABEL="ceph block" PARTUUID="bff8e54e-b780-4ece-aa16-3b2f2b8eb699"
# /dev/sdb: PTTYPE="gpt"
# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="0734f6b6-cc94-49e9-93de-ba7e1d5b79e3"
# /dev/sdc: PTTYPE="gpt"
# /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
# Note: This option uses the ceph-disk tool
bluestore_wal_devices: "{{ dedicated_devices }}"
# III. Use ceph-volume to create OSDs from logical volumes.
# Use 'osd_scenario: lvm' to enable this scenario.
# when using lvm, not collocated journals.
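The dedicated_devices and bluestore_wal_devices options removed above covered the case where block.db and block.wal live on separate devices. Under the lvm scenario, the same split is expressed per OSD through lvm_volumes entries; a hedged sketch with illustrative volume and volume group names:

lvm_volumes:
  - data: data-lv1
    data_vg: data-vg1
    db: db-lv1          # plays the role of the old 'ceph block.db' partition
    db_vg: db-vg1
    wal: wal-lv1        # plays the role of the old 'ceph block.wal' partition
    wal_vg: wal-vg1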


@@ -1,38 +0,0 @@
---
# NOTE (leseb) : this task is for disk devices only because of the explicit use of the first
# partition.
- name: activate osd(s) when device is a disk
  command: ceph-disk activate "{{ item }}{%- if 'nvme' in item or 'cciss' in item or 'loop' in item %}{{ 'p' }}{%- endif %}{%- if 'mpath' in item %}{{ '-part' }}{%- endif %}{{ '1' }}"
  with_items:
    - "{{ devices|unique }}"
  changed_when: false
  register: activate_osd_disk
  when:
    - not osd_auto_discovery
    - not dmcrypt
    - item != '/dev/dead'

- name: activate osd(s) when device is a disk (dmcrypt)
  command: ceph-disk activate --dmcrypt "{{ item }}{%- if 'nvme' in item or 'cciss' in item or 'loop' in item %}{{ 'p' }}{%- endif %}{%- if 'mpath' in item %}{{ '-part' }}{%- endif %}{{ '1' }}"
  with_items:
    - "{{ devices|unique }}"
  changed_when: false
  register: activate_osd_disk_dmcrypt
  when:
    - not osd_auto_discovery
    - dmcrypt
    - item != '/dev/dead'

# NOTE (leseb): we must do this because of
# https://github.com/ansible/ansible/issues/4297
- name: set_fact combined_activate_osd_disk_results
  set_fact:
    combined_activate_osd_disk_results: "{{ activate_osd_disk if osd_scenario != 'collocated' else activate_osd_disk_dmcrypt }}"

- name: fail if ceph-disk cannot create an OSD
  fail:
    msg: "ceph-disk failed to create an OSD"
  when:
    " 'ceph-disk: Error: ceph osd create failed' in item.get('stderr', '') "
  with_items: "{{ (combined_activate_osd_disk_results|default({})).results|default([]) }}"


@@ -0,0 +1,32 @@
---
- name: set_fact docker_env_args '-e osd_bluestore=0 -e osd_filestore=1 -e osd_dmcrypt=0'
  set_fact:
    docker_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=0
  when:
    - containerized_deployment
    - osd_objectstore == 'filestore'
    - not dmcrypt

- name: set_fact docker_env_args '-e osd_bluestore=0 -e osd_filestore=1 -e osd_dmcrypt=1'
  set_fact:
    docker_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=1
  when:
    - containerized_deployment
    - osd_objectstore == 'filestore'
    - dmcrypt

- name: set_fact docker_env_args '-e osd_bluestore=1 -e osd_filestore=0 -e osd_dmcrypt=0'
  set_fact:
    docker_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=0
  when:
    - containerized_deployment
    - osd_objectstore == 'bluestore'
    - not dmcrypt

- name: set_fact docker_env_args '-e osd_bluestore=1 -e osd_filestore=0 -e osd_dmcrypt=1'
  set_fact:
    docker_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=1
  when:
    - containerized_deployment
    - osd_objectstore == 'bluestore'
    - dmcrypt
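The four tasks above enumerate every objectstore/dmcrypt combination explicitly. Purely to illustrate the logic (this is not what the role ships), the same environment flags could be computed in a single task with Jinja conditionals:

- name: set_fact docker_env_args (condensed sketch)
  set_fact:
    docker_env_args: >-
      -e OSD_BLUESTORE={{ '1' if osd_objectstore == 'bluestore' else '0' }}
      -e OSD_FILESTORE={{ '1' if osd_objectstore == 'filestore' else '0' }}
      -e OSD_DMCRYPT={{ '1' if dmcrypt | bool else '0' }}
  when: containerized_deployment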


@@ -37,8 +37,8 @@
- name: include_tasks common.yml
  include_tasks: common.yml

- name: include ceph_disk_cli_options_facts.yml
  include_tasks: ceph_disk_cli_options_facts.yml
- name: include container_options_facts.yml
  include_tasks: container_options_facts.yml

- name: include build_devices.yml
  include_tasks: build_devices.yml

@@ -50,22 +50,6 @@
  register: parted_results
  with_items: "{{ devices }}"

- name: include check_gpt.yml
  include_tasks: check_gpt.yml
  when:
    - osd_scenario != 'lvm'

- name: include_tasks scenarios/collocated.yml
  include_tasks: scenarios/collocated.yml
  when:
    - osd_scenario == 'collocated'

- name: include_tasks scenarios/non-collocated.yml
  include_tasks: scenarios/non-collocated.yml
  when:
    - not osd_auto_discovery
    - osd_scenario == 'non-collocated'

- name: include_tasks scenarios/lvm.yml
  include_tasks: scenarios/lvm.yml
  when: