osd: drop filestore support

filestore is about to be removed. This commit removes filestore
support in ceph-ansible.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/7394/head
Guillaume Abrioux 2023-02-15 04:35:01 +01:00 committed by Teoman ONAY
parent 1f7b3ac5a3
commit 15b91cef90
32 changed files with 23 additions and 943 deletions
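With this commit only bluestore is a valid objectstore, so OSD hosts are configured without any journal-related settings. A minimal sketch of a group_vars OSD definition that remains valid after the removal, reusing the illustrative LV/VG names from the converted test inventories below (test_group, journals):

# group_vars/osds.yml -- bluestore-only sketch (illustrative values)
osd_objectstore: bluestore        # 'filestore' is no longer an accepted value
lvm_volumes:
  - data: data-lv1
    data_vg: test_group
  - data: data-lv2
    data_vg: test_group
    db: journal1                  # optional dedicated block.db LV
    db_vg: journals
# filestore-only keys (journal, journal_vg, journal_size, filestore_xattr_use_omap)
# are dropped by this commit and must be removed from existing configurations.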

View File

@ -373,12 +373,6 @@ dummy:
# Any device containing these patterns in their path will be excluded.
#osd_auto_discovery_exclude: "dm-*|loop*|md*|rbd*"
# xattrs. by default, 'filestore xattr use omap' is set to 'true' if
# 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. This can
# be set to 'true' or 'false' to explicitly override those
# defaults. Leave it 'null' to use the default for your chosen mkfs
# type.
#filestore_xattr_use_omap: null
## MDS options
#

View File

@ -373,12 +373,6 @@ ceph_iscsi_config_dev: false
# Any device containing these patterns in their path will be excluded.
#osd_auto_discovery_exclude: "dm-*|loop*|md*|rbd*"
# xattrs. by default, 'filestore xattr use omap' is set to 'true' if
# 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. This can
# be set to 'true' or 'false' to explicitly override those
# defaults. Leave it 'null' to use the default for your chosen mkfs
# type.
#filestore_xattr_use_omap: null
## MDS options
#

View File

@ -1,444 +0,0 @@
# This playbook migrates an OSD from filestore to bluestore backend.
#
# Use it like this:
# ansible-playbook infrastructure-playbooks/filestore-to-bluestore.yml --limit <osd-node-to-migrate>
# If all osds on the node are using filestore backend, then *ALL* of them will be shrunk and redeployed using bluestore backend with ceph-volume.
#
# If a mix of filestore and bluestore OSDs is detected on the node, the node will be skipped unless you pass `force_filestore_to_bluestore=True` as an extra var.
# ie: ansible-playbook infrastructure-playbooks/filestore-to-bluestore.yml --limit <osd-node-to-migrate> -e force_filestore_to_bluestore=True
- hosts: "{{ osd_group_name }}"
become: true
serial: 1
vars:
delegate_facts_host: true
tasks:
- name: gather and delegate facts
setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
delegate_to: "{{ item }}"
delegate_facts: True
with_items: "{{ groups[mon_group_name] }}"
run_once: true
when: delegate_facts_host | bool
- import_role:
name: ceph-defaults
- name: import_role ceph-facts
import_role:
name: ceph-facts
tasks_from: container_binary.yml
- name: set_fact ceph_cmd
set_fact:
ceph_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}"
- name: get ceph osd tree data
command: "{{ ceph_cmd }} --cluster {{ cluster }} osd tree -f json"
delegate_to: "{{ groups[mon_group_name][0] }}"
register: osd_tree
changed_when: false
run_once: true
- name: set_fact osd_ids
set_fact:
osd_ids: "{{ osd_ids | default([]) | union(item) }}"
with_items:
- "{{ ((osd_tree.stdout | default('{}') | trim | from_json).nodes | selectattr('name', 'match', '^' + inventory_hostname + '$') | map(attribute='children') | list) }}"
- name: get osd metadata
command: "{{ ceph_cmd }} --cluster {{ cluster }} osd metadata osd.{{ item }} -f json"
register: osd_metadata
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
changed_when: false
with_items: "{{ osd_ids }}"
- name: set_fact _osd_objectstore
set_fact:
_osd_objectstore: "{{ _osd_objectstore | default([]) | union([(item.stdout | from_json).osd_objectstore]) }}"
with_items: "{{ osd_metadata.results }}"
- name: set_fact skip_this_node
set_fact:
skip_this_node: "{{ ('filestore' in _osd_objectstore and 'bluestore' in _osd_objectstore and not force_filestore_to_bluestore | default(False)) or ('filestore' not in _osd_objectstore) }}"
- name: filestore to bluestore migration workflow
when: not skip_this_node | bool
block:
- name: get ceph-volume lvm inventory data
ceph_volume:
cluster: "{{ cluster }}"
action: inventory
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
register: ceph_volume_inventory
- name: set_fact inventory
set_fact:
inventory: "{{ ceph_volume_inventory.stdout | from_json }}"
- name: set_fact ceph_disk_osds
set_fact:
ceph_disk_osds_devices: "{{ ceph_disk_osds_devices | default([]) + [item.path] }}"
with_items: "{{ inventory }}"
when:
- not item.available | bool
- "'Used by ceph-disk' in item.rejected_reasons"
- name: ceph-disk prepared OSDs related tasks
when: ceph_disk_osds_devices | default([]) | length > 0
block:
- name: get partlabel
command: blkid "{{ item + 'p' if item is match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$') else item }}"1 -s PARTLABEL -o value
register: partlabel
with_items: "{{ ceph_disk_osds_devices | default([]) }}"
- name: get simple scan data
ceph_volume_simple_scan:
path: "{{ item.item + 'p1' if item.item is match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$') else item.item + '1' }}"
cluster: "{{ cluster }}"
stdout: true
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
register: simple_scan
with_items: "{{ partlabel.results | default([]) }}"
when: item.stdout == 'ceph data'
ignore_errors: true
- name: mark out osds
ceph_osd:
ids: "{{ (item.0.stdout | from_json).whoami }}"
cluster: "{{ cluster }}"
state: out
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_together:
- "{{ simple_scan.results }}"
- "{{ partlabel.results }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when: item.1.stdout == 'ceph data'
- name: stop and disable old osd services
service:
name: "ceph-osd@{{ (item.0.stdout | from_json).whoami }}"
state: stopped
enabled: no
with_together:
- "{{ simple_scan.results }}"
- "{{ partlabel.results }}"
when: item.1.stdout == 'ceph data'
- name: umount osd data
ansible.posix.mount:
path: "/var/lib/ceph/osd/{{ cluster }}-{{ (item.0.stdout | from_json).whoami }}"
state: unmounted
with_together:
- "{{ simple_scan.results }}"
- "{{ partlabel.results }}"
when: item.1.stdout == 'ceph data'
- name: umount osd lockbox
ansible.posix.mount:
path: "/var/lib/ceph/osd-lockbox/{{ (item.0.stdout | from_json).data.uuid }}"
state: unmounted
with_together:
- "{{ simple_scan.results }}"
- "{{ partlabel.results }}"
when:
- item.1.stdout == 'ceph data'
- (item.0.stdout | from_json).encrypted | default(False) | bool
- name: ensure dmcrypt for data device is closed
command: cryptsetup close "{{ (item.0.stdout | from_json).data.uuid }}"
with_together:
- "{{ simple_scan.results }}"
- "{{ partlabel.results }}"
failed_when: false
changed_when: false
when:
- item.1.stdout == 'ceph data'
- (item.0.stdout | from_json).encrypted | default(False) | bool
- name: ensure dmcrypt for journal device is closed
command: cryptsetup close "{{ (item.0.stdout | from_json).journal.uuid }}"
with_together:
- "{{ simple_scan.results }}"
- "{{ partlabel.results }}"
failed_when: false
changed_when: false
when:
- item.1.stdout == 'ceph data'
- (item.0.stdout | from_json).encrypted | default(False) | bool
- name: zap data devices
ceph_volume:
cluster: "{{ cluster }}"
action: zap
destroy: true
data: "{{ (item.0.stdout | from_json).data.path }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_together:
- "{{ simple_scan.results }}"
- "{{ partlabel.results }}"
when: item.1.stdout == 'ceph data'
- name: zap journal devices
ceph_volume:
cluster: "{{ cluster }}"
action: zap
destroy: true
journal: "{{ (item.0.stdout | from_json).journal.path }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_together:
- "{{ simple_scan.results }}"
- "{{ partlabel.results }}"
when:
- item.1.stdout == 'ceph data'
- (item.0.stdout | from_json).journal.path is defined
- name: get ceph-volume lvm list data
ceph_volume:
cluster: "{{ cluster }}"
action: list
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
register: ceph_volume_lvm_list
- name: set_fact _lvm_list
set_fact:
_lvm_list: "{{ _lvm_list | default([]) + item.value }}"
with_dict: "{{ (ceph_volume_lvm_list.stdout | default('{}') | from_json) }}"
- name: ceph-volume prepared OSDs related tasks
block:
- name: mark out osds
ceph_osd:
ids: "{{ (ceph_volume_lvm_list.stdout | default('{}') | from_json).keys() | list }}"
cluster: "{{ cluster }}"
state: out
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- name: stop and disable old osd services
service:
name: "ceph-osd@{{ item }}"
state: stopped
enabled: no
with_items: "{{ (ceph_volume_lvm_list.stdout | default('{}') | from_json).keys() | list }}"
- name: stop and disable ceph-volume services
service:
name: "ceph-volume@lvm-{{ item.tags['ceph.osd_id'] }}-{{ item.tags['ceph.osd_fsid'] }}"
state: stopped
enabled: no
with_items: "{{ _lvm_list }}"
when:
- not containerized_deployment | bool
- item.type == 'data'
- name: mark down osds
ceph_osd:
ids: "{{ (ceph_volume_lvm_list.stdout | default('{}') | from_json).keys() | list }}"
cluster: "{{ cluster }}"
state: down
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- name: ensure all dmcrypt for data and journal are closed
command: cryptsetup close "{{ item['lv_uuid'] }}"
with_items: "{{ _lvm_list }}"
changed_when: false
failed_when: false
when: item['tags'].get('ceph.encrypted', 0) | int == 1
- name: set_fact osd_fsid_list
set_fact:
osd_fsid_list: "{{ osd_fsid_list | default([]) + [{'osd_fsid': item.tags['ceph.osd_fsid'], 'destroy': (item.lv_name.startswith('osd-data-') and item.vg_name.startswith('ceph-')) | ternary(true, false), 'device': item.devices[0], 'journal': item['tags']['ceph.journal_device'] }] }}"
with_items: "{{ _lvm_list }}"
when: item.type == 'data'
- name: zap ceph-volume prepared OSDs
ceph_volume:
action: "zap"
osd_fsid: "{{ item.osd_fsid }}"
destroy: false
environment:
CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
loop: "{{ osd_fsid_list }}"
when: osd_fsid_list is defined
- name: zap destroy ceph-volume prepared devices
ceph_volume:
action: "zap"
data: "{{ item.device }}"
destroy: true
environment:
CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
loop: "{{ osd_fsid_list }}"
when:
- osd_fsid_list is defined
- item.destroy | bool
- name: test if the journal device hasn't been already destroyed because of collocation
stat:
path: "{{ item.journal }}"
loop: "{{ osd_fsid_list }}"
register: journal_path
when:
- osd_fsid_list is defined
- item.destroy | bool
- item.journal is defined
- item.journal not in (lvm_volumes | selectattr('journal', 'defined') | map(attribute='journal') | list)
- name: zap destroy ceph-volume prepared journal devices
ceph_volume:
action: "zap"
data: "{{ item.0.journal }}"
destroy: true
environment:
CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
loop: "{{ osd_fsid_list | zip(journal_path.results) | list }}"
when:
- osd_fsid_list is defined
- item.0.destroy | bool
- item.0.journal is defined
- item.0.journal not in (lvm_volumes | selectattr('journal', 'defined') | map(attribute='journal') | list)
- item.1.stat.exists | bool
- name: ensure all dm are closed
command: dmsetup remove "{{ item['lv_path'] }}"
with_items: "{{ _lvm_list }}"
changed_when: false
failed_when: false
when:
- item['lv_path'] is defined
# Do not close mappers for non 'lvm batch' devices
- devices | default([]) | length > 0
- name: ensure all pv are removed
command: "pvremove --yes {{ item.devices[0] }}"
with_items: "{{ _lvm_list }}"
failed_when: false
when:
- item.type == 'data'
- item.lv_name.startswith('osd-data-') | bool
- item.vg_name.startswith('ceph-') | bool
when: _lvm_list is defined
- name: set_fact osd_ids
set_fact:
osd_ids: "{{ osd_ids | default([]) + [item] }}"
with_items:
- "{{ ((osd_tree.stdout | default('{}') | from_json).nodes | selectattr('name', 'match', '^' + inventory_hostname + '$') | map(attribute='children') | list) }}"
- name: purge osd(s) from the cluster
ceph_osd:
ids: "{{ item }}"
cluster: "{{ cluster }}"
state: purge
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
with_items: "{{ osd_ids }}"
- name: purge /var/lib/ceph/osd directories
file:
path: "/var/lib/ceph/osd/{{ cluster }}-{{ item }}"
state: absent
with_items: "{{ osd_ids }}"
- name: force osd_objectstore to bluestore
set_fact:
osd_objectstore: bluestore
- name: refresh ansible devices fact
setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
filter: ansible_devices
when: osd_auto_discovery | bool
- import_role:
name: ceph-defaults
- import_role:
name: ceph-facts
- name: remove gpt header
command: parted -s "{{ item }}" mklabel msdos
with_items: "{{ (devices + dedicated_devices | default([]) + ceph_disk_osds_devices | default([])) | unique }}"
- name: update lvm_volumes configuration for bluestore
when:
- lvm_volumes | length > 0
- not osd_auto_discovery | bool
block:
- name: reuse filestore journal partition for bluestore db
set_fact:
config_part: "{{ config_part | default([]) + [item | combine({'db': item.journal})] }}"
with_items: "{{ lvm_volumes | selectattr('journal_vg', 'undefined') | list }}"
- name: reuse filestore journal vg/lv for bluestore db
set_fact:
config_vglv: "{{ config_vglv | default([]) + [item | combine({'db': item.journal, 'db_vg': item.journal_vg})] }}"
with_items: "{{ lvm_volumes | selectattr('journal_vg', 'defined') | list }}"
- name: override lvm_volumes with bluestore configuration
set_fact:
lvm_volumes: "{{ config_part | default([]) + config_vglv | default([]) }}"
- import_role:
name: ceph-handler
- import_role:
name: ceph-container-common
when: containerized_deployment | bool
- import_role:
name: ceph-config
- import_role:
name: ceph-osd
- name: final play
hosts: "{{ osd_group_name }}"
become: true
gather_facts: false
tasks:
- import_role:
name: ceph-defaults
- name: report any skipped node during this playbook
debug:
msg: |
"WARNING:"
"This node has been skipped because OSDs are either"
"all bluestore or there's a mix of filestore and bluestore OSDs"
when:
- skip_this_node | bool
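For reference, the removed play's 'update lvm_volumes configuration for bluestore' block reused each filestore journal as the bluestore block.db. A minimal sketch of that remapping for a single lvm_volumes entry, using the illustrative names from the test inventories:

# before migration (filestore): journal on a dedicated VG/LV
lvm_volumes:
  - data: data-lv2
    data_vg: test_group
    journal: journal1
    journal_vg: journals
# after the play's remapping (bluestore): the journal LV becomes block.db
lvm_volumes:
  - data: data-lv2
    data_vg: test_group
    db: journal1
    db_vg: journals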

View File

@ -40,10 +40,9 @@ options:
default: ceph
objectstore:
description:
- The objectstore of the OSD, either filestore or bluestore
- The objectstore of the OSD, (bluestore only)
- Required if action is 'create'
required: false
choices: ['bluestore', 'filestore']
choices: ['bluestore']
default: bluestore
action:
description:
@ -67,35 +66,21 @@ options:
description:
- The OSD ID
required: false
journal:
description:
- The logical volume name or partition to use as a filestore journal.
- Only applicable if objectstore is 'filestore'.
required: false
journal_vg:
description:
- If journal is a lv, this must be the name of the volume group it belongs to.
- Only applicable if objectstore is 'filestore'.
required: false
db:
description:
- A partition or logical volume name to use for block.db.
- Only applicable if objectstore is 'bluestore'.
required: false
db_vg:
description:
- If db is a lv, this must be the name of the volume group it belongs to. # noqa: E501
- Only applicable if objectstore is 'bluestore'.
required: false
wal:
description:
- A partition or logical volume name to use for block.wal.
- Only applicable if objectstore is 'bluestore'.
required: false
wal_vg:
description:
- If wal is a lv, this must be the name of the volume group it belongs to. # noqa: E501
- Only applicable if objectstore is 'bluestore'.
required: false
crush_device_class:
description:
@ -116,12 +101,6 @@ options:
- Only applicable if action is 'batch'.
required: false
default: 1
journal_size:
description:
- The size in MB of filestore journals.
- Only applicable if action is 'batch'.
required: false
default: 5120
block_db_size:
description:
- The size in bytes of bluestore block db lvs.
@ -129,23 +108,15 @@ options:
- Only applicable if action is 'batch'.
required: false
default: -1
journal_devices:
description:
- A list of devices for filestore journal to pass to the 'ceph-volume lvm batch' subcommand.
- Only applicable if action is 'batch'.
- Only applicable if objectstore is 'filestore'.
required: false
block_db_devices:
description:
- A list of devices for bluestore block db to pass to the 'ceph-volume lvm batch' subcommand.
- Only applicable if action is 'batch'.
- Only applicable if objectstore is 'bluestore'.
required: false
wal_devices:
description:
- A list of devices for bluestore block wal to pass to the 'ceph-volume lvm batch' subcommand.
- Only applicable if action is 'batch'.
- Only applicable if objectstore is 'bluestore'.
required: false
report:
description:
@ -169,14 +140,6 @@ author:
'''
EXAMPLES = '''
- name: set up a filestore osd with an lv data and a journal partition
ceph_volume:
objectstore: filestore
data: data-lv
data_vg: data-vg
journal: /dev/sdc1
action: create
- name: set up a bluestore osd with a raw device for data
ceph_volume:
objectstore: bluestore
@ -284,8 +247,6 @@ def batch(module, container_image, report=None):
objectstore = module.params['objectstore']
batch_devices = module.params.get('batch_devices', None)
crush_device_class = module.params.get('crush_device_class', None)
journal_devices = module.params.get('journal_devices', None)
journal_size = module.params.get('journal_size', None)
block_db_size = module.params.get('block_db_size', None)
block_db_devices = module.params.get('block_db_devices', None)
wal_devices = module.params.get('wal_devices', None)
@ -320,18 +281,11 @@ def batch(module, container_image, report=None):
if osds_per_device > 1:
cmd.extend(['--osds-per-device', str(osds_per_device)])
if objectstore == 'filestore':
cmd.extend(['--journal-size', journal_size])
if objectstore == 'bluestore' and block_db_size != '-1':
cmd.extend(['--block-db-size', block_db_size])
cmd.extend(batch_devices)
if journal_devices and objectstore == 'filestore':
cmd.append('--journal-devices')
cmd.extend(journal_devices)
if block_db_devices and objectstore == 'bluestore':
cmd.append('--db-devices')
cmd.extend(block_db_devices)
@ -376,8 +330,6 @@ def prepare_or_create_osd(module, action, container_image):
data = module.params['data']
data_vg = module.params.get('data_vg', None)
data = get_data(data, data_vg)
journal = module.params.get('journal', None)
journal_vg = module.params.get('journal_vg', None)
db = module.params.get('db', None)
db_vg = module.params.get('db_vg', None)
wal = module.params.get('wal', None)
@ -392,10 +344,6 @@ def prepare_or_create_osd(module, action, container_image):
cmd.append('--data')
cmd.append(data)
if journal and objectstore == 'filestore':
journal = get_journal(journal, journal_vg)
cmd.extend(['--journal', journal])
if db and objectstore == 'bluestore':
db = get_db(db, db_vg)
cmd.extend(['--block.db', db])
@ -493,8 +441,6 @@ def zap_devices(module, container_image):
# get module variables
data = module.params.get('data', None)
data_vg = module.params.get('data_vg', None)
journal = module.params.get('journal', None)
journal_vg = module.params.get('journal_vg', None)
db = module.params.get('db', None)
db_vg = module.params.get('db_vg', None)
wal = module.params.get('wal', None)
@ -519,10 +465,6 @@ def zap_devices(module, container_image):
data = get_data(data, data_vg)
cmd.append(data)
if journal:
journal = get_journal(journal, journal_vg)
cmd.extend([journal])
if db:
db = get_db(db, db_vg)
cmd.extend([db])
@ -538,14 +480,12 @@ def run_module():
module_args = dict(
cluster=dict(type='str', required=False, default='ceph'),
objectstore=dict(type='str', required=False, choices=[
'bluestore', 'filestore'], default='bluestore'),
'bluestore'], default='bluestore'),
action=dict(type='str', required=False, choices=[
'create', 'zap', 'batch', 'prepare', 'activate', 'list',
'inventory'], default='create'), # noqa: 4502
data=dict(type='str', required=False),
data_vg=dict(type='str', required=False),
journal=dict(type='str', required=False),
journal_vg=dict(type='str', required=False),
db=dict(type='str', required=False),
db_vg=dict(type='str', required=False),
wal=dict(type='str', required=False),
@ -554,8 +494,6 @@ def run_module():
dmcrypt=dict(type='bool', required=False, default=False),
batch_devices=dict(type='list', required=False, default=[]),
osds_per_device=dict(type='int', required=False, default=1),
journal_size=dict(type='str', required=False, default='5120'),
journal_devices=dict(type='list', required=False, default=[]),
block_db_size=dict(type='str', required=False, default='-1'),
block_db_devices=dict(type='list', required=False, default=[]),
wal_devices=dict(type='list', required=False, default=[]),
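With the journal parameters removed from the module, a 'create' call only carries bluestore-style options. A minimal task sketch using the options that remain (LV and VG names are illustrative):

- name: set up a bluestore osd with a dedicated block.db
  ceph_volume:
    objectstore: bluestore
    data: data-lv2
    data_vg: test_group
    db: journal1        # optional block.db logical volume
    db_vg: journals
    action: create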

View File

@ -51,7 +51,6 @@
objectstore: "{{ osd_objectstore }}"
batch_devices: "{{ _devices }}"
osds_per_device: "{{ osds_per_device | default(1) | int }}"
journal_size: "{{ journal_size }}"
block_db_size: "{{ block_db_size }}"
report: true
action: "batch"

View File

@ -66,19 +66,6 @@ log file = {{ rbd_client_log_file }} # must be writable by QEMU and allowed by S
{% endif %}
{% if inventory_hostname in groups.get(osd_group_name, []) %}
{% if osd_objectstore == 'filestore' %}
[osd]
osd mkfs type = {{ osd_mkfs_type }}
osd mkfs options xfs = {{ osd_mkfs_options_xfs }}
osd mount options xfs = {{ osd_mount_options_xfs }}
osd journal size = {{ journal_size }}
{% if filestore_xattr_use_omap != None %}
filestore xattr use omap = {{ filestore_xattr_use_omap }}
{% elif osd_mkfs_type == "ext4" %}
filestore xattr use omap = true
{# else, default is false #}
{% endif %}
{% endif %}
{% if osd_objectstore == 'bluestore' %}
[osd]
osd memory target = {{ _osd_memory_target | default(osd_memory_target) }}

View File

@ -365,12 +365,6 @@ osd_objectstore: bluestore
# Any device containing these patterns in their path will be excluded.
osd_auto_discovery_exclude: "dm-*|loop*|md*|rbd*"
# xattrs. by default, 'filestore xattr use omap' is set to 'true' if
# 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. This can
# be set to 'true' or 'false' to explicitly override those
# defaults. Leave it 'null' to use the default for your chosen mkfs
# type.
filestore_xattr_use_omap: null
## MDS options
#

View File

@ -1,18 +1,4 @@
---
- name: set_fact container_env_args '-e osd_bluestore=0 -e osd_filestore=1 -e osd_dmcrypt=0'
set_fact:
container_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=0
when:
- osd_objectstore == 'filestore'
- not dmcrypt | bool
- name: set_fact container_env_args '-e osd_bluestore=0 -e osd_filestore=1 -e osd_dmcrypt=1'
set_fact:
container_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=1
when:
- osd_objectstore == 'filestore'
- dmcrypt | bool
- name: set_fact container_env_args '-e osd_bluestore=1 -e osd_filestore=0 -e osd_dmcrypt=0'
set_fact:
container_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=0

View File

@ -8,11 +8,9 @@
dmcrypt: "{{ dmcrypt|default(omit) }}"
crush_device_class: "{{ crush_device_class|default(omit) }}"
osds_per_device: "{{ osds_per_device }}"
journal_size: "{{ journal_size }}"
block_db_size: "{{ block_db_size }}"
block_db_devices: "{{ dedicated_devices | unique if dedicated_devices | length > 0 else omit }}"
wal_devices: "{{ bluestore_wal_devices | unique if bluestore_wal_devices | length > 0 else omit }}"
journal_devices: "{{ dedicated_devices | unique if dedicated_devices | length > 0 else omit }}"
action: "batch"
environment:
CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
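For the 'lvm batch' scenario the change is analogous: dedicated devices are now consumed only as bluestore db (and optionally wal) devices instead of filestore journals. A minimal host configuration sketch, reusing device paths from the test inventories:

osd_objectstore: bluestore
devices:
  - /dev/sda
  - /dev/sdb
dedicated_devices:        # passed to 'ceph-volume lvm batch' as --db-devices
  - /dev/sdc
# bluestore_wal_devices: [...]   # optional, separate block.wal devices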

View File

@ -5,8 +5,6 @@
objectstore: "{{ osd_objectstore }}"
data: "{{ item.data }}"
data_vg: "{{ item.data_vg|default(omit) }}"
journal: "{{ item.journal|default(omit) }}"
journal_vg: "{{ item.journal_vg|default(omit) }}"
db: "{{ item.db|default(omit) }}"
db_vg: "{{ item.db_vg|default(omit) }}"
wal: "{{ item.wal|default(omit) }}"

View File

@ -1,20 +1,4 @@
---
- name: debian based systems tasks
when:
- osd_objectstore == 'filestore'
- ansible_facts['os_family'] == "Debian"
block:
- name: disable osd directory parsing by updatedb
command: updatedb -e /var/lib/ceph
changed_when: false
failed_when: false
- name: disable osd directory path in updatedb.conf
replace:
dest: /etc/updatedb.conf
regexp: '^(PRUNEPATHS(?!.*/var/lib/ceph).*)"$'
replace: '\1 /var/lib/ceph"'
failed_when: false
- name: create tmpfiles.d directory
file:
path: "/etc/tmpfiles.d"

View File

@ -35,9 +35,6 @@ numactl \
--privileged=true \
--pid=host \
--ipc=host \
{% if osd_objectstore == 'filestore' -%}
--memory={{ ceph_osd_docker_memory_limit }} \
{% endif -%}
--cpus={{ cpu_limit }} \
{% if ceph_osd_docker_cpuset_cpus is defined -%}
--cpuset-cpus='{{ ceph_osd_docker_cpuset_cpus }}' \

View File

@ -114,21 +114,3 @@
- item.skipped is undefined
- not item.stat.exists | bool or not item.stat.isblk | bool
- name: check filestore journal logical volume
stat:
path: "/dev/{{ item.journal_vg }}/{{ item.journal }}"
follow: true
register: lvm_volumes_journal
loop: "{{ lvm_volumes }}"
when:
- osd_objectstore == 'filestore'
- item.journal is defined
- item.journal_vg is defined
- name: fail if one of the filestore journal logical volume is not a device or doesn't exist
fail:
msg: "{{ item.item.journal_vg }}/{{ item.item.journal }} doesn't exist or isn't a block"
loop: "{{ lvm_volumes_journal.results }}"
when:
- item.skipped is undefined
- not item.stat.exists | bool or not item.stat.isblk | bool

View File

@ -8,8 +8,8 @@
- name: validate osd_objectstore
fail:
msg: "osd_objectstore must be either 'bluestore' or 'filestore'"
msg: "osd_objectstore must be either 'bluestore'"
when: osd_objectstore not in ['bluestore', 'filestore']
when: osd_objectstore not in ['bluestore']
- name: validate monitor network configuration
fail:
@ -40,17 +40,6 @@
- devices is undefined
- lvm_volumes is undefined
- name: validate filestore lvm osd scenario
fail:
msg: 'data and journal keys must be defined in lvm_volumes'
when:
- osd_objectstore == 'filestore'
- not osd_auto_discovery | default(false) | bool
- lvm_volumes is defined
- lvm_volumes | length > 0
- item.data is undefined or item.journal is undefined
with_items: '{{ lvm_volumes }}'
- name: validate bluestore lvm osd scenario
fail:
msg: 'data key must be defined in lvm_volumes'
@ -112,14 +101,6 @@
- ansible_facts['os_family'] == 'RedHat'
- ntp_daemon_type == 'ntpd'
- name: make sure journal_size configured
debug:
msg: "WARNING: journal_size is configured to {{ journal_size }}, which is less than 5GB. This is not recommended and can lead to severe issues."
when:
- journal_size|int < 5120
- osd_objectstore == 'filestore'
- osd_group_name in group_names
- name: include check_devices.yml
include_tasks: check_devices.yml
when:

View File

@ -1 +0,0 @@
../../../Vagrantfile

View File

@ -1 +0,0 @@
../all_daemons/ceph-override.json

View File

@ -1 +0,0 @@
../../../../Vagrantfile

View File

@ -1 +0,0 @@
../../all_daemons/ceph-override.json

View File

@ -1,28 +0,0 @@
---
# this is only here to let the CI tests know
# that this scenario is using docker
docker: True
ceph_origin: repository
ceph_repository: community
public_network: "192.168.43.0/24"
cluster_network: "192.168.44.0/24"
monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
journal_size: 2048
copy_admin_key: true
containerized_deployment: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
dashboard_enabled: False
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-main

View File

@ -1,10 +0,0 @@
[mons]
mon0
[osds]
osd0 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]"
osd1 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]" dmcrypt=true
osd2 osd_objectstore=filestore devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" journal_size=2048
osd3 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]"
osd4 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]" dmcrypt=true
osd5 osd_objectstore=filestore osd_auto_discovery=true journal_size=2048

View File

@ -1,71 +0,0 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: true
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 6
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
ceph_install_source: stable
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.43
cluster_subnet: 192.168.44
# MEMORY
# set 1024 for CentOS
memory: 1024
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sdb', '/dev/sdc' ]"
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: centos/atomic-host
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
os_tuning_params:
- { name: fs.file-max, value: 26234859 }

View File

@ -1,20 +0,0 @@
---
ceph_origin: repository
ceph_repository: community
public_network: "192.168.41.0/24"
cluster_network: "192.168.42.0/24"
monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
journal_size: 2048
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
dashboard_enabled: False
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10

View File

@ -1,10 +0,0 @@
[mons]
mon0
[osds]
osd0 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]"
osd1 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]" dmcrypt=true
osd2 osd_objectstore=filestore devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" journal_size=2048
osd3 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]"
osd4 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]" dmcrypt=true
osd5 osd_objectstore=filestore osd_auto_discovery=true journal_size=2048

View File

@ -1,71 +0,0 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 6
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
ceph_install_source: stable
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.41
cluster_subnet: 192.168.42
# MEMORY
# set 1024 for CentOS
memory: 1024
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sdb', '/dev/sdc' ]"
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: centos/stream8
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
os_tuning_params:
- { name: fs.file-max, value: 26234859 }

View File

@ -3,4 +3,4 @@ mon0
[osds]
osd0
osd1 osd_objectstore=filestore devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" journal_size=2048
osd1 osd_objectstore=bluestore devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']"

View File

@ -3,4 +3,4 @@ mon0
[osds]
osd0
osd1 osd_objectstore=filestore devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" journal_size=2048
osd1 osd_objectstore=bluestore devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']"

View File

@ -2,7 +2,7 @@
mon0
[osds]
osd0 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]"
osd0 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
osd1 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]" dmcrypt=true
osd1 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
osd2 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
osd3 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true

View File

@ -2,7 +2,7 @@
mon0
[osds]
osd0 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]"
osd0 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
osd1 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]" dmcrypt=true
osd1 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
osd2 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
osd3 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true

View File

@ -2,7 +2,7 @@
mon0
[osds]
osd0 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]"
osd0 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
osd1 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]" dmcrypt=true
osd1 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
osd2 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
osd3 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true

View File

@ -2,7 +2,7 @@
mon0 monitor_address=192.168.71.10
[osds]
osd0 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]"
osd0 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
osd1 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]" dmcrypt=true
osd1 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
osd2 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
osd3 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true

View File

@ -207,7 +207,7 @@ class TestCephVolumeModule(object):
result = ceph_volume.list_storage_inventory(fake_module, fake_container_image)
assert result == expected_command_list
@pytest.mark.parametrize('objectstore', ['bluestore', 'filestore'])
@pytest.mark.parametrize('objectstore', ['bluestore'])
def test_create_osd_container(self, objectstore):
fake_module = MagicMock()
fake_module.params = {'data': '/dev/sda',
@ -229,7 +229,7 @@ class TestCephVolumeModule(object):
fake_module, fake_action, fake_container_image)
assert result == expected_command_list
@pytest.mark.parametrize('objectstore', ['bluestore', 'filestore'])
@pytest.mark.parametrize('objectstore', ['bluestore'])
def test_create_osd(self, objectstore):
fake_module = MagicMock()
fake_module.params = {'data': '/dev/sda',
@ -250,7 +250,7 @@ class TestCephVolumeModule(object):
fake_module, fake_action, fake_container_image)
assert result == expected_command_list
@pytest.mark.parametrize('objectstore', ['bluestore', 'filestore'])
@pytest.mark.parametrize('objectstore', ['bluestore'])
def test_prepare_osd_container(self, objectstore):
fake_module = MagicMock()
fake_module.params = {'data': '/dev/sda',
@ -272,7 +272,7 @@ class TestCephVolumeModule(object):
fake_module, fake_action, fake_container_image)
assert result == expected_command_list
@pytest.mark.parametrize('objectstore', ['bluestore', 'filestore'])
@pytest.mark.parametrize('objectstore', ['bluestore'])
def test_prepare_osd(self, objectstore):
fake_module = MagicMock()
fake_module.params = {'data': '/dev/sda',
@ -293,7 +293,7 @@ class TestCephVolumeModule(object):
fake_module, fake_action, fake_container_image)
assert result == expected_command_list
@pytest.mark.parametrize('objectstore', ['bluestore', 'filestore'])
@pytest.mark.parametrize('objectstore', ['bluestore'])
def test_batch_osd_container(self, objectstore):
fake_module = MagicMock()
fake_module.params = {'data': '/dev/sda',
@ -313,7 +313,7 @@ class TestCephVolumeModule(object):
'--%s' % objectstore,
'--yes',
'--prepare',
'--journal-size' if objectstore == 'filestore' else '--block-db-size', # noqa E501
'--block-db-size',
'4096',
'/dev/sda',
'/dev/sdb']
@ -321,7 +321,7 @@ class TestCephVolumeModule(object):
fake_module, fake_container_image)
assert result == expected_command_list
@pytest.mark.parametrize('objectstore', ['bluestore', 'filestore'])
@pytest.mark.parametrize('objectstore', ['bluestore'])
def test_batch_osd(self, objectstore):
fake_module = MagicMock()
fake_module.params = {'data': '/dev/sda',
@ -339,7 +339,7 @@ class TestCephVolumeModule(object):
'batch',
'--%s' % objectstore,
'--yes',
'--journal-size' if objectstore == 'filestore' else '--block-db-size', # noqa E501
'--block-db-size',
'4096',
'/dev/sda',
'/dev/sdb']
@ -347,33 +347,6 @@ class TestCephVolumeModule(object):
fake_module, fake_container_image)
assert result == expected_command_list
def test_batch_filestore_with_dedicated_journal(self):
fake_module = MagicMock()
fake_module.params = {'objectstore': 'filestore',
'journal_size': '100',
'cluster': 'ceph',
'batch_devices': ["/dev/sda", "/dev/sdb"],
'journal_devices': ["/dev/sdc", "/dev/sdd"]}
fake_container_image = None
expected_command_list = ['ceph-volume',
'--cluster',
'ceph',
'lvm',
'batch',
'--filestore',
'--yes',
'--journal-size',
'100',
'/dev/sda',
'/dev/sdb',
'--journal-devices',
'/dev/sdc',
'/dev/sdd']
result = ceph_volume.batch(
fake_module, fake_container_image)
assert result == expected_command_list
def test_batch_bluestore_with_dedicated_db(self):
fake_module = MagicMock()
fake_module.params = {'objectstore': 'bluestore',

View File

@ -1,67 +0,0 @@
[tox]
envlist = centos-{container,non_container}-filestore_to_bluestore
skipsdist = True
[testenv]
allowlist_externals =
vagrant
bash
git
pip
passenv=*
setenv=
ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
ANSIBLE_COLLECTIONS_PATH = {envdir}/ansible_collections
ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
ANSIBLE_CALLBACK_ENABLED = profile_tasks
ANSIBLE_CACHE_PLUGIN = memory
ANSIBLE_GATHERING = implicit
# only available for ansible >= 2.5
ANSIBLE_STDOUT_CALLBACK = yaml
# non_container: DEV_SETUP = True
# Set the vagrant box image to use
centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
# Set the ansible inventory host file to be used according to which distrib we are running on
INVENTORY = {env:_INVENTORY:hosts}
container: CONTAINER_DIR = /container
container: PLAYBOOK = site-container.yml.sample
non_container: PLAYBOOK = site.yml.sample
non_container: DEV_SETUP = True
CEPH_DOCKER_IMAGE_TAG = latest-main
deps= -r{toxinidir}/tests/requirements.txt
changedir={toxinidir}/tests/functional/filestore-to-bluestore{env:CONTAINER_DIR:}
commands=
ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections
bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osd0:osd1'
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osd3:osd4' --tags partitions
# deploy the cluster
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \
ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
"
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/filestore-to-bluestore.yml --limit osds --extra-vars "\
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \
ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
"
bash -c "CEPH_STABLE_RELEASE=reef py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
vagrant destroy --force