osd: drop filestore support

filestore objectstore will be gone in the next Ceph release.
This drops the filestore support in ceph-ansible.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/7431/head
Guillaume Abrioux 2023-01-20 19:14:35 +01:00 committed by Teoman ONAY
parent 0e086c4c61
commit 896d82877f
31 changed files with 20 additions and 88 deletions

View File

@ -37,10 +37,6 @@ It is automatically enabled.
Other (optional) supported settings:
- ``osd_objectstore``: Set the Ceph *objectstore* for the OSD. Available options
are ``filestore`` or ``bluestore``. You can only select ``bluestore`` with
the Ceph release is luminous or greater. Defaults to ``bluestore`` if unset.
- ``dmcrypt``: Enable Ceph's encryption on OSDs using ``dmcrypt``.
Defaults to ``false`` if unset.
@ -138,12 +134,6 @@ Supported ``lvm_volumes`` configuration settings:
per OSD when using ``devices`` like there is for ``lvm_volumes``.
``filestore`` objectstore variables:
- ``journal``: The logical volume name or full path to a partition.
- ``journal_vg``: The volume group name, **required** if ``journal`` is a logical volume.
.. warning:: Each entry must be unique, duplicate values are not allowed

View File

@ -373,7 +373,6 @@ dummy:
# Any device containing these patterns in their path will be excluded.
#osd_auto_discovery_exclude: "dm-*|loop*|md*|rbd*"
## MDS options
#
#mds_max_mds: 1
@ -545,7 +544,7 @@ dummy:
# OS TUNING #
#############
#disable_transparent_hugepage: "{{ false if osd_objectstore == 'bluestore' else true }}"
#disable_transparent_hugepage: false
#os_tuning_params:
# - { name: fs.file-max, value: 26234859 }
# - { name: vm.zone_reclaim_mode, value: 0 }

View File

@ -373,7 +373,6 @@ ceph_iscsi_config_dev: false
# Any device containing these patterns in their path will be excluded.
#osd_auto_discovery_exclude: "dm-*|loop*|md*|rbd*"
## MDS options
#
#mds_max_mds: 1
@ -545,7 +544,7 @@ ceph_iscsi_config_dev: false
# OS TUNING #
#############
#disable_transparent_hugepage: "{{ false if osd_objectstore == 'bluestore' else true }}"
#disable_transparent_hugepage: false
#os_tuning_params:
# - { name: fs.file-max, value: 26234859 }
# - { name: vm.zone_reclaim_mode, value: 0 }

View File

@ -73,15 +73,6 @@
run_once: true
when: delegate_facts_host | bool
- name: fail if one osd node is using filestore
fail:
msg: >
filestore OSDs are not supported with cephadm.
Please convert them with the filestore-to-bluestore.yml playbook first.
when:
- osd_group_name in group_names
- osd_objectstore == 'filestore'
- import_role:
name: ceph-facts
tasks_from: container_binary.yml

View File

@ -160,11 +160,6 @@
tasks_from: systemd.yml
when: inventory_hostname in groups.get(nfs_group_name, [])
- import_role:
name: ceph-osd
tasks_from: container_options_facts.yml
when: inventory_hostname in groups.get(osd_group_name, [])
- import_role:
name: ceph-osd
tasks_from: systemd.yml

View File

@ -40,7 +40,7 @@ options:
default: ceph
objectstore:
description:
- The objectstore of the OSD, (bluestore only)
- The objectstore of the OSD.
required: false
choices: ['bluestore']
default: bluestore

View File

@ -66,11 +66,9 @@ log file = {{ rbd_client_log_file }} # must be writable by QEMU and allowed by S
{% endif %}
{% if inventory_hostname in groups.get(osd_group_name, []) %}
{% if osd_objectstore == 'bluestore' %}
[osd]
osd memory target = {{ _osd_memory_target | default(osd_memory_target) }}
{% endif %}
{% endif %}
{% if inventory_hostname in groups.get(rgw_group_name, []) %}
{% set _rgw_hostname = hostvars[inventory_hostname]['rgw_hostname'] | default(hostvars[inventory_hostname]['ansible_facts']['hostname']) %}

View File

@ -365,7 +365,6 @@ osd_objectstore: bluestore
# Any device containing these patterns in their path will be excluded.
osd_auto_discovery_exclude: "dm-*|loop*|md*|rbd*"
## MDS options
#
mds_max_mds: 1
@ -537,7 +536,7 @@ ceph_conf_overrides: {}
# OS TUNING #
#############
disable_transparent_hugepage: "{{ false if osd_objectstore == 'bluestore' else true }}"
disable_transparent_hugepage: false
os_tuning_params:
- { name: fs.file-max, value: 26234859 }
- { name: vm.zone_reclaim_mode, value: 0 }

View File

@ -1,14 +0,0 @@
---
- name: set_fact container_env_args '-e osd_bluestore=1 -e osd_filestore=0 -e osd_dmcrypt=0'
set_fact:
container_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=0
when:
- osd_objectstore == 'bluestore'
- not dmcrypt | bool
- name: set_fact container_env_args '-e osd_bluestore=1 -e osd_filestore=0 -e osd_dmcrypt=1'
set_fact:
container_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=1
when:
- osd_objectstore == 'bluestore'
- dmcrypt | bool

View File

@ -51,10 +51,6 @@
- not rolling_update | default(False) | bool
- not switch_to_containers | default(False) | bool
- name: include container_options_facts.yml
include_tasks: container_options_facts.yml
when: containerized_deployment | bool
- name: include_tasks scenarios/lvm.yml
include_tasks: scenarios/lvm.yml
when:

View File

@ -14,9 +14,7 @@ OSD_FSID="$(echo "$CEPH_VOLUME_LIST_JSON" | $PYTHON -c "import sys, json; print(
OSD_TYPE="$(echo "$CEPH_VOLUME_LIST_JSON" | $PYTHON -c "import sys, json; print(json.load(sys.stdin)['$OSD_ID'][0]['type'])")"
# Discover the objectstore
if [[ "data journal" =~ $OSD_TYPE ]]; then
OSD_OBJECTSTORE=(--filestore)
elif [[ "block wal db" =~ $OSD_TYPE ]]; then
if [[ "block wal db" =~ $OSD_TYPE ]]; then
OSD_OBJECTSTORE=(--bluestore)
else
log "Unable to discover osd objectstore for OSD type: $OSD_TYPE"
@ -38,9 +36,6 @@ numactl \
--pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
--rm --net=host --privileged=true --pid=host \
--ipc=host \
{% if osd_objectstore == 'filestore' -%}
--memory={{ ceph_osd_docker_memory_limit }} \
{% endif -%}
--cpus={{ cpu_limit }} \
{% if ceph_osd_docker_cpuset_cpus is defined -%}
--cpuset-cpus='{{ ceph_osd_docker_cpuset_cpus }}' \
@ -59,7 +54,6 @@ numactl \
{% if ansible_facts['distribution'] == 'Ubuntu' -%}
--security-opt apparmor:unconfined \
{% endif -%}
{{ container_env_args }} \
-e CLUSTER={{ cluster }} \
-e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \
-v /run/lvm/:/run/lvm/ \

View File

@ -8,7 +8,7 @@
- name: validate osd_objectstore
fail:
msg: "osd_objectstore must be either 'bluestore'"
msg: "osd_objectstore must be 'bluestore'"
when: osd_objectstore not in ['bluestore']
- name: validate monitor network configuration

View File

@ -1,5 +1,4 @@
---
osd_objectstore: "bluestore"
lvm_volumes:
- data: data-lv1
data_vg: test_group

View File

@ -1,7 +1,6 @@
---
os_tuning_params:
- { name: fs.file-max, value: 26234859 }
osd_objectstore: "bluestore"
lvm_volumes:
- data: data-lv1
data_vg: test_group

View File

@ -12,8 +12,6 @@ public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
journal_size: 2048
osd_objectstore: "bluestore"
crush_device_class: test
copy_admin_key: true
devices:

View File

@ -3,4 +3,3 @@ mon0
[osds]
osd0
osd1 osd_objectstore=bluestore devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']"

View File

@ -5,7 +5,7 @@ docker: true
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 2
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0

View File

@ -7,7 +7,6 @@ public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
osd_objectstore: "bluestore"
crush_device_class: test
copy_admin_key: true
devices:

View File

@ -3,4 +3,3 @@ mon0
[osds]
osd0
osd1 osd_objectstore=bluestore devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']"

View File

@ -5,7 +5,7 @@ docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 2
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0

View File

@ -9,7 +9,6 @@ ceph_repository: community
public_network: "192.168.33.0/24"
cluster_network: "192.168.34.0/24"
monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
journal_size: 100
copy_admin_key: true
containerized_deployment: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb

View File

@ -2,7 +2,7 @@
mon0
[osds]
osd0 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
osd1 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
osd2 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
osd3 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
osd0 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
osd1 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
osd2 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
osd3 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true

View File

@ -5,7 +5,6 @@ ceph_repository: community
public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
journal_size: 100
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
os_tuning_params:

View File

@ -2,7 +2,7 @@
mon0
[osds]
osd0 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
osd1 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
osd2 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
osd3 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
osd0 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
osd1 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
osd2 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
osd3 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true

View File

@ -1,2 +0,0 @@
---
journal_size: 100

View File

@ -4,5 +4,3 @@ mon0
[osds]
osd0 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
osd1 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
osd2 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
osd3 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true

View File

@ -5,7 +5,7 @@ docker: True
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 4
osd_vms: 2
mds_vms: 0
rgw_vms: 0
nfs_vms: 0

View File

@ -1,4 +1,3 @@
---
os_tuning_params:
- { name: fs.file-max, value: 26234859 }
journal_size: 100

View File

@ -4,5 +4,3 @@ mon0 monitor_address=192.168.71.10
[osds]
osd0 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
osd1 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
osd2 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
osd3 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true

View File

@ -5,7 +5,7 @@ docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 4
osd_vms: 2
mds_vms: 0
rgw_vms: 0
nfs_vms: 0

View File

@ -370,7 +370,8 @@ commands=
# configure lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file)
!lvm_batch-!lvm_auto_discovery: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osds:!osd2'
lvm_osds,all_in_one: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit osd2
lvm_osds: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osd2'
all_in_one: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
rhcs: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.io} repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"