osd: remove variable osd_scenario

As of stable-4.0, the only valid scenario is `lvm`.
This variable is therefore useless and can be removed.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/3851/head
Guillaume Abrioux 2019-04-11 10:01:15 +02:00 committed by Dimitri Savineau
parent 4d5637fd8a
commit 4d35e9eeed
45 changed files with 95 additions and 435 deletions
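
With osd_scenario gone, ceph-ansible always drives OSD creation through ceph-volume, so group_vars only need to describe the devices or logical volumes themselves. A minimal group_vars/osds.yml sketch for whole-device (ceph-volume lvm batch) deployment follows; the device paths are illustrative and not taken from this change:

osd_objectstore: bluestore
devices:
  - /dev/sdb
  - /dev/sdc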

Vagrantfile

@ -93,7 +93,6 @@ ansible_provision = proc do |ansible|
ansible.extra_vars = ansible.extra_vars.merge({
cluster_network: "#{CLUSTER_SUBNET}.0/16",
devices: ['/dev/sdc'], # hardcode leftover disk
osd_scenario: 'collocated',
monitor_address_block: "#{PUBLIC_SUBNET}.0/16",
radosgw_address_block: "#{PUBLIC_SUBNET}.0/16",
public_network: "#{PUBLIC_SUBNET}.0/16",

@ -52,20 +52,13 @@ dummy:
#osd_auto_discovery: false
# Encrypt your OSD device using dmcrypt
# If set to True, no matter which osd_objecstore and osd_scenario you use the data will be encrypted
# If set to True, no matter which osd_objecstore you use the data will be encrypted
#dmcrypt: False
#osd_scenario: dummy
#valid_osd_scenarios:
# - lvm
#dedicated_devices: []
# III. Use ceph-volume to create OSDs from logical volumes.
# Use 'osd_scenario: lvm' to enable this scenario.
# when using lvm, not collocated journals.
# Use ceph-volume to create OSDs from logical volumes.
# lvm_volumes is a list of dictionaries.
#
# Filestore: Each dictionary must contain a data, journal and vg_name key. Any
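
The truncated comment above describes lvm_volumes entries; a filestore-style entry, reusing the LV, VG and journal names that appear in the test scenario files later in this change, looks roughly like:

lvm_volumes:
  - data: data-lv1
    data_vg: test_group
    journal: /dev/sdc1
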

@ -387,7 +387,7 @@ ceph_rhcs_version: 3
## Rados Gateway options
#
#radosgw_frontend_type: beast # For additional frontends see: http://docs.ceph.com/docs/nautilus/radosgw/frontends/
#radosgw_frontend_type: beast # For additionnal frontends see: http://docs.ceph.com/docs/nautilus/radosgw/frontends/
#radosgw_civetweb_port: 8080
#radosgw_civetweb_num_threads: 512

@ -1,11 +1,11 @@
- name: creates logical volumes for the bucket index or fs journals on a single device and prepares for use of osd_scenario=lvm.
- name: creates logical volumes for the bucket index or fs journals on a single device.
become: true
hosts:
- osds
vars:
logfile: |
Suggested cut and paste under "lvm_volumes:" in "group_vars/osds.yml" for configuring with osd_scenario=lvm
Suggested cut and paste under "lvm_volumes:" in "group_vars/osds.yml"
-----------------------------------------------------------------------------------------------------------
{% for lv in nvme_device_lvs %}
- data: {{ lv.lv_name }}

@ -324,7 +324,6 @@
with_items: "{{ lvm_volumes }}"
when:
- lvm_volumes | default([]) | length > 0
- osd_scenario == "lvm"
- ceph_volume_present.rc == 0
- name: zap and destroy osds created by ceph-volume with devices
@ -336,7 +335,6 @@
with_items: "{{ devices | default([]) }}"
when:
- devices | default([]) | length > 0
- osd_scenario == "lvm"
- ceph_volume_present.rc == 0
- name: get ceph block partitions

@ -344,142 +344,33 @@
register: remove_osd_mountpoints
ignore_errors: true
- name: for ceph-disk based deployment
block:
- name: get prepare container
command: "docker ps -a -q --filter='name=ceph-osd-prepare'"
register: prepare_containers
ignore_errors: true
- name: remove ceph osd prepare container
command: "docker rm -f {{ item }}"
with_items: "{{ prepare_containers.stdout_lines }}"
ignore_errors: true
# NOTE(leseb): hope someone will find a more elegant way one day...
- name: see if encrypted partitions are present
shell: |
blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2
register: encrypted_ceph_partuuid
- name: get ceph data partitions
command: |
blkid -o device -t PARTLABEL="ceph data"
failed_when: false
register: ceph_data_partition_to_erase_path
- name: get ceph lockbox partitions
command: |
blkid -o device -t PARTLABEL="ceph lockbox"
failed_when: false
register: ceph_lockbox_partition_to_erase_path
- name: get ceph block partitions
command: |
blkid -o device -t PARTLABEL="ceph block"
failed_when: false
register: ceph_block_partition_to_erase_path
- name: get ceph journal partitions
command: |
blkid -o device -t PARTLABEL="ceph journal"
failed_when: false
register: ceph_journal_partition_to_erase_path
- name: get ceph db partitions
command: |
blkid -o device -t PARTLABEL="ceph block.db"
failed_when: false
register: ceph_db_partition_to_erase_path
- name: get ceph wal partitions
command: |
blkid -o device -t PARTLABEL="ceph block.wal"
failed_when: false
register: ceph_wal_partition_to_erase_path
- name: set_fact combined_devices_list
set_fact:
combined_devices_list: "{{ ceph_data_partition_to_erase_path.get('stdout_lines', []) +
ceph_lockbox_partition_to_erase_path.get('stdout_lines', []) +
ceph_block_partition_to_erase_path.get('stdout_lines', []) +
ceph_journal_partition_to_erase_path.get('stdout_lines', []) +
ceph_db_partition_to_erase_path.get('stdout_lines', []) +
ceph_wal_partition_to_erase_path.get('stdout_lines', []) }}"
- name: resolve parent device
command: lsblk --nodeps -no pkname "{{ item }}"
register: tmp_resolved_parent_device
with_items:
- "{{ combined_devices_list }}"
- name: set_fact resolved_parent_device
set_fact:
resolved_parent_device: "{{ tmp_resolved_parent_device.results | map(attribute='stdout') | list | unique }}"
- name: zap ceph osd disks
shell: |
docker run --rm \
--privileged=true \
--name ceph-osd-zap-{{ ansible_hostname }}-{{ item }} \
-v /dev/:/dev/ \
-e OSD_DEVICE=/dev/{{ item }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
zap_device
with_items:
- "{{ resolved_parent_device }}"
- name: wait until the zap containers die
shell: |
docker ps | grep -sq ceph-osd-zap-{{ ansible_hostname }}
register: zap_alive
failed_when: false
until: zap_alive.rc != 0
retries: 5
delay: 10
- name: remove ceph osd zap disk container
docker_container:
image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
name: "ceph-osd-zap-{{ ansible_hostname }}-{{ item }}"
state: absent
with_items:
- "{{ resolved_parent_device }}"
when:
- osd_scenario != "lvm"
- name: for ceph-volume based deployments
block:
- name: zap and destroy osds created by ceph-volume with lvm_volumes
ceph_volume:
data: "{{ item.data }}"
data_vg: "{{ item.data_vg|default(omit) }}"
journal: "{{ item.journal|default(omit) }}"
journal_vg: "{{ item.journal_vg|default(omit) }}"
db: "{{ item.db|default(omit) }}"
db_vg: "{{ item.db_vg|default(omit) }}"
wal: "{{ item.wal|default(omit) }}"
wal_vg: "{{ item.wal_vg|default(omit) }}"
action: "zap"
environment:
CEPH_VOLUME_DEBUG: 1
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_items: "{{ lvm_volumes }}"
when: lvm_volumes | default([]) | length > 0
- name: zap and destroy osds created by ceph-volume with devices
ceph_volume:
data: "{{ item }}"
action: "zap"
environment:
CEPH_VOLUME_DEBUG: 1
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_items: "{{ devices | default([]) }}"
when: devices | default([]) | length > 0
when:
- osd_scenario == "lvm"
- name: zap and destroy osds created by ceph-volume with lvm_volumes
ceph_volume:
data: "{{ item.data }}"
data_vg: "{{ item.data_vg|default(omit) }}"
journal: "{{ item.journal|default(omit) }}"
journal_vg: "{{ item.journal_vg|default(omit) }}"
db: "{{ item.db|default(omit) }}"
db_vg: "{{ item.db_vg|default(omit) }}"
wal: "{{ item.wal|default(omit) }}"
wal_vg: "{{ item.wal_vg|default(omit) }}"
action: "zap"
environment:
CEPH_VOLUME_DEBUG: 1
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_items: "{{ lvm_volumes }}"
when: lvm_volumes | default([]) | length > 0
- name: zap and destroy osds created by ceph-volume with devices
ceph_volume:
data: "{{ item }}"
action: "zap"
environment:
CEPH_VOLUME_DEBUG: 1
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_items: "{{ devices | default([]) }}"
when: devices | default([]) | length > 0
- name: remove ceph osd service
file:

@ -1,6 +1,6 @@
# This file configures logical volume creation for FS Journals on NVMe, a NVMe based bucket index, and HDD based OSDs.
# This playbook configures one NVMe device at a time. If your OSD systems contain multiple NVMe devices, you will need to edit the key variables ("nvme_device", "hdd_devices") for each run.
# It is meant to be used when osd_objectstore=filestore and it outputs the necessary input for group_vars/osds.yml when configured with osd_scenario=lvm.
# It is meant to be used when osd_objectstore=filestore and it outputs the necessary input for group_vars/osds.yml.
# The LVs for journals are created first then the LVs for data. All LVs for journals correspond to a LV for data.
#
## CHANGE THESE VARS ##
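
As a rough illustration, the variables that comment refers to could be filled in as below; the NVMe and HDD paths are placeholders only:

nvme_device: /dev/nvme0n1
hdd_devices:
  - /dev/sdb
  - /dev/sdc
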

@ -8,19 +8,11 @@
when:
- inventory_hostname in groups.get(osd_group_name, [])
block:
- name: count number of osds for ceph-disk scenarios
set_fact:
num_osds: "{{ devices | length | int }}"
when:
- devices | default([]) | length > 0
- osd_scenario in ['collocated', 'non-collocated']
- name: count number of osds for lvm scenario
set_fact:
num_osds: "{{ lvm_volumes | length | int }}"
when:
- lvm_volumes | default([]) | length > 0
- osd_scenario == 'lvm'
- name: run 'ceph-volume lvm batch --report' to see how many osds are to be created
ceph_volume:
@ -40,14 +32,12 @@
PYTHONIOENCODING: utf-8
when:
- devices | default([]) | length > 0
- osd_scenario == 'lvm'
- name: set_fact num_osds from the output of 'ceph-volume lvm batch --report'
set_fact:
num_osds: "{{ (lvm_batch_report.stdout | from_json).osds | length | int }}"
when:
- devices | default([]) | length > 0
- osd_scenario == 'lvm'
- (lvm_batch_report.stdout | from_json).changed
- name: run 'ceph-volume lvm list' to see how many osds have already been created
@ -61,7 +51,6 @@
PYTHONIOENCODING: utf-8
when:
- devices | default([]) | length > 0
- osd_scenario == 'lvm'
- not (lvm_batch_report.stdout | from_json).changed
- name: set_fact num_osds from the output of 'ceph-volume lvm list'
@ -69,7 +58,6 @@
num_osds: "{{ lvm_list.stdout | from_json | length | int }}"
when:
- devices | default([]) | length > 0
- osd_scenario == 'lvm'
- not (lvm_batch_report.stdout | from_json).changed
# ceph-common
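
The tasks above read only two keys from the parsed 'ceph-volume lvm batch --report' output, so the shape they assume is roughly the following; per-OSD entry contents are omitted here since only the list length is counted:

changed: true
osds:
  - {}
  - {}
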

@ -66,13 +66,7 @@ for unit in $(systemctl list-units | grep -E "loaded * active" | grep -oE "ceph-
# We need to wait because it may take some time for the socket to actually exists
COUNT=10
# Wait and ensure the socket exists after restarting the daemon
{% if containerized_deployment and osd_scenario != 'lvm' -%}
id=$(get_dev_name "$unit")
container_id=$(get_container_id_from_dev_name "$id")
wait_for_socket_in_container "$container_id"
osd_id=$whoami
container_exec="{{ container_binary }} exec $container_id"
{% elif containerized_deployment and osd_scenario == 'lvm' %}
{% if containerized_deployment %}
osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]+')
container_id=$(get_container_id_from_dev_name "ceph-osd-${osd_id}")
container_exec="{{ container_binary }} exec $container_id"

@ -44,20 +44,13 @@ devices: []
osd_auto_discovery: false
# Encrypt your OSD device using dmcrypt
# If set to True, no matter which osd_objecstore and osd_scenario you use the data will be encrypted
# If set to True, no matter which osd_objecstore you use the data will be encrypted
dmcrypt: False
osd_scenario: lvm
valid_osd_scenarios:
- lvm
dedicated_devices: []
# III. Use ceph-volume to create OSDs from logical volumes.
# Use 'osd_scenario: lvm' to enable this scenario.
# when using lvm, not collocated journals.
# Use ceph-volume to create OSDs from logical volumes.
# lvm_volumes is a list of dictionaries.
#
# Filestore: Each dictionary must contain a data, journal and vg_name key. Any

@ -1,24 +0,0 @@
---
- name: resolve dedicated device link(s)
command: readlink -f {{ item }}
changed_when: false
with_items: "{{ dedicated_devices }}"
register: dedicated_devices_prepare_canonicalize
when:
- osd_scenario == 'non-collocated'
- not osd_auto_discovery
- name: set_fact build dedicated_devices from resolved symlinks
set_fact:
dedicated_devices_tmp: "{{ dedicated_devices_tmp | default([]) + [ item.stdout ] }}"
with_items: "{{ dedicated_devices_prepare_canonicalize.results }}"
when:
- osd_scenario == 'non-collocated'
- not osd_auto_discovery
- name: set_fact build final dedicated_devices list
set_fact:
dedicated_devices: "{{ dedicated_devices_tmp | reject('search','/dev/disk') | list }}"
when:
- osd_scenario == 'non-collocated'
- not osd_auto_discovery

@ -29,7 +29,6 @@
register: result
until: result is succeeded
when:
- osd_scenario == 'lvm'
- not is_atomic
tags:
- with_pkg
@ -40,9 +39,6 @@
- name: include container_options_facts.yml
include_tasks: container_options_facts.yml
- name: include build_devices.yml
include_tasks: build_devices.yml
- name: read information about the devices
parted:
device: "{{ item }}"
@ -53,7 +49,6 @@
- name: include_tasks scenarios/lvm.yml
include_tasks: scenarios/lvm.yml
when:
- osd_scenario == 'lvm'
- lvm_volumes|length > 0
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
@ -61,17 +56,10 @@
- name: include_tasks scenarios/lvm-batch.yml
include_tasks: scenarios/lvm-batch.yml
when:
- osd_scenario == 'lvm'
- devices|length > 0
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
- name: include_tasks activate_osds.yml
include_tasks: activate_osds.yml
when:
- not containerized_deployment
- osd_scenario != 'lvm'
- name: include_tasks start_osds.yml
include_tasks: start_osds.yml

@ -12,22 +12,6 @@
when:
- ceph_docker_on_openstack
- name: test if the container image has directory {{ container_bin_path }}
command: "{{ container_binary }} run --rm --net=host --entrypoint=test {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} -d {{ container_bin_path }}"
changed_when: false
failed_when: false
register: test_container_bin_path
when:
- osd_scenario != 'lvm'
- name: test if the container image has the disk_list function
command: "{{ container_binary }} run --rm --net=host --entrypoint=stat {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} {{ container_bin_path + '/disk_list.sh' if test_container_bin_path.rc == 0 else 'disk_list.sh' }}"
changed_when: false
failed_when: false
register: disk_list
when:
- osd_scenario != 'lvm'
- name: generate ceph osd docker run script
become: true
template:
@ -72,12 +56,12 @@
- name: systemd start osd
systemd:
name: ceph-osd@{{ item | regex_replace('/dev/', '') if osd_scenario != 'lvm' and containerized_deployment else item }}
name: ceph-osd@{{ item }}
state: started
enabled: yes
masked: no
daemon_reload: yes
with_items: "{{ devices if osd_scenario != 'lvm' and containerized_deployment else ((ceph_osd_ids.stdout | from_json).keys() | list) if osd_scenario == 'lvm' and not containerized_deployment else osd_ids_non_container.stdout_lines }}"
with_items: "{{ ((ceph_osd_ids.stdout | from_json).keys() | list) if not containerized_deployment else osd_ids_non_container.stdout_lines }}"
- name: ensure systemd service override directory exists
file:

@ -2,70 +2,6 @@
# {{ ansible_managed }}
{% if osd_scenario != 'lvm' -%}
{% if disk_list.get('rc') == 0 -%}
#############
# VARIABLES #
#############
DOCKER_ENV=""
#############
# FUNCTIONS #
#############
function expose_partitions () {
DOCKER_ENV=$({{ container_binary }} run --rm --net=host --name expose_partitions_${1} --privileged=true -v /dev/:/dev/ -v /etc/ceph:/etc/ceph:z -e CLUSTER={{ cluster }} -e OSD_DEVICE=/dev/${1} {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} disk_list)
}
{% else -%}
# NOTE(leseb): maintains backwards compatibility with old ceph-docker Jewel images
# Jewel images prior to https://github.com/ceph/ceph-docker/pull/797
REGEX="[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
function expose_partitions {
if {{ container_binary }} ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}; then
if [[ ! -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log ]]; then
{{ container_binary }} logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log
fi
fi
if {{ container_binary }} ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-${1}; then
if [[ ! -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log ]]; then
{{ container_binary }} logs ceph-osd-prepare-{{ ansible_hostname }}-${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log
fi
fi
if [[ -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log ]]; then
part=$(grep "Journal is GPT partition" {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log | grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
DOCKER_ENV="-e OSD_JOURNAL=$part"
fi
if [[ -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log ]]; then
part=$(grep "Journal is GPT partition" {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log | grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
DOCKER_ENV="-e OSD_JOURNAL=$part"
fi
if [[ -z $DOCKER_ENV ]]; then
# NOTE(leseb): if we arrive here this probably means we just switched from non-containers to containers.
# This is tricky as we don't have any info on the type of OSD, this is 'only' a problem for non-collocated scenarios
# We can't assume that the 'ceph' is still present so calling Docker exec instead
part=$({{ container_binary }} run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} list /dev/${1} | awk '/journal / {print $1}')
DOCKER_ENV="-e OSD_JOURNAL=$part"
fi
# if empty, the previous command didn't find anything so we fail
if [[ -z $DOCKER_ENV ]]; then
echo "ERROR: could not discover ceph partitions"
exit 1
fi
}
{% endif -%}
expose_partitions "$1"
# discover osd_objectstore for ceph-disk based osds
if [[ $DOCKER_ENV =~ "BLUESTORE" ]]; then
DOCKER_ENV="$DOCKER_ENV -e OSD_BLUESTORE=1"
elif [[ $DOCKER_ENV =~ "JOURNAL" ]]; then
DOCKER_ENV="$DOCKER_ENV -e OSD_FILESTORE=1"
fi
{% endif -%}
########
# MAIN #
########
@ -112,17 +48,9 @@ numactl \
{% if (ceph_tcmalloc_max_total_thread_cache | int) > 0 and osd_objectstore == 'filestore' -%}
-e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \
{% endif -%}
{% if osd_scenario == 'lvm' -%}
-v /run/lvm/:/run/lvm/ \
-e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE \
-e OSD_ID="$1" \
--name=ceph-osd-"$1" \
{% else -%}
$DOCKER_ENV \
-e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
-e OSD_DEVICE=/dev/"${1}" \
-e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
--name=ceph-osd-{{ ansible_hostname }}-"${1}" \
{% endif -%}
{{ ceph_osd_docker_extra_env }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}

@ -7,19 +7,10 @@ After=docker.service
[Service]
EnvironmentFile=-/etc/environment
{% if osd_scenario == 'lvm' -%}
ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-osd-%i
ExecStartPre=-/usr/bin/{{ container_binary }} rm -f ceph-osd-%i
{% else %}
ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-osd-{{ ansible_hostname }}-%i
ExecStartPre=-/usr/bin/{{ container_binary }} rm -f ceph-osd-{{ ansible_hostname }}-%i
{% endif -%}
ExecStart={{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh %i
{% if osd_scenario == 'lvm' -%}
ExecStop=-/usr/bin/{{ container_binary }} stop ceph-osd-%i
{% else %}
ExecStop=-/usr/bin/{{ container_binary }} stop ceph-osd-{{ ansible_hostname }}-%i
{% endif -%}
Restart=always
RestartSec=10s
TimeoutStartSec=120

@ -95,13 +95,3 @@
when:
- iscsi_gw_group_name in group_names
- name: warn users that ceph-disk scenarios will be removed on 3.3
debug:
msg: |
osd_scenario is set to {{ osd_scenario }}, this variable is not used anymore and defaults to 'lvm'.
If you have something different than 'lvm', this means you want ceph-ansible to manage your ceph-disk OSDs.
So basically, ceph-ansible can still start your ceph-disk osd services
run_once: true
when:
- osd_group_name in group_names
- osd_scenario != 'lvm'

@ -97,9 +97,7 @@ def node(host, request):
rolling_update = os.environ.get("ROLLING_UPDATE", "False")
group_names = ansible_vars["group_names"]
docker = ansible_vars.get("docker")
osd_scenario = ansible_vars.get("osd_scenario")
radosgw_num_instances = ansible_vars.get("radosgw_num_instances", 1)
lvm_scenario = osd_scenario in ['lvm', 'lvm-batch']
ceph_release_num = {
'jewel': 10,
'kraken': 11,
@ -123,12 +121,6 @@ def node(host, request):
request.function, group_names)
pytest.skip(reason)
if request.node.get_closest_marker("no_lvm_scenario") and lvm_scenario:
pytest.skip("Not a valid test for lvm scenarios")
if not lvm_scenario and request.node.get_closest_marker("lvm_scenario"):
pytest.skip("Not a valid test for non-lvm scenarios")
if request.node.get_closest_marker("no_docker") and docker:
pytest.skip(
"Not a valid test for containerized deployments or atomic hosts")
@ -137,11 +129,6 @@ def node(host, request):
pytest.skip(
"Not a valid test for non-containerized deployments or atomic hosts") # noqa E501
journal_collocation_test = ansible_vars.get("osd_scenario") == "collocated"
if request.node.get_closest_marker("journal_collocation") and not journal_collocation_test: # noqa E501
pytest.skip("Scenario is not using journal collocation")
data = dict(
vars=ansible_vars,

@ -10,7 +10,6 @@ monitor_interface: eth1
radosgw_interface: eth1
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
lvm_volumes:

@ -8,7 +8,6 @@ monitor_interface: eth1
radosgw_interface: eth1
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
lvm_volumes:

@ -10,7 +10,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
lvm_volumes:

@ -8,7 +8,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
lvm_volumes:

@ -1,7 +1,6 @@
---
ceph_osd_docker_run_script_path: /var/tmp
osd_objectstore: "bluestore"
osd_scenario: lvm
lvm_volumes:
- data: data-lv1
data_vg: test_group

@ -1,7 +1,6 @@
---
os_tuning_params:
- { name: fs.file-max, value: 26234859 }
osd_scenario: lvm
osd_objectstore: "bluestore"
lvm_volumes:
- data: data-lv1

@ -11,7 +11,6 @@ public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
osd_objectstore: "bluestore"
osd_scenario: lvm
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
lvm_volumes:

@ -6,7 +6,6 @@ public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
osd_objectstore: "bluestore"
osd_scenario: lvm
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
lvm_volumes:

@ -1,6 +1,5 @@
---
ceph_osd_docker_run_script_path: /var/tmp
osd_scenario: lvm
osd_objectstore: "bluestore"
lvm_volumes:
- data: data-lv1

@ -1,6 +1,5 @@
---
ceph_osd_docker_run_script_path: /var/tmp
osd_scenario: lvm
osd_objectstore: "bluestore"
lvm_volumes:
- data: data-lv1

@ -15,7 +15,6 @@ radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
journal_size: 100
osd_objectstore: "bluestore"
crush_device_class: test
osd_scenario: lvm
copy_admin_key: true
osd_auto_discovery: true
os_tuning_params:

@ -9,7 +9,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
osd_objectstore: "bluestore"
crush_device_class: test
osd_scenario: lvm
copy_admin_key: true
osd_auto_discovery: true
os_tuning_params:

@ -15,7 +15,6 @@ radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
journal_size: 100
osd_objectstore: "bluestore"
crush_device_class: test
osd_scenario: lvm
copy_admin_key: true
devices:
- /dev/sdb

@ -9,7 +9,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
osd_objectstore: "bluestore"
crush_device_class: test
osd_scenario: lvm
copy_admin_key: true
devices:
- /dev/disk/by-id/ata-QEMU_HARDDISK_QM00002

@ -15,7 +15,6 @@ osd_objectstore: "filestore"
copy_admin_key: true
containerized_deployment: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
osd_scenario: lvm
lvm_volumes:
- data: data-lv1
journal: /dev/sdc1

@ -10,7 +10,6 @@ journal_size: 100
osd_objectstore: "filestore"
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
osd_scenario: lvm
lvm_volumes:
- data: data-lv1
journal: /dev/sdc1

@ -9,73 +9,60 @@
gather_facts: false
become: yes
tasks:
- block:
- name: check if it is atomic host
stat:
path: /run/ostree-booted
register: stat_ostree
tags:
- always
- name: set_fact is_atomic
set_fact:
is_atomic: '{{ stat_ostree.stat.exists }}'
tags:
- always
# Some images may not have lvm2 installed
- name: install lvm2
package:
name: lvm2
state: present
register: result
until: result is succeeded
when:
- not is_atomic
- name: create physical volume
command: pvcreate /dev/sdb
failed_when: false
- name: create volume group
command: vgcreate test_group /dev/sdb
failed_when: false
- name: create logical volume 1
command: lvcreate --yes -l 50%FREE -n data-lv1 test_group
failed_when: false
- name: create logical volume 2
command: lvcreate --yes -l 50%FREE -n data-lv2 test_group
failed_when: false
- name: partition /dev/sdc for journals
parted:
device: /dev/sdc
number: 1
part_start: 0%
part_end: 50%
unit: '%'
label: gpt
state: present
- name: partition /dev/sdc for journals
parted:
device: /dev/sdc
number: 2
part_start: 50%
part_end: 100%
unit: '%'
state: present
label: gpt
- name: create journals vg from /dev/sdc2
lvg:
vg: journals
pvs: /dev/sdc2
- name: create journal1 lv
command: lvcreate --yes -l 100%FREE -n journal1 journals
failed_when: false
when: osd_scenario == 'lvm'
- name: check if it is atomic host
stat:
path: /run/ostree-booted
register: stat_ostree
tags:
- always
- name: set_fact is_atomic
set_fact:
is_atomic: '{{ stat_ostree.stat.exists }}'
tags:
- always
# Some images may not have lvm2 installed
- name: install lvm2
package:
name: lvm2
state: present
register: result
until: result is succeeded
when:
- not is_atomic
- name: create physical volume
command: pvcreate /dev/sdb
failed_when: false
- name: create volume group
command: vgcreate test_group /dev/sdb
failed_when: false
- name: create logical volume 1
command: lvcreate --yes -l 50%FREE -n data-lv1 test_group
failed_when: false
- name: create logical volume 2
command: lvcreate --yes -l 50%FREE -n data-lv2 test_group
failed_when: false
- name: partition /dev/sdc for journals
parted:
device: /dev/sdc
number: 1
part_start: 0%
part_end: 50%
unit: '%'
label: gpt
state: present
- name: partition /dev/sdc for journals
parted:
device: /dev/sdc
number: 2
part_start: 50%
part_end: 100%
unit: '%'
state: present
label: gpt
- name: create journals vg from /dev/sdc2
lvg:
vg: journals
pvs: /dev/sdc2
- name: create journal1 lv
command: lvcreate --yes -l 100%FREE -n journal1 journals
failed_when: false

@ -49,7 +49,6 @@ all:
- {name: vms, pg_num: 8, rule_name: ''}
- {name: volumes, pg_num: 8, rule_name: ''}
osd_objectstore: filestore
osd_scenario: collocated
ceph_osd_docker_run_script_path: /opt
pools: []
public_network: 192.168.95.0/24

@ -1,7 +1,6 @@
---
ceph_osd_docker_run_script_path: /var/tmp
osd_objectstore: "bluestore"
osd_scenario: lvm
lvm_volumes:
- data: data-lv1
data_vg: test_group

@ -1,7 +1,6 @@
---
ceph_osd_docker_run_script_path: /var/tmp
osd_objectstore: "bluestore"
osd_scenario: lvm
lvm_volumes:
- data: data-lv1
data_vg: test_group

@ -10,7 +10,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
lvm_volumes:

@ -10,7 +10,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
lvm_volumes:

@ -8,7 +8,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
lvm_volumes:

@ -8,7 +8,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
lvm_volumes:

@ -1,7 +1,6 @@
---
ceph_osd_docker_run_script_path: /var/tmp
osd_objectstore: "bluestore"
osd_scenario: lvm
lvm_volumes:
- data: data-lv1
data_vg: test_group

@ -1,7 +1,6 @@
---
os_tuning_params:
- { name: fs.file-max, value: 26234859 }
osd_scenario: lvm
osd_objectstore: "bluestore"
lvm_volumes:
- data: data-lv1

@ -2,7 +2,6 @@
ceph_osd_docker_run_script_path: /var/tmp
journal_size: 100
osd_objectstore: "filestore"
osd_scenario: lvm
lvm_volumes:
- data: data-lv1
journal: /dev/sdc1

@ -1,7 +1,6 @@
---
os_tuning_params:
- { name: fs.file-max, value: 26234859 }
osd_scenario: lvm
journal_size: 100
osd_objectstore: "filestore"
lvm_volumes: