mirror of https://github.com/ceph/ceph-ansible.git
osd: remove variable osd_scenario
As of stable-4.0, the only valid scenario is `lvm`, which makes this variable useless.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>

pull/3851/head
parent 4d5637fd8a
commit 4d35e9eeed
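For readers skimming the change below: after this commit an OSD host's group_vars no longer carries osd_scenario at all; only the ceph-volume inputs remain (devices for 'ceph-volume lvm batch', or lvm_volumes for pre-created logical volumes). A minimal sketch of such a config, reusing the illustrative names that already appear in the test fixtures in this diff (data-lv1, test_group and /dev/sdb are examples, not required values):

# group_vars/osds.yml -- minimal sketch after this change (illustrative values)
osd_objectstore: "bluestore"
# Option 1: hand whole devices to 'ceph-volume lvm batch'
devices:
  - /dev/sdb
# Option 2: point ceph-volume at pre-created logical volumes instead
# lvm_volumes:
#   - data: data-lv1
#     data_vg: test_group

Either form was already valid before this commit; the only difference is that the now-redundant osd_scenario: lvm line disappears.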
@@ -93,7 +93,6 @@ ansible_provision = proc do |ansible|
       ansible.extra_vars = ansible.extra_vars.merge({
         cluster_network: "#{CLUSTER_SUBNET}.0/16",
         devices: ['/dev/sdc'], # hardcode leftover disk
-        osd_scenario: 'collocated',
         monitor_address_block: "#{PUBLIC_SUBNET}.0/16",
         radosgw_address_block: "#{PUBLIC_SUBNET}.0/16",
         public_network: "#{PUBLIC_SUBNET}.0/16",

@@ -52,20 +52,13 @@ dummy:
 #osd_auto_discovery: false

 # Encrypt your OSD device using dmcrypt
-# If set to True, no matter which osd_objecstore and osd_scenario you use the data will be encrypted
+# If set to True, no matter which osd_objecstore you use the data will be encrypted
 #dmcrypt: False


-#osd_scenario: dummy
-#valid_osd_scenarios:
-# - lvm
-
-
 #dedicated_devices: []

-# III. Use ceph-volume to create OSDs from logical volumes.
-# Use 'osd_scenario: lvm' to enable this scenario.
-# when using lvm, not collocated journals.
+# Use ceph-volume to create OSDs from logical volumes.
 # lvm_volumes is a list of dictionaries.
 #
 # Filestore: Each dictionary must contain a data, journal and vg_name key. Any

@@ -387,7 +387,7 @@ ceph_rhcs_version: 3

 ## Rados Gateway options
 #
-#radosgw_frontend_type: beast # For additional frontends see: http://docs.ceph.com/docs/nautilus/radosgw/frontends/
+#radosgw_frontend_type: beast # For additionnal frontends see: http://docs.ceph.com/docs/nautilus/radosgw/frontends/

 #radosgw_civetweb_port: 8080
 #radosgw_civetweb_num_threads: 512
@@ -1,11 +1,11 @@
-- name: creates logical volumes for the bucket index or fs journals on a single device and prepares for use of osd_scenario=lvm.
+- name: creates logical volumes for the bucket index or fs journals on a single device.
   become: true
   hosts:
   - osds

   vars:
     logfile: |
-      Suggested cut and paste under "lvm_volumes:" in "group_vars/osds.yml" for configuring with osd_scenario=lvm
+      Suggested cut and paste under "lvm_volumes:" in "group_vars/osds.yml"
       -----------------------------------------------------------------------------------------------------------
       {% for lv in nvme_device_lvs %}
       - data: {{ lv.lv_name }}

@@ -324,7 +324,6 @@
   with_items: "{{ lvm_volumes }}"
   when:
     - lvm_volumes | default([]) | length > 0
-    - osd_scenario == "lvm"
     - ceph_volume_present.rc == 0

 - name: zap and destroy osds created by ceph-volume with devices

@@ -336,7 +335,6 @@
   with_items: "{{ devices | default([]) }}"
   when:
     - devices | default([]) | length > 0
-    - osd_scenario == "lvm"
     - ceph_volume_present.rc == 0

 - name: get ceph block partitions
@@ -344,142 +344,33 @@
   register: remove_osd_mountpoints
   ignore_errors: true

-- name: for ceph-disk based deployment
-  block:
-    - name: get prepare container
-      command: "docker ps -a -q --filter='name=ceph-osd-prepare'"
-      register: prepare_containers
-      ignore_errors: true
-
-    - name: remove ceph osd prepare container
-      command: "docker rm -f {{ item }}"
-      with_items: "{{ prepare_containers.stdout_lines }}"
-      ignore_errors: true
-
-    # NOTE(leseb): hope someone will find a more elegant way one day...
-    - name: see if encrypted partitions are present
-      shell: |
-        blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2
-      register: encrypted_ceph_partuuid
-
-    - name: get ceph data partitions
-      command: |
-        blkid -o device -t PARTLABEL="ceph data"
-      failed_when: false
-      register: ceph_data_partition_to_erase_path
-
-    - name: get ceph lockbox partitions
-      command: |
-        blkid -o device -t PARTLABEL="ceph lockbox"
-      failed_when: false
-      register: ceph_lockbox_partition_to_erase_path
-
-    - name: get ceph block partitions
-      command: |
-        blkid -o device -t PARTLABEL="ceph block"
-      failed_when: false
-      register: ceph_block_partition_to_erase_path
-
-    - name: get ceph journal partitions
-      command: |
-        blkid -o device -t PARTLABEL="ceph journal"
-      failed_when: false
-      register: ceph_journal_partition_to_erase_path
-
-    - name: get ceph db partitions
-      command: |
-        blkid -o device -t PARTLABEL="ceph block.db"
-      failed_when: false
-      register: ceph_db_partition_to_erase_path
-
-    - name: get ceph wal partitions
-      command: |
-        blkid -o device -t PARTLABEL="ceph block.wal"
-      failed_when: false
-      register: ceph_wal_partition_to_erase_path
-
-    - name: set_fact combined_devices_list
-      set_fact:
-        combined_devices_list: "{{ ceph_data_partition_to_erase_path.get('stdout_lines', []) +
-                                   ceph_lockbox_partition_to_erase_path.get('stdout_lines', []) +
-                                   ceph_block_partition_to_erase_path.get('stdout_lines', []) +
-                                   ceph_journal_partition_to_erase_path.get('stdout_lines', []) +
-                                   ceph_db_partition_to_erase_path.get('stdout_lines', []) +
-                                   ceph_wal_partition_to_erase_path.get('stdout_lines', []) }}"
-
-    - name: resolve parent device
-      command: lsblk --nodeps -no pkname "{{ item }}"
-      register: tmp_resolved_parent_device
-      with_items:
-        - "{{ combined_devices_list }}"
-
-    - name: set_fact resolved_parent_device
-      set_fact:
-        resolved_parent_device: "{{ tmp_resolved_parent_device.results | map(attribute='stdout') | list | unique }}"
-
-    - name: zap ceph osd disks
-      shell: |
-        docker run --rm \
-        --privileged=true \
-        --name ceph-osd-zap-{{ ansible_hostname }}-{{ item }} \
-        -v /dev/:/dev/ \
-        -e OSD_DEVICE=/dev/{{ item }} \
-        {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
-        zap_device
-      with_items:
-        - "{{ resolved_parent_device }}"
-
-    - name: wait until the zap containers die
-      shell: |
-        docker ps | grep -sq ceph-osd-zap-{{ ansible_hostname }}
-      register: zap_alive
-      failed_when: false
-      until: zap_alive.rc != 0
-      retries: 5
-      delay: 10
-
-    - name: remove ceph osd zap disk container
-      docker_container:
-        image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-        name: "ceph-osd-zap-{{ ansible_hostname }}-{{ item }}"
-        state: absent
-      with_items:
-        - "{{ resolved_parent_device }}"
-  when:
-    - osd_scenario != "lvm"
-
-- name: for ceph-volume based deployments
-  block:
-    - name: zap and destroy osds created by ceph-volume with lvm_volumes
-      ceph_volume:
-        data: "{{ item.data }}"
-        data_vg: "{{ item.data_vg|default(omit) }}"
-        journal: "{{ item.journal|default(omit) }}"
-        journal_vg: "{{ item.journal_vg|default(omit) }}"
-        db: "{{ item.db|default(omit) }}"
-        db_vg: "{{ item.db_vg|default(omit) }}"
-        wal: "{{ item.wal|default(omit) }}"
-        wal_vg: "{{ item.wal_vg|default(omit) }}"
-        action: "zap"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
-        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-      with_items: "{{ lvm_volumes }}"
-      when: lvm_volumes | default([]) | length > 0
-
-    - name: zap and destroy osds created by ceph-volume with devices
-      ceph_volume:
-        data: "{{ item }}"
-        action: "zap"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
-        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-      with_items: "{{ devices | default([]) }}"
-      when: devices | default([]) | length > 0
-  when:
-    - osd_scenario == "lvm"
+- name: zap and destroy osds created by ceph-volume with lvm_volumes
+  ceph_volume:
+    data: "{{ item.data }}"
+    data_vg: "{{ item.data_vg|default(omit) }}"
+    journal: "{{ item.journal|default(omit) }}"
+    journal_vg: "{{ item.journal_vg|default(omit) }}"
+    db: "{{ item.db|default(omit) }}"
+    db_vg: "{{ item.db_vg|default(omit) }}"
+    wal: "{{ item.wal|default(omit) }}"
+    wal_vg: "{{ item.wal_vg|default(omit) }}"
+    action: "zap"
+  environment:
+    CEPH_VOLUME_DEBUG: 1
+    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+  with_items: "{{ lvm_volumes }}"
+  when: lvm_volumes | default([]) | length > 0
+
+- name: zap and destroy osds created by ceph-volume with devices
+  ceph_volume:
+    data: "{{ item }}"
+    action: "zap"
+  environment:
+    CEPH_VOLUME_DEBUG: 1
+    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+  with_items: "{{ devices | default([]) }}"
+  when: devices | default([]) | length > 0

 - name: remove ceph osd service
   file:
@@ -1,6 +1,6 @@
 # This file configures logical volume creation for FS Journals on NVMe, a NVMe based bucket index, and HDD based OSDs.
 # This playbook configures one NVMe device at a time. If your OSD systems contain multiple NVMe devices, you will need to edit the key variables ("nvme_device", "hdd_devices") for each run.
-# It is meant to be used when osd_objectstore=filestore and it outputs the necessary input for group_vars/osds.yml when configured with osd_scenario=lvm.
+# It is meant to be used when osd_objectstore=filestore and it outputs the necessary input for group_vars/osds.yml.
 # The LVs for journals are created first then the LVs for data. All LVs for journals correspond to a LV for data.
 #
 ## CHANGE THESE VARS ##

@@ -8,19 +8,11 @@
   when:
     - inventory_hostname in groups.get(osd_group_name, [])
   block:
-    - name: count number of osds for ceph-disk scenarios
-      set_fact:
-        num_osds: "{{ devices | length | int }}"
-      when:
-        - devices | default([]) | length > 0
-        - osd_scenario in ['collocated', 'non-collocated']
-
     - name: count number of osds for lvm scenario
       set_fact:
         num_osds: "{{ lvm_volumes | length | int }}"
       when:
         - lvm_volumes | default([]) | length > 0
-        - osd_scenario == 'lvm'

     - name: run 'ceph-volume lvm batch --report' to see how many osds are to be created
       ceph_volume:
@@ -40,14 +32,12 @@
         PYTHONIOENCODING: utf-8
       when:
         - devices | default([]) | length > 0
-        - osd_scenario == 'lvm'

     - name: set_fact num_osds from the output of 'ceph-volume lvm batch --report'
       set_fact:
         num_osds: "{{ (lvm_batch_report.stdout | from_json).osds | length | int }}"
       when:
         - devices | default([]) | length > 0
-        - osd_scenario == 'lvm'
         - (lvm_batch_report.stdout | from_json).changed

     - name: run 'ceph-volume lvm list' to see how many osds have already been created

@@ -61,7 +51,6 @@
         PYTHONIOENCODING: utf-8
       when:
         - devices | default([]) | length > 0
-        - osd_scenario == 'lvm'
         - not (lvm_batch_report.stdout | from_json).changed

     - name: set_fact num_osds from the output of 'ceph-volume lvm list'

@@ -69,7 +58,6 @@
         num_osds: "{{ lvm_list.stdout | from_json | length | int }}"
       when:
         - devices | default([]) | length > 0
-        - osd_scenario == 'lvm'
         - not (lvm_batch_report.stdout | from_json).changed

 # ceph-common
@@ -66,13 +66,7 @@ for unit in $(systemctl list-units | grep -E "loaded * active" | grep -oE "ceph-
 # We need to wait because it may take some time for the socket to actually exists
 COUNT=10
 # Wait and ensure the socket exists after restarting the daemon
-{% if containerized_deployment and osd_scenario != 'lvm' -%}
-id=$(get_dev_name "$unit")
-container_id=$(get_container_id_from_dev_name "$id")
-wait_for_socket_in_container "$container_id"
-osd_id=$whoami
-container_exec="{{ container_binary }} exec $container_id"
-{% elif containerized_deployment and osd_scenario == 'lvm' %}
+{% if containerized_deployment %}
 osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]+')
 container_id=$(get_container_id_from_dev_name "ceph-osd-${osd_id}")
 container_exec="{{ container_binary }} exec $container_id"
@@ -44,20 +44,13 @@ devices: []
 osd_auto_discovery: false

 # Encrypt your OSD device using dmcrypt
-# If set to True, no matter which osd_objecstore and osd_scenario you use the data will be encrypted
+# If set to True, no matter which osd_objecstore you use the data will be encrypted
 dmcrypt: False


-osd_scenario: lvm
-valid_osd_scenarios:
-  - lvm
-
-
 dedicated_devices: []

-# III. Use ceph-volume to create OSDs from logical volumes.
-# Use 'osd_scenario: lvm' to enable this scenario.
-# when using lvm, not collocated journals.
+# Use ceph-volume to create OSDs from logical volumes.
 # lvm_volumes is a list of dictionaries.
 #
 # Filestore: Each dictionary must contain a data, journal and vg_name key. Any
@@ -1,24 +0,0 @@
----
-- name: resolve dedicated device link(s)
-  command: readlink -f {{ item }}
-  changed_when: false
-  with_items: "{{ dedicated_devices }}"
-  register: dedicated_devices_prepare_canonicalize
-  when:
-    - osd_scenario == 'non-collocated'
-    - not osd_auto_discovery
-
-- name: set_fact build dedicated_devices from resolved symlinks
-  set_fact:
-    dedicated_devices_tmp: "{{ dedicated_devices_tmp | default([]) + [ item.stdout ] }}"
-  with_items: "{{ dedicated_devices_prepare_canonicalize.results }}"
-  when:
-    - osd_scenario == 'non-collocated'
-    - not osd_auto_discovery
-
-- name: set_fact build final dedicated_devices list
-  set_fact:
-    dedicated_devices: "{{ dedicated_devices_tmp | reject('search','/dev/disk') | list }}"
-  when:
-    - osd_scenario == 'non-collocated'
-    - not osd_auto_discovery
@@ -29,7 +29,6 @@
   register: result
   until: result is succeeded
   when:
-    - osd_scenario == 'lvm'
     - not is_atomic
   tags:
     - with_pkg

@@ -40,9 +39,6 @@
 - name: include container_options_facts.yml
   include_tasks: container_options_facts.yml

-- name: include build_devices.yml
-  include_tasks: build_devices.yml
-
 - name: read information about the devices
   parted:
     device: "{{ item }}"

@@ -53,7 +49,6 @@
 - name: include_tasks scenarios/lvm.yml
   include_tasks: scenarios/lvm.yml
   when:
-    - osd_scenario == 'lvm'
     - lvm_volumes|length > 0
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False

@@ -61,17 +56,10 @@
 - name: include_tasks scenarios/lvm-batch.yml
   include_tasks: scenarios/lvm-batch.yml
   when:
-    - osd_scenario == 'lvm'
     - devices|length > 0
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False

-- name: include_tasks activate_osds.yml
-  include_tasks: activate_osds.yml
-  when:
-    - not containerized_deployment
-    - osd_scenario != 'lvm'
-
 - name: include_tasks start_osds.yml
   include_tasks: start_osds.yml

@@ -12,22 +12,6 @@
   when:
     - ceph_docker_on_openstack

-- name: test if the container image has directory {{ container_bin_path }}
-  command: "{{ container_binary }} run --rm --net=host --entrypoint=test {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} -d {{ container_bin_path }}"
-  changed_when: false
-  failed_when: false
-  register: test_container_bin_path
-  when:
-    - osd_scenario != 'lvm'
-
-- name: test if the container image has the disk_list function
-  command: "{{ container_binary }} run --rm --net=host --entrypoint=stat {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} {{ container_bin_path + '/disk_list.sh' if test_container_bin_path.rc == 0 else 'disk_list.sh' }}"
-  changed_when: false
-  failed_when: false
-  register: disk_list
-  when:
-    - osd_scenario != 'lvm'
-
 - name: generate ceph osd docker run script
   become: true
   template:

@@ -72,12 +56,12 @@

 - name: systemd start osd
   systemd:
-    name: ceph-osd@{{ item | regex_replace('/dev/', '') if osd_scenario != 'lvm' and containerized_deployment else item }}
+    name: ceph-osd@{{ item }}
     state: started
     enabled: yes
     masked: no
     daemon_reload: yes
-  with_items: "{{ devices if osd_scenario != 'lvm' and containerized_deployment else ((ceph_osd_ids.stdout | from_json).keys() | list) if osd_scenario == 'lvm' and not containerized_deployment else osd_ids_non_container.stdout_lines }}"
+  with_items: "{{ ((ceph_osd_ids.stdout | from_json).keys() | list) if not containerized_deployment else osd_ids_non_container.stdout_lines }}"

 - name: ensure systemd service override directory exists
   file:
@@ -2,70 +2,6 @@
 # {{ ansible_managed }}


-{% if osd_scenario != 'lvm' -%}
-{% if disk_list.get('rc') == 0 -%}
-#############
-# VARIABLES #
-#############
-DOCKER_ENV=""
-
-#############
-# FUNCTIONS #
-#############
-function expose_partitions () {
-  DOCKER_ENV=$({{ container_binary }} run --rm --net=host --name expose_partitions_${1} --privileged=true -v /dev/:/dev/ -v /etc/ceph:/etc/ceph:z -e CLUSTER={{ cluster }} -e OSD_DEVICE=/dev/${1} {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} disk_list)
-}
-{% else -%}
-# NOTE(leseb): maintains backwards compatibility with old ceph-docker Jewel images
-# Jewel images prior to https://github.com/ceph/ceph-docker/pull/797
-REGEX="[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
-function expose_partitions {
-  if {{ container_binary }} ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}; then
-    if [[ ! -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log ]]; then
-      {{ container_binary }} logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log
-    fi
-  fi
-  if {{ container_binary }} ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-${1}; then
-    if [[ ! -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log ]]; then
-      {{ container_binary }} logs ceph-osd-prepare-{{ ansible_hostname }}-${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log
-    fi
-  fi
-  if [[ -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log ]]; then
-    part=$(grep "Journal is GPT partition" {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log | grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
-    DOCKER_ENV="-e OSD_JOURNAL=$part"
-  fi
-  if [[ -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log ]]; then
-    part=$(grep "Journal is GPT partition" {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log | grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
-    DOCKER_ENV="-e OSD_JOURNAL=$part"
-  fi
-  if [[ -z $DOCKER_ENV ]]; then
-    # NOTE(leseb): if we arrive here this probably means we just switched from non-containers to containers.
-    # This is tricky as we don't have any info on the type of OSD, this is 'only' a problem for non-collocated scenarios
-    # We can't assume that the 'ceph' is still present so calling Docker exec instead
-    part=$({{ container_binary }} run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} list /dev/${1} | awk '/journal / {print $1}')
-    DOCKER_ENV="-e OSD_JOURNAL=$part"
-  fi
-  # if empty, the previous command didn't find anything so we fail
-  if [[ -z $DOCKER_ENV ]]; then
-    echo "ERROR: could not discover ceph partitions"
-    exit 1
-  fi
-}
-
-{% endif -%}
-
-expose_partitions "$1"
-
-# discover osd_objectstore for ceph-disk based osds
-if [[ $DOCKER_ENV =~ "BLUESTORE" ]]; then
-  DOCKER_ENV="$DOCKER_ENV -e OSD_BLUESTORE=1"
-elif [[ $DOCKER_ENV =~ "JOURNAL" ]]; then
-  DOCKER_ENV="$DOCKER_ENV -e OSD_FILESTORE=1"
-fi
-
-{% endif -%}
-
-
 ########
 # MAIN #
 ########
@@ -112,17 +48,9 @@ numactl \
 {% if (ceph_tcmalloc_max_total_thread_cache | int) > 0 and osd_objectstore == 'filestore' -%}
   -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \
 {% endif -%}
-{% if osd_scenario == 'lvm' -%}
   -v /run/lvm/:/run/lvm/ \
   -e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE \
   -e OSD_ID="$1" \
   --name=ceph-osd-"$1" \
-{% else -%}
-  $DOCKER_ENV \
-  -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
-  -e OSD_DEVICE=/dev/"${1}" \
-  -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
-  --name=ceph-osd-{{ ansible_hostname }}-"${1}" \
-{% endif -%}
   {{ ceph_osd_docker_extra_env }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
@@ -7,19 +7,10 @@ After=docker.service

 [Service]
 EnvironmentFile=-/etc/environment
-{% if osd_scenario == 'lvm' -%}
 ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-osd-%i
 ExecStartPre=-/usr/bin/{{ container_binary }} rm -f ceph-osd-%i
-{% else %}
-ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-osd-{{ ansible_hostname }}-%i
-ExecStartPre=-/usr/bin/{{ container_binary }} rm -f ceph-osd-{{ ansible_hostname }}-%i
-{% endif -%}
 ExecStart={{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh %i
-{% if osd_scenario == 'lvm' -%}
 ExecStop=-/usr/bin/{{ container_binary }} stop ceph-osd-%i
-{% else %}
-ExecStop=-/usr/bin/{{ container_binary }} stop ceph-osd-{{ ansible_hostname }}-%i
-{% endif -%}
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
@@ -95,13 +95,3 @@
   when:
     - iscsi_gw_group_name in group_names

-- name: warn users that ceph-disk scenarios will be removed on 3.3
-  debug:
-    msg: |
-      osd_scenario is set to {{ osd_scenario }}, this variable is not used anymore and defaults to 'lvm'.
-      If you have something different than 'lvm', this means you want ceph-ansible to manage your ceph-disk OSDs.
-      So basically, ceph-ansible can still start your ceph-disk osd services
-  run_once: true
-  when:
-    - osd_group_name in group_names
-    - osd_scenario != 'lvm'
@@ -97,9 +97,7 @@ def node(host, request):
     rolling_update = os.environ.get("ROLLING_UPDATE", "False")
     group_names = ansible_vars["group_names"]
     docker = ansible_vars.get("docker")
-    osd_scenario = ansible_vars.get("osd_scenario")
     radosgw_num_instances = ansible_vars.get("radosgw_num_instances", 1)
-    lvm_scenario = osd_scenario in ['lvm', 'lvm-batch']
     ceph_release_num = {
         'jewel': 10,
         'kraken': 11,

@@ -123,12 +121,6 @@ def node(host, request):
             request.function, group_names)
         pytest.skip(reason)

-    if request.node.get_closest_marker("no_lvm_scenario") and lvm_scenario:
-        pytest.skip("Not a valid test for lvm scenarios")
-
-    if not lvm_scenario and request.node.get_closest_marker("lvm_scenario"):
-        pytest.skip("Not a valid test for non-lvm scenarios")
-
     if request.node.get_closest_marker("no_docker") and docker:
         pytest.skip(
             "Not a valid test for containerized deployments or atomic hosts")

@@ -137,11 +129,6 @@ def node(host, request):
         pytest.skip(
             "Not a valid test for non-containerized deployments or atomic hosts") # noqa E501

-    journal_collocation_test = ansible_vars.get("osd_scenario") == "collocated"
-    if request.node.get_closest_marker("journal_collocation") and not journal_collocation_test: # noqa E501
-        pytest.skip("Scenario is not using journal collocation")
-
-
     data = dict(
         vars=ansible_vars,
@@ -10,7 +10,6 @@ monitor_interface: eth1
 radosgw_interface: eth1
 journal_size: 100
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:

@@ -8,7 +8,6 @@ monitor_interface: eth1
 radosgw_interface: eth1
 journal_size: 100
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:

@@ -10,7 +10,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
 radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:

@@ -8,7 +8,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
 radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:

@@ -1,7 +1,6 @@
 ---
 ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 lvm_volumes:
   - data: data-lv1
     data_vg: test_group

@@ -1,7 +1,6 @@
 ---
 os_tuning_params:
   - { name: fs.file-max, value: 26234859 }
-osd_scenario: lvm
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -11,7 +11,6 @@ public_network: "192.168.39.0/24"
 cluster_network: "192.168.40.0/24"
 monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:

@@ -6,7 +6,6 @@ public_network: "192.168.39.0/24"
 cluster_network: "192.168.40.0/24"
 monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:

@@ -1,6 +1,5 @@
 ---
 ceph_osd_docker_run_script_path: /var/tmp
-osd_scenario: lvm
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -1,6 +1,5 @@
 ---
 ceph_osd_docker_run_script_path: /var/tmp
-osd_scenario: lvm
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -15,7 +15,6 @@ radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
 journal_size: 100
 osd_objectstore: "bluestore"
 crush_device_class: test
-osd_scenario: lvm
 copy_admin_key: true
 osd_auto_discovery: true
 os_tuning_params:

@@ -9,7 +9,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
 radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
 osd_objectstore: "bluestore"
 crush_device_class: test
-osd_scenario: lvm
 copy_admin_key: true
 osd_auto_discovery: true
 os_tuning_params:

@@ -15,7 +15,6 @@ radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
 journal_size: 100
 osd_objectstore: "bluestore"
 crush_device_class: test
-osd_scenario: lvm
 copy_admin_key: true
 devices:
   - /dev/sdb

@@ -9,7 +9,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
 radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
 osd_objectstore: "bluestore"
 crush_device_class: test
-osd_scenario: lvm
 copy_admin_key: true
 devices:
   - /dev/disk/by-id/ata-QEMU_HARDDISK_QM00002

@@ -15,7 +15,6 @@ osd_objectstore: "filestore"
 copy_admin_key: true
 containerized_deployment: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
-osd_scenario: lvm
 lvm_volumes:
   - data: data-lv1
     journal: /dev/sdc1

@@ -10,7 +10,6 @@ journal_size: 100
 osd_objectstore: "filestore"
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
-osd_scenario: lvm
 lvm_volumes:
   - data: data-lv1
     journal: /dev/sdc1
@@ -9,73 +9,60 @@
   gather_facts: false
   become: yes
   tasks:
-
-    - block:
-      - name: check if it is atomic host
-        stat:
-          path: /run/ostree-booted
-        register: stat_ostree
-        tags:
-          - always
-
-      - name: set_fact is_atomic
-        set_fact:
-          is_atomic: '{{ stat_ostree.stat.exists }}'
-        tags:
-          - always
-
-      # Some images may not have lvm2 installed
-      - name: install lvm2
-        package:
-          name: lvm2
-          state: present
-        register: result
-        until: result is succeeded
-        when:
-          - not is_atomic
-
-      - name: create physical volume
-        command: pvcreate /dev/sdb
-        failed_when: false
-
-      - name: create volume group
-        command: vgcreate test_group /dev/sdb
-        failed_when: false
-
-      - name: create logical volume 1
-        command: lvcreate --yes -l 50%FREE -n data-lv1 test_group
-        failed_when: false
-
-      - name: create logical volume 2
-        command: lvcreate --yes -l 50%FREE -n data-lv2 test_group
-        failed_when: false
-
-      - name: partition /dev/sdc for journals
-        parted:
-          device: /dev/sdc
-          number: 1
-          part_start: 0%
-          part_end: 50%
-          unit: '%'
-          label: gpt
-          state: present
-
-      - name: partition /dev/sdc for journals
-        parted:
-          device: /dev/sdc
-          number: 2
-          part_start: 50%
-          part_end: 100%
-          unit: '%'
-          state: present
-          label: gpt
-
-      - name: create journals vg from /dev/sdc2
-        lvg:
-          vg: journals
-          pvs: /dev/sdc2
-
-      - name: create journal1 lv
-        command: lvcreate --yes -l 100%FREE -n journal1 journals
-        failed_when: false
-      when: osd_scenario == 'lvm'
+    - name: check if it is atomic host
+      stat:
+        path: /run/ostree-booted
+      register: stat_ostree
+      tags:
+        - always
+    - name: set_fact is_atomic
+      set_fact:
+        is_atomic: '{{ stat_ostree.stat.exists }}'
+      tags:
+        - always
+    # Some images may not have lvm2 installed
+    - name: install lvm2
+      package:
+        name: lvm2
+        state: present
+      register: result
+      until: result is succeeded
+      when:
+        - not is_atomic
+    - name: create physical volume
+      command: pvcreate /dev/sdb
+      failed_when: false
+    - name: create volume group
+      command: vgcreate test_group /dev/sdb
+      failed_when: false
+    - name: create logical volume 1
+      command: lvcreate --yes -l 50%FREE -n data-lv1 test_group
+      failed_when: false
+    - name: create logical volume 2
+      command: lvcreate --yes -l 50%FREE -n data-lv2 test_group
+      failed_when: false
+    - name: partition /dev/sdc for journals
+      parted:
+        device: /dev/sdc
+        number: 1
+        part_start: 0%
+        part_end: 50%
+        unit: '%'
+        label: gpt
+        state: present
+    - name: partition /dev/sdc for journals
+      parted:
+        device: /dev/sdc
+        number: 2
+        part_start: 50%
+        part_end: 100%
+        unit: '%'
+        state: present
+        label: gpt
+    - name: create journals vg from /dev/sdc2
+      lvg:
+        vg: journals
+        pvs: /dev/sdc2
+    - name: create journal1 lv
+      command: lvcreate --yes -l 100%FREE -n journal1 journals
+      failed_when: false
@@ -49,7 +49,6 @@ all:
 - {name: vms, pg_num: 8, rule_name: ''}
 - {name: volumes, pg_num: 8, rule_name: ''}
 osd_objectstore: filestore
-osd_scenario: collocated
 ceph_osd_docker_run_script_path: /opt
 pools: []
 public_network: 192.168.95.0/24

@@ -1,7 +1,6 @@
 ---
 ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 lvm_volumes:
   - data: data-lv1
     data_vg: test_group

@@ -1,7 +1,6 @@
 ---
 ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 lvm_volumes:
   - data: data-lv1
     data_vg: test_group

@@ -10,7 +10,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
 radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:

@@ -10,7 +10,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
 radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:

@@ -8,7 +8,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
 radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:

@@ -8,7 +8,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}
 radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:

@@ -1,7 +1,6 @@
 ---
 ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
-osd_scenario: lvm
 lvm_volumes:
   - data: data-lv1
     data_vg: test_group

@@ -1,7 +1,6 @@
 ---
 os_tuning_params:
   - { name: fs.file-max, value: 26234859 }
-osd_scenario: lvm
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -2,7 +2,6 @@
 ceph_osd_docker_run_script_path: /var/tmp
 journal_size: 100
 osd_objectstore: "filestore"
-osd_scenario: lvm
 lvm_volumes:
   - data: data-lv1
     journal: /dev/sdc1

@@ -1,7 +1,6 @@
 ---
 os_tuning_params:
   - { name: fs.file-max, value: 26234859 }
-osd_scenario: lvm
 journal_size: 100
 osd_objectstore: "filestore"
 lvm_volumes: