diff --git a/Vagrantfile b/Vagrantfile index f86968ec4..7916593cf 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -93,7 +93,6 @@ ansible_provision = proc do |ansible| ansible.extra_vars = ansible.extra_vars.merge({ cluster_network: "#{CLUSTER_SUBNET}.0/16", devices: ['/dev/sdc'], # hardcode leftover disk - osd_scenario: 'collocated', monitor_address_block: "#{PUBLIC_SUBNET}.0/16", radosgw_address_block: "#{PUBLIC_SUBNET}.0/16", public_network: "#{PUBLIC_SUBNET}.0/16", diff --git a/group_vars/osds.yml.sample b/group_vars/osds.yml.sample index 53aeb3b88..f64cd524f 100644 --- a/group_vars/osds.yml.sample +++ b/group_vars/osds.yml.sample @@ -52,20 +52,13 @@ dummy: #osd_auto_discovery: false # Encrypt your OSD device using dmcrypt -# If set to True, no matter which osd_objecstore and osd_scenario you use the data will be encrypted +# If set to True, no matter which osd_objectstore you use the data will be encrypted #dmcrypt: False -#osd_scenario: dummy -#valid_osd_scenarios: -# - lvm - - #dedicated_devices: [] -# III. Use ceph-volume to create OSDs from logical volumes. -# Use 'osd_scenario: lvm' to enable this scenario. -# when using lvm, not collocated journals. +# Use ceph-volume to create OSDs from logical volumes. # lvm_volumes is a list of dictionaries. # # Filestore: Each dictionary must contain a data, journal and vg_name key. 
Any diff --git a/group_vars/rhcs.yml.sample b/group_vars/rhcs.yml.sample index 639314060..be3ca9f09 100644 --- a/group_vars/rhcs.yml.sample +++ b/group_vars/rhcs.yml.sample @@ -387,7 +387,7 @@ ceph_rhcs_version: 3 ## Rados Gateway options # -#radosgw_frontend_type: beast # For additional frontends see: http://docs.ceph.com/docs/nautilus/radosgw/frontends/ +#radosgw_frontend_type: beast # For additional frontends see: http://docs.ceph.com/docs/nautilus/radosgw/frontends/ #radosgw_civetweb_port: 8080 #radosgw_civetweb_num_threads: 512 diff --git a/infrastructure-playbooks/lv-create.yml b/infrastructure-playbooks/lv-create.yml index a05ce15a0..ec3c72b87 100644 --- a/infrastructure-playbooks/lv-create.yml +++ b/infrastructure-playbooks/lv-create.yml @@ -1,11 +1,11 @@ -- name: creates logical volumes for the bucket index or fs journals on a single device and prepares for use of osd_scenario=lvm. +- name: creates logical volumes for the bucket index or fs journals on a single device. become: true hosts: - osds vars: logfile: | - Suggested cut and paste under "lvm_volumes:" in "group_vars/osds.yml" for configuring with osd_scenario=lvm + Suggested cut and paste under "lvm_volumes:" in "group_vars/osds.yml" ----------------------------------------------------------------------------------------------------------- {% for lv in nvme_device_lvs %} - data: {{ lv.lv_name }} diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml index b4aa40e3d..eb544bd79 100644 --- a/infrastructure-playbooks/purge-cluster.yml +++ b/infrastructure-playbooks/purge-cluster.yml @@ -324,7 +324,6 @@ with_items: "{{ lvm_volumes }}" when: - lvm_volumes | default([]) | length > 0 - - osd_scenario == "lvm" - ceph_volume_present.rc == 0 - name: zap and destroy osds created by ceph-volume with devices @@ -336,7 +335,6 @@ with_items: "{{ devices | default([]) }}" when: - devices | default([]) | length > 0 - - osd_scenario == "lvm" - ceph_volume_present.rc == 0 
- name: get ceph block partitions diff --git a/infrastructure-playbooks/purge-docker-cluster.yml b/infrastructure-playbooks/purge-docker-cluster.yml index 04d0587c0..6dc0ab4d9 100644 --- a/infrastructure-playbooks/purge-docker-cluster.yml +++ b/infrastructure-playbooks/purge-docker-cluster.yml @@ -344,142 +344,33 @@ register: remove_osd_mountpoints ignore_errors: true - - name: for ceph-disk based deployment - block: - - name: get prepare container - command: "docker ps -a -q --filter='name=ceph-osd-prepare'" - register: prepare_containers - ignore_errors: true - - - name: remove ceph osd prepare container - command: "docker rm -f {{ item }}" - with_items: "{{ prepare_containers.stdout_lines }}" - ignore_errors: true - - # NOTE(leseb): hope someone will find a more elegant way one day... - - name: see if encrypted partitions are present - shell: | - blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2 - register: encrypted_ceph_partuuid - - - name: get ceph data partitions - command: | - blkid -o device -t PARTLABEL="ceph data" - failed_when: false - register: ceph_data_partition_to_erase_path - - - name: get ceph lockbox partitions - command: | - blkid -o device -t PARTLABEL="ceph lockbox" - failed_when: false - register: ceph_lockbox_partition_to_erase_path - - - name: get ceph block partitions - command: | - blkid -o device -t PARTLABEL="ceph block" - failed_when: false - register: ceph_block_partition_to_erase_path - - - name: get ceph journal partitions - command: | - blkid -o device -t PARTLABEL="ceph journal" - failed_when: false - register: ceph_journal_partition_to_erase_path - - - name: get ceph db partitions - command: | - blkid -o device -t PARTLABEL="ceph block.db" - failed_when: false - register: ceph_db_partition_to_erase_path - - - name: get ceph wal partitions - command: | - blkid -o device -t PARTLABEL="ceph block.wal" - failed_when: false - register: ceph_wal_partition_to_erase_path - - - 
name: set_fact combined_devices_list - set_fact: - combined_devices_list: "{{ ceph_data_partition_to_erase_path.get('stdout_lines', []) + - ceph_lockbox_partition_to_erase_path.get('stdout_lines', []) + - ceph_block_partition_to_erase_path.get('stdout_lines', []) + - ceph_journal_partition_to_erase_path.get('stdout_lines', []) + - ceph_db_partition_to_erase_path.get('stdout_lines', []) + - ceph_wal_partition_to_erase_path.get('stdout_lines', []) }}" - - - name: resolve parent device - command: lsblk --nodeps -no pkname "{{ item }}" - register: tmp_resolved_parent_device - with_items: - - "{{ combined_devices_list }}" - - - name: set_fact resolved_parent_device - set_fact: - resolved_parent_device: "{{ tmp_resolved_parent_device.results | map(attribute='stdout') | list | unique }}" - - - name: zap ceph osd disks - shell: | - docker run --rm \ - --privileged=true \ - --name ceph-osd-zap-{{ ansible_hostname }}-{{ item }} \ - -v /dev/:/dev/ \ - -e OSD_DEVICE=/dev/{{ item }} \ - {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \ - zap_device - with_items: - - "{{ resolved_parent_device }}" - - - name: wait until the zap containers die - shell: | - docker ps | grep -sq ceph-osd-zap-{{ ansible_hostname }} - register: zap_alive - failed_when: false - until: zap_alive.rc != 0 - retries: 5 - delay: 10 - - - name: remove ceph osd zap disk container - docker_container: - image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" - name: "ceph-osd-zap-{{ ansible_hostname }}-{{ item }}" - state: absent - with_items: - - "{{ resolved_parent_device }}" - when: - - osd_scenario != "lvm" - - - name: for ceph-volume based deployments - block: - - name: zap and destroy osds created by ceph-volume with lvm_volumes - ceph_volume: - data: "{{ item.data }}" - data_vg: "{{ item.data_vg|default(omit) }}" - journal: "{{ item.journal|default(omit) }}" - journal_vg: "{{ item.journal_vg|default(omit) }}" - db: "{{ item.db|default(omit) 
}}" - db_vg: "{{ item.db_vg|default(omit) }}" - wal: "{{ item.wal|default(omit) }}" - wal_vg: "{{ item.wal_vg|default(omit) }}" - action: "zap" - environment: - CEPH_VOLUME_DEBUG: 1 - CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" - CEPH_CONTAINER_BINARY: "{{ container_binary }}" - with_items: "{{ lvm_volumes }}" - when: lvm_volumes | default([]) | length > 0 - - - name: zap and destroy osds created by ceph-volume with devices - ceph_volume: - data: "{{ item }}" - action: "zap" - environment: - CEPH_VOLUME_DEBUG: 1 - CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" - CEPH_CONTAINER_BINARY: "{{ container_binary }}" - with_items: "{{ devices | default([]) }}" - when: devices | default([]) | length > 0 - when: - - osd_scenario == "lvm" + - name: zap and destroy osds created by ceph-volume with lvm_volumes + ceph_volume: + data: "{{ item.data }}" + data_vg: "{{ item.data_vg|default(omit) }}" + journal: "{{ item.journal|default(omit) }}" + journal_vg: "{{ item.journal_vg|default(omit) }}" + db: "{{ item.db|default(omit) }}" + db_vg: "{{ item.db_vg|default(omit) }}" + wal: "{{ item.wal|default(omit) }}" + wal_vg: "{{ item.wal_vg|default(omit) }}" + action: "zap" + environment: + CEPH_VOLUME_DEBUG: 1 + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + with_items: "{{ lvm_volumes }}" + when: lvm_volumes | default([]) | length > 0 + - name: zap and destroy osds created by ceph-volume with devices + ceph_volume: + data: "{{ item }}" + action: "zap" + environment: + CEPH_VOLUME_DEBUG: 1 + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + with_items: "{{ devices | default([]) }}" + when: devices | default([]) | length > 0 - name: remove ceph osd service file: 
diff --git a/infrastructure-playbooks/vars/lv_vars.yaml.sample b/infrastructure-playbooks/vars/lv_vars.yaml.sample index ba618a10a..790d277ae 100644 --- a/infrastructure-playbooks/vars/lv_vars.yaml.sample +++ b/infrastructure-playbooks/vars/lv_vars.yaml.sample @@ -1,6 +1,6 @@ # This file configures logical volume creation for FS Journals on NVMe, a NVMe based bucket index, and HDD based OSDs. # This playbook configures one NVMe device at a time. If your OSD systems contain multiple NVMe devices, you will need to edit the key variables ("nvme_device", "hdd_devices") for each run. -# It is meant to be used when osd_objectstore=filestore and it outputs the necessary input for group_vars/osds.yml when configured with osd_scenario=lvm. +# It is meant to be used when osd_objectstore=filestore and it outputs the necessary input for group_vars/osds.yml. # The LVs for journals are created first then the LVs for data. All LVs for journals correspond to a LV for data. # ## CHANGE THESE VARS ## diff --git a/roles/ceph-config/tasks/main.yml b/roles/ceph-config/tasks/main.yml index 2d63cc352..0c14d2d97 100644 --- a/roles/ceph-config/tasks/main.yml +++ b/roles/ceph-config/tasks/main.yml @@ -8,19 +8,11 @@ when: - inventory_hostname in groups.get(osd_group_name, []) block: - - name: count number of osds for ceph-disk scenarios - set_fact: - num_osds: "{{ devices | length | int }}" - when: - - devices | default([]) | length > 0 - - osd_scenario in ['collocated', 'non-collocated'] - - name: count number of osds for lvm scenario set_fact: num_osds: "{{ lvm_volumes | length | int }}" when: - lvm_volumes | default([]) | length > 0 - - osd_scenario == 'lvm' - name: run 'ceph-volume lvm batch --report' to see how many osds are to be created ceph_volume: @@ -40,14 +32,12 @@ PYTHONIOENCODING: utf-8 when: - devices | default([]) | length > 0 - - osd_scenario == 'lvm' - name: set_fact num_osds from the output of 'ceph-volume lvm batch --report' set_fact: num_osds: "{{ (lvm_batch_report.stdout 
| from_json).osds | length | int }}" when: - devices | default([]) | length > 0 - - osd_scenario == 'lvm' - (lvm_batch_report.stdout | from_json).changed - name: run 'ceph-volume lvm list' to see how many osds have already been created @@ -61,7 +51,6 @@ PYTHONIOENCODING: utf-8 when: - devices | default([]) | length > 0 - - osd_scenario == 'lvm' - not (lvm_batch_report.stdout | from_json).changed - name: set_fact num_osds from the output of 'ceph-volume lvm list' @@ -69,7 +58,6 @@ num_osds: "{{ lvm_list.stdout | from_json | length | int }}" when: - devices | default([]) | length > 0 - - osd_scenario == 'lvm' - not (lvm_batch_report.stdout | from_json).changed # ceph-common diff --git a/roles/ceph-handler/templates/restart_osd_daemon.sh.j2 b/roles/ceph-handler/templates/restart_osd_daemon.sh.j2 index 76ed0d5ea..6bddbf74d 100644 --- a/roles/ceph-handler/templates/restart_osd_daemon.sh.j2 +++ b/roles/ceph-handler/templates/restart_osd_daemon.sh.j2 @@ -66,13 +66,7 @@ for unit in $(systemctl list-units | grep -E "loaded * active" | grep -oE "ceph- # We need to wait because it may take some time for the socket to actually exists COUNT=10 # Wait and ensure the socket exists after restarting the daemon - {% if containerized_deployment and osd_scenario != 'lvm' -%} - id=$(get_dev_name "$unit") - container_id=$(get_container_id_from_dev_name "$id") - wait_for_socket_in_container "$container_id" - osd_id=$whoami - container_exec="{{ container_binary }} exec $container_id" - {% elif containerized_deployment and osd_scenario == 'lvm' %} + {% if containerized_deployment %} osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]+') container_id=$(get_container_id_from_dev_name "ceph-osd-${osd_id}") container_exec="{{ container_binary }} exec $container_id" diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml index 9d603c265..545685c6f 100644 --- a/roles/ceph-osd/defaults/main.yml +++ b/roles/ceph-osd/defaults/main.yml @@ -44,20 +44,13 @@ devices: [] 
osd_auto_discovery: false # Encrypt your OSD device using dmcrypt -# If set to True, no matter which osd_objecstore and osd_scenario you use the data will be encrypted +# If set to True, no matter which osd_objectstore you use the data will be encrypted dmcrypt: False -osd_scenario: lvm -valid_osd_scenarios: - - lvm - - dedicated_devices: [] -# III. Use ceph-volume to create OSDs from logical volumes. -# Use 'osd_scenario: lvm' to enable this scenario. -# when using lvm, not collocated journals. +# Use ceph-volume to create OSDs from logical volumes. # lvm_volumes is a list of dictionaries. # # Filestore: Each dictionary must contain a data, journal and vg_name key. Any diff --git a/roles/ceph-osd/tasks/build_devices.yml b/roles/ceph-osd/tasks/build_devices.yml deleted file mode 100644 index 350e085d3..000000000 --- a/roles/ceph-osd/tasks/build_devices.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- name: resolve dedicated device link(s) - command: readlink -f {{ item }} - changed_when: false - with_items: "{{ dedicated_devices }}" - register: dedicated_devices_prepare_canonicalize - when: - - osd_scenario == 'non-collocated' - - not osd_auto_discovery - -- name: set_fact build dedicated_devices from resolved symlinks - set_fact: - dedicated_devices_tmp: "{{ dedicated_devices_tmp | default([]) + [ item.stdout ] }}" - with_items: "{{ dedicated_devices_prepare_canonicalize.results }}" - when: - - osd_scenario == 'non-collocated' - - not osd_auto_discovery - -- name: set_fact build final dedicated_devices list - set_fact: - dedicated_devices: "{{ dedicated_devices_tmp | reject('search','/dev/disk') | list }}" - when: - - osd_scenario == 'non-collocated' - - not osd_auto_discovery diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml index 103542440..32b707584 100644 --- a/roles/ceph-osd/tasks/main.yml +++ b/roles/ceph-osd/tasks/main.yml @@ -29,7 +29,6 @@ register: result until: result is succeeded when: - - osd_scenario == 'lvm' - not is_atomic tags: - 
with_pkg @@ -40,9 +39,6 @@ - name: include container_options_facts.yml include_tasks: container_options_facts.yml -- name: include build_devices.yml - include_tasks: build_devices.yml - - name: read information about the devices parted: device: "{{ item }}" @@ -53,7 +49,6 @@ - name: include_tasks scenarios/lvm.yml include_tasks: scenarios/lvm.yml when: - - osd_scenario == 'lvm' - lvm_volumes|length > 0 # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) static: False @@ -61,17 +56,10 @@ - name: include_tasks scenarios/lvm-batch.yml include_tasks: scenarios/lvm-batch.yml when: - - osd_scenario == 'lvm' - devices|length > 0 # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) static: False -- name: include_tasks activate_osds.yml - include_tasks: activate_osds.yml - when: - - not containerized_deployment - - osd_scenario != 'lvm' - - name: include_tasks start_osds.yml include_tasks: start_osds.yml diff --git a/roles/ceph-osd/tasks/start_osds.yml b/roles/ceph-osd/tasks/start_osds.yml index ede90d654..27b357b7f 100644 --- a/roles/ceph-osd/tasks/start_osds.yml +++ b/roles/ceph-osd/tasks/start_osds.yml @@ -12,22 +12,6 @@ when: - ceph_docker_on_openstack - - name: test if the container image has directory {{ container_bin_path }} - command: "{{ container_binary }} run --rm --net=host --entrypoint=test {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} -d {{ container_bin_path }}" - changed_when: false - failed_when: false - register: test_container_bin_path - when: - - osd_scenario != 'lvm' - - - name: test if the container image has the disk_list function - command: "{{ container_binary }} run --rm --net=host --entrypoint=stat {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} {{ container_bin_path + '/disk_list.sh' if test_container_bin_path.rc == 0 else 'disk_list.sh' }}" - changed_when: false - failed_when: 
false - register: disk_list - when: - - osd_scenario != 'lvm' - - name: generate ceph osd docker run script become: true template: @@ -72,12 +56,12 @@ - name: systemd start osd systemd: - name: ceph-osd@{{ item | regex_replace('/dev/', '') if osd_scenario != 'lvm' and containerized_deployment else item }} + name: ceph-osd@{{ item }} state: started enabled: yes masked: no daemon_reload: yes - with_items: "{{ devices if osd_scenario != 'lvm' and containerized_deployment else ((ceph_osd_ids.stdout | from_json).keys() | list) if osd_scenario == 'lvm' and not containerized_deployment else osd_ids_non_container.stdout_lines }}" + with_items: "{{ ((ceph_osd_ids.stdout | from_json).keys() | list) if not containerized_deployment else osd_ids_non_container.stdout_lines }}" - name: ensure systemd service override directory exists file: diff --git a/roles/ceph-osd/templates/ceph-osd-run.sh.j2 b/roles/ceph-osd/templates/ceph-osd-run.sh.j2 index 79779b354..02c2d1f7c 100644 --- a/roles/ceph-osd/templates/ceph-osd-run.sh.j2 +++ b/roles/ceph-osd/templates/ceph-osd-run.sh.j2 @@ -2,70 +2,6 @@ # {{ ansible_managed }} -{% if osd_scenario != 'lvm' -%} -{% if disk_list.get('rc') == 0 -%} -############# -# VARIABLES # -############# -DOCKER_ENV="" - -############# -# FUNCTIONS # -############# -function expose_partitions () { -DOCKER_ENV=$({{ container_binary }} run --rm --net=host --name expose_partitions_${1} --privileged=true -v /dev/:/dev/ -v /etc/ceph:/etc/ceph:z -e CLUSTER={{ cluster }} -e OSD_DEVICE=/dev/${1} {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} disk_list) -} -{% else -%} -# NOTE(leseb): maintains backwards compatibility with old ceph-docker Jewel images -# Jewel images prior to https://github.com/ceph/ceph-docker/pull/797 -REGEX="[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" -function expose_partitions { - if {{ container_binary }} ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}; then - if [[ ! 
-f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log ]]; then - {{ container_binary }} logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log - fi - fi - if {{ container_binary }} ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-${1}; then - if [[ ! -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log ]]; then - {{ container_binary }} logs ceph-osd-prepare-{{ ansible_hostname }}-${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log - fi - fi - if [[ -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log ]]; then - part=$(grep "Journal is GPT partition" {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log | grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq) - DOCKER_ENV="-e OSD_JOURNAL=$part" - fi - if [[ -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log ]]; then - part=$(grep "Journal is GPT partition" {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log | grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq) - DOCKER_ENV="-e OSD_JOURNAL=$part" - fi - if [[ -z $DOCKER_ENV ]]; then - # NOTE(leseb): if we arrive here this probably means we just switched from non-containers to containers. 
- # This is tricky as we don't have any info on the type of OSD, this is 'only' a problem for non-collocated scenarios - # We can't assume that the 'ceph' is still present so calling Docker exec instead - part=$({{ container_binary }} run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} list /dev/${1} | awk '/journal / {print $1}') - DOCKER_ENV="-e OSD_JOURNAL=$part" - fi - # if empty, the previous command didn't find anything so we fail - if [[ -z $DOCKER_ENV ]]; then - echo "ERROR: could not discover ceph partitions" - exit 1 - fi -} - -{% endif -%} - -expose_partitions "$1" - -# discover osd_objectstore for ceph-disk based osds -if [[ $DOCKER_ENV =~ "BLUESTORE" ]]; then - DOCKER_ENV="$DOCKER_ENV -e OSD_BLUESTORE=1" -elif [[ $DOCKER_ENV =~ "JOURNAL" ]]; then - DOCKER_ENV="$DOCKER_ENV -e OSD_FILESTORE=1" -fi - -{% endif -%} - - ######## # MAIN # ######## @@ -112,17 +48,9 @@ numactl \ {% if (ceph_tcmalloc_max_total_thread_cache | int) > 0 and osd_objectstore == 'filestore' -%} -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \ {% endif -%} - {% if osd_scenario == 'lvm' -%} -v /run/lvm/:/run/lvm/ \ -e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE \ -e OSD_ID="$1" \ --name=ceph-osd-"$1" \ - {% else -%} - $DOCKER_ENV \ - -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \ - -e OSD_DEVICE=/dev/"${1}" \ - -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \ - --name=ceph-osd-{{ ansible_hostname }}-"${1}" \ - {% endif -%} {{ ceph_osd_docker_extra_env }} \ {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} diff --git a/roles/ceph-osd/templates/ceph-osd.service.j2 b/roles/ceph-osd/templates/ceph-osd.service.j2 index 31e117c68..ea67df29b 100644 --- a/roles/ceph-osd/templates/ceph-osd.service.j2 +++ b/roles/ceph-osd/templates/ceph-osd.service.j2 @@ -7,19 +7,10 @@ After=docker.service [Service] 
EnvironmentFile=-/etc/environment -{% if osd_scenario == 'lvm' -%} ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-osd-%i ExecStartPre=-/usr/bin/{{ container_binary }} rm -f ceph-osd-%i -{% else %} -ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-osd-{{ ansible_hostname }}-%i -ExecStartPre=-/usr/bin/{{ container_binary }} rm -f ceph-osd-{{ ansible_hostname }}-%i -{% endif -%} ExecStart={{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh %i -{% if osd_scenario == 'lvm' -%} ExecStop=-/usr/bin/{{ container_binary }} stop ceph-osd-%i -{% else %} -ExecStop=-/usr/bin/{{ container_binary }} stop ceph-osd-{{ ansible_hostname }}-%i -{% endif -%} Restart=always RestartSec=10s TimeoutStartSec=120 diff --git a/roles/ceph-validate/tasks/check_system.yml b/roles/ceph-validate/tasks/check_system.yml index e184aa4cc..2bb55cc0b 100644 --- a/roles/ceph-validate/tasks/check_system.yml +++ b/roles/ceph-validate/tasks/check_system.yml @@ -95,13 +95,3 @@ when: - iscsi_gw_group_name in group_names -- name: warn users that ceph-disk scenarios will be removed on 3.3 - debug: - msg: | - osd_scenario is set to {{ osd_scenario }}, this variable is not used anymore and defaults to 'lvm'. - If you have something different than 'lvm', this means you want ceph-ansible to manage your ceph-disk OSDs. 
- So basically, ceph-ansible can still start your ceph-disk osd services - run_once: true - when: - - osd_group_name in group_names - - osd_scenario != 'lvm' diff --git a/tests/conftest.py b/tests/conftest.py index 0be84c96a..3e2cee0ad 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -97,9 +97,7 @@ def node(host, request): rolling_update = os.environ.get("ROLLING_UPDATE", "False") group_names = ansible_vars["group_names"] docker = ansible_vars.get("docker") - osd_scenario = ansible_vars.get("osd_scenario") radosgw_num_instances = ansible_vars.get("radosgw_num_instances", 1) - lvm_scenario = osd_scenario in ['lvm', 'lvm-batch'] ceph_release_num = { 'jewel': 10, 'kraken': 11, @@ -123,12 +121,6 @@ def node(host, request): request.function, group_names) pytest.skip(reason) - if request.node.get_closest_marker("no_lvm_scenario") and lvm_scenario: - pytest.skip("Not a valid test for lvm scenarios") - - if not lvm_scenario and request.node.get_closest_marker("lvm_scenario"): - pytest.skip("Not a valid test for non-lvm scenarios") - if request.node.get_closest_marker("no_docker") and docker: pytest.skip( "Not a valid test for containerized deployments or atomic hosts") @@ -137,11 +129,6 @@ def node(host, request): pytest.skip( "Not a valid test for non-containerized deployments or atomic hosts") # noqa E501 - journal_collocation_test = ansible_vars.get("osd_scenario") == "collocated" - if request.node.get_closest_marker("journal_collocation") and not journal_collocation_test: # noqa E501 - pytest.skip("Scenario is not using journal collocation") - - data = dict( vars=ansible_vars, diff --git a/tests/functional/add-mdss/container/group_vars/all b/tests/functional/add-mdss/container/group_vars/all index fdd8ac490..389bf9e14 100644 --- a/tests/functional/add-mdss/container/group_vars/all +++ b/tests/functional/add-mdss/container/group_vars/all @@ -10,7 +10,6 @@ monitor_interface: eth1 radosgw_interface: eth1 journal_size: 100 osd_objectstore: "bluestore" 
-osd_scenario: lvm copy_admin_key: true # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb lvm_volumes: diff --git a/tests/functional/add-mdss/group_vars/all b/tests/functional/add-mdss/group_vars/all index 6c0561398..4ce40af30 100644 --- a/tests/functional/add-mdss/group_vars/all +++ b/tests/functional/add-mdss/group_vars/all @@ -8,7 +8,6 @@ monitor_interface: eth1 radosgw_interface: eth1 journal_size: 100 osd_objectstore: "bluestore" -osd_scenario: lvm copy_admin_key: true # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb lvm_volumes: diff --git a/tests/functional/add-osds/container/group_vars/all b/tests/functional/add-osds/container/group_vars/all index 87a64289c..8c484f9ab 100644 --- a/tests/functional/add-osds/container/group_vars/all +++ b/tests/functional/add-osds/container/group_vars/all @@ -10,7 +10,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }} radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}" journal_size: 100 osd_objectstore: "bluestore" -osd_scenario: lvm copy_admin_key: true # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb lvm_volumes: diff --git a/tests/functional/add-osds/group_vars/all b/tests/functional/add-osds/group_vars/all index 99d4aeb33..7e6c13112 100644 --- a/tests/functional/add-osds/group_vars/all +++ b/tests/functional/add-osds/group_vars/all @@ -8,7 +8,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }} radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}" journal_size: 100 osd_objectstore: "bluestore" -osd_scenario: lvm copy_admin_key: true # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb lvm_volumes: diff --git a/tests/functional/all_daemons/container/group_vars/osds b/tests/functional/all_daemons/container/group_vars/osds index 672a0f956..9cea91d6a 100644 --- 
a/tests/functional/all_daemons/container/group_vars/osds +++ b/tests/functional/all_daemons/container/group_vars/osds @@ -1,7 +1,6 @@ --- ceph_osd_docker_run_script_path: /var/tmp osd_objectstore: "bluestore" -osd_scenario: lvm lvm_volumes: - data: data-lv1 data_vg: test_group diff --git a/tests/functional/all_daemons/group_vars/osds b/tests/functional/all_daemons/group_vars/osds index e27c47422..3ec1d6e4c 100644 --- a/tests/functional/all_daemons/group_vars/osds +++ b/tests/functional/all_daemons/group_vars/osds @@ -1,7 +1,6 @@ --- os_tuning_params: - { name: fs.file-max, value: 26234859 } -osd_scenario: lvm osd_objectstore: "bluestore" lvm_volumes: - data: data-lv1 diff --git a/tests/functional/bs-lvm-osds/container/group_vars/all b/tests/functional/bs-lvm-osds/container/group_vars/all index 8d41cbb17..490f8e591 100644 --- a/tests/functional/bs-lvm-osds/container/group_vars/all +++ b/tests/functional/bs-lvm-osds/container/group_vars/all @@ -11,7 +11,6 @@ public_network: "192.168.39.0/24" cluster_network: "192.168.40.0/24" monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}" osd_objectstore: "bluestore" -osd_scenario: lvm copy_admin_key: true # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb lvm_volumes: diff --git a/tests/functional/bs-lvm-osds/group_vars/all b/tests/functional/bs-lvm-osds/group_vars/all index eae5a497c..ea2c10f8b 100644 --- a/tests/functional/bs-lvm-osds/group_vars/all +++ b/tests/functional/bs-lvm-osds/group_vars/all @@ -6,7 +6,6 @@ public_network: "192.168.39.0/24" cluster_network: "192.168.40.0/24" monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}" osd_objectstore: "bluestore" -osd_scenario: lvm copy_admin_key: true # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb lvm_volumes: diff --git a/tests/functional/collocation/container/group_vars/osds b/tests/functional/collocation/container/group_vars/osds index 988ec50e2..9cea91d6a 100644 --- 
a/tests/functional/collocation/container/group_vars/osds +++ b/tests/functional/collocation/container/group_vars/osds @@ -1,6 +1,5 @@ --- ceph_osd_docker_run_script_path: /var/tmp -osd_scenario: lvm osd_objectstore: "bluestore" lvm_volumes: - data: data-lv1 diff --git a/tests/functional/collocation/group_vars/osds b/tests/functional/collocation/group_vars/osds index 988ec50e2..9cea91d6a 100644 --- a/tests/functional/collocation/group_vars/osds +++ b/tests/functional/collocation/group_vars/osds @@ -1,6 +1,5 @@ --- ceph_osd_docker_run_script_path: /var/tmp -osd_scenario: lvm osd_objectstore: "bluestore" lvm_volumes: - data: data-lv1 diff --git a/tests/functional/lvm-auto-discovery/container/group_vars/all b/tests/functional/lvm-auto-discovery/container/group_vars/all index f537a13a1..742d0c45a 100644 --- a/tests/functional/lvm-auto-discovery/container/group_vars/all +++ b/tests/functional/lvm-auto-discovery/container/group_vars/all @@ -15,7 +15,6 @@ radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }} journal_size: 100 osd_objectstore: "bluestore" crush_device_class: test -osd_scenario: lvm copy_admin_key: true osd_auto_discovery: true os_tuning_params: diff --git a/tests/functional/lvm-auto-discovery/group_vars/all b/tests/functional/lvm-auto-discovery/group_vars/all index f2d1c1417..99dc63fe0 100644 --- a/tests/functional/lvm-auto-discovery/group_vars/all +++ b/tests/functional/lvm-auto-discovery/group_vars/all @@ -9,7 +9,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }} radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}" osd_objectstore: "bluestore" crush_device_class: test -osd_scenario: lvm copy_admin_key: true osd_auto_discovery: true os_tuning_params: diff --git a/tests/functional/lvm-batch/container/group_vars/all b/tests/functional/lvm-batch/container/group_vars/all index 81b853c45..5265e2cf1 100644 --- a/tests/functional/lvm-batch/container/group_vars/all +++ 
b/tests/functional/lvm-batch/container/group_vars/all @@ -15,7 +15,6 @@ radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }} journal_size: 100 osd_objectstore: "bluestore" crush_device_class: test -osd_scenario: lvm copy_admin_key: true devices: - /dev/sdb diff --git a/tests/functional/lvm-batch/group_vars/all b/tests/functional/lvm-batch/group_vars/all index cde0eaa22..fb6611b32 100644 --- a/tests/functional/lvm-batch/group_vars/all +++ b/tests/functional/lvm-batch/group_vars/all @@ -9,7 +9,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }} radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}" osd_objectstore: "bluestore" crush_device_class: test -osd_scenario: lvm copy_admin_key: true devices: - /dev/disk/by-id/ata-QEMU_HARDDISK_QM00002 diff --git a/tests/functional/lvm-osds/container/group_vars/all b/tests/functional/lvm-osds/container/group_vars/all index f080d7db0..83e87a486 100644 --- a/tests/functional/lvm-osds/container/group_vars/all +++ b/tests/functional/lvm-osds/container/group_vars/all @@ -15,7 +15,6 @@ osd_objectstore: "filestore" copy_admin_key: true containerized_deployment: true # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb -osd_scenario: lvm lvm_volumes: - data: data-lv1 journal: /dev/sdc1 diff --git a/tests/functional/lvm-osds/group_vars/all b/tests/functional/lvm-osds/group_vars/all index c23c858b0..a9d14cb94 100644 --- a/tests/functional/lvm-osds/group_vars/all +++ b/tests/functional/lvm-osds/group_vars/all @@ -10,7 +10,6 @@ journal_size: 100 osd_objectstore: "filestore" copy_admin_key: true # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb -osd_scenario: lvm lvm_volumes: - data: data-lv1 journal: /dev/sdc1 diff --git a/tests/functional/lvm_setup.yml b/tests/functional/lvm_setup.yml index 86e1fbee0..250507983 100644 --- a/tests/functional/lvm_setup.yml +++ b/tests/functional/lvm_setup.yml @@ -9,73 +9,60 @@ 
gather_facts: false become: yes tasks: - - - block: - - name: check if it is atomic host - stat: - path: /run/ostree-booted - register: stat_ostree - tags: - - always - - - name: set_fact is_atomic - set_fact: - is_atomic: '{{ stat_ostree.stat.exists }}' - tags: - - always - - # Some images may not have lvm2 installed - - name: install lvm2 - package: - name: lvm2 - state: present - register: result - until: result is succeeded - when: - - not is_atomic - - - name: create physical volume - command: pvcreate /dev/sdb - failed_when: false - - - name: create volume group - command: vgcreate test_group /dev/sdb - failed_when: false - - - name: create logical volume 1 - command: lvcreate --yes -l 50%FREE -n data-lv1 test_group - failed_when: false - - - name: create logical volume 2 - command: lvcreate --yes -l 50%FREE -n data-lv2 test_group - failed_when: false - - - name: partition /dev/sdc for journals - parted: - device: /dev/sdc - number: 1 - part_start: 0% - part_end: 50% - unit: '%' - label: gpt - state: present - - - name: partition /dev/sdc for journals - parted: - device: /dev/sdc - number: 2 - part_start: 50% - part_end: 100% - unit: '%' - state: present - label: gpt - - - name: create journals vg from /dev/sdc2 - lvg: - vg: journals - pvs: /dev/sdc2 - - - name: create journal1 lv - command: lvcreate --yes -l 100%FREE -n journal1 journals - failed_when: false - when: osd_scenario == 'lvm' + - name: check if it is atomic host + stat: + path: /run/ostree-booted + register: stat_ostree + tags: + - always + - name: set_fact is_atomic + set_fact: + is_atomic: '{{ stat_ostree.stat.exists }}' + tags: + - always + # Some images may not have lvm2 installed + - name: install lvm2 + package: + name: lvm2 + state: present + register: result + until: result is succeeded + when: + - not is_atomic + - name: create physical volume + command: pvcreate /dev/sdb + failed_when: false + - name: create volume group + command: vgcreate test_group /dev/sdb + failed_when: false + - 
name: create logical volume 1 + command: lvcreate --yes -l 50%FREE -n data-lv1 test_group + failed_when: false + - name: create logical volume 2 + command: lvcreate --yes -l 50%FREE -n data-lv2 test_group + failed_when: false + - name: partition /dev/sdc for journals + parted: + device: /dev/sdc + number: 1 + part_start: 0% + part_end: 50% + unit: '%' + label: gpt + state: present + - name: partition /dev/sdc for journals + parted: + device: /dev/sdc + number: 2 + part_start: 50% + part_end: 100% + unit: '%' + state: present + label: gpt + - name: create journals vg from /dev/sdc2 + lvg: + vg: journals + pvs: /dev/sdc2 + - name: create journal1 lv + command: lvcreate --yes -l 100%FREE -n journal1 journals + failed_when: false diff --git a/tests/functional/ooo-collocation/hosts b/tests/functional/ooo-collocation/hosts index ae671a1be..e4321f41c 100644 --- a/tests/functional/ooo-collocation/hosts +++ b/tests/functional/ooo-collocation/hosts @@ -49,7 +49,6 @@ all: - {name: vms, pg_num: 8, rule_name: ''} - {name: volumes, pg_num: 8, rule_name: ''} osd_objectstore: filestore - osd_scenario: collocated ceph_osd_docker_run_script_path: /opt pools: [] public_network: 192.168.95.0/24 diff --git a/tests/functional/ooo_rhel8/group_vars/osds b/tests/functional/ooo_rhel8/group_vars/osds index 672a0f956..9cea91d6a 100644 --- a/tests/functional/ooo_rhel8/group_vars/osds +++ b/tests/functional/ooo_rhel8/group_vars/osds @@ -1,7 +1,6 @@ --- ceph_osd_docker_run_script_path: /var/tmp osd_objectstore: "bluestore" -osd_scenario: lvm lvm_volumes: - data: data-lv1 data_vg: test_group diff --git a/tests/functional/podman/group_vars/osds b/tests/functional/podman/group_vars/osds index 672a0f956..9cea91d6a 100644 --- a/tests/functional/podman/group_vars/osds +++ b/tests/functional/podman/group_vars/osds @@ -1,7 +1,6 @@ --- ceph_osd_docker_run_script_path: /var/tmp osd_objectstore: "bluestore" -osd_scenario: lvm lvm_volumes: - data: data-lv1 data_vg: test_group diff --git 
a/tests/functional/rgw-multisite/container/group_vars/all b/tests/functional/rgw-multisite/container/group_vars/all index 59346718a..c451e157c 100644 --- a/tests/functional/rgw-multisite/container/group_vars/all +++ b/tests/functional/rgw-multisite/container/group_vars/all @@ -10,7 +10,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }} radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}" journal_size: 100 osd_objectstore: "bluestore" -osd_scenario: lvm copy_admin_key: true # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb lvm_volumes: diff --git a/tests/functional/rgw-multisite/container/secondary/group_vars/all b/tests/functional/rgw-multisite/container/secondary/group_vars/all index af43d618f..2c42970c6 100644 --- a/tests/functional/rgw-multisite/container/secondary/group_vars/all +++ b/tests/functional/rgw-multisite/container/secondary/group_vars/all @@ -10,7 +10,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }} radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}" journal_size: 100 osd_objectstore: "bluestore" -osd_scenario: lvm copy_admin_key: true # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb lvm_volumes: diff --git a/tests/functional/rgw-multisite/group_vars/all b/tests/functional/rgw-multisite/group_vars/all index ab53a5f40..b89bd8206 100644 --- a/tests/functional/rgw-multisite/group_vars/all +++ b/tests/functional/rgw-multisite/group_vars/all @@ -8,7 +8,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }} radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}" journal_size: 100 osd_objectstore: "bluestore" -osd_scenario: lvm copy_admin_key: true # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb lvm_volumes: diff --git a/tests/functional/rgw-multisite/secondary/group_vars/all 
b/tests/functional/rgw-multisite/secondary/group_vars/all index 70ba003d9..0fb25fb51 100644 --- a/tests/functional/rgw-multisite/secondary/group_vars/all +++ b/tests/functional/rgw-multisite/secondary/group_vars/all @@ -8,7 +8,6 @@ monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }} radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}" journal_size: 100 osd_objectstore: "bluestore" -osd_scenario: lvm copy_admin_key: true # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb lvm_volumes: diff --git a/tests/functional/shrink_mon/container/group_vars/osds b/tests/functional/shrink_mon/container/group_vars/osds index 672a0f956..9cea91d6a 100644 --- a/tests/functional/shrink_mon/container/group_vars/osds +++ b/tests/functional/shrink_mon/container/group_vars/osds @@ -1,7 +1,6 @@ --- ceph_osd_docker_run_script_path: /var/tmp osd_objectstore: "bluestore" -osd_scenario: lvm lvm_volumes: - data: data-lv1 data_vg: test_group diff --git a/tests/functional/shrink_mon/group_vars/osds b/tests/functional/shrink_mon/group_vars/osds index e27c47422..3ec1d6e4c 100644 --- a/tests/functional/shrink_mon/group_vars/osds +++ b/tests/functional/shrink_mon/group_vars/osds @@ -1,7 +1,6 @@ --- os_tuning_params: - { name: fs.file-max, value: 26234859 } -osd_scenario: lvm osd_objectstore: "bluestore" lvm_volumes: - data: data-lv1 diff --git a/tests/functional/shrink_osd/container/group_vars/osds b/tests/functional/shrink_osd/container/group_vars/osds index 7a4bf9276..2558deb1b 100644 --- a/tests/functional/shrink_osd/container/group_vars/osds +++ b/tests/functional/shrink_osd/container/group_vars/osds @@ -2,7 +2,6 @@ ceph_osd_docker_run_script_path: /var/tmp journal_size: 100 osd_objectstore: "filestore" -osd_scenario: lvm lvm_volumes: - data: data-lv1 journal: /dev/sdc1 diff --git a/tests/functional/shrink_osd/group_vars/osds b/tests/functional/shrink_osd/group_vars/osds index d07a317ff..59fd2abf8 100644 --- 
a/tests/functional/shrink_osd/group_vars/osds +++ b/tests/functional/shrink_osd/group_vars/osds @@ -1,7 +1,6 @@ --- os_tuning_params: - { name: fs.file-max, value: 26234859 } -osd_scenario: lvm journal_size: 100 osd_objectstore: "filestore" lvm_volumes: