diff --git a/infrastructure-playbooks/purge-docker-cluster.yml b/infrastructure-playbooks/purge-docker-cluster.yml
index 512860377..3e301bf67 100644
--- a/infrastructure-playbooks/purge-docker-cluster.yml
+++ b/infrastructure-playbooks/purge-docker-cluster.yml
@@ -270,53 +270,135 @@
   tasks:
 
+  - name: get all the running osds
+    shell: |
+      systemctl list-units | grep "loaded active" | grep -oE "ceph-osd@([0-9]{1,2}|[a-z]+).service"
+    register: osd_units
+
   - name: disable ceph osd service
     service:
-      name: "ceph-osd@{{ item | basename }}"
+      name: "ceph-osd@{{ item }}"
       state: stopped
       enabled: no
-    with_items: "{{ devices }}"
-    ignore_errors: true
+    with_items: "{{ osd_units.stdout_lines }}"
 
-  - name: resolve device link
-    command: readlink -f {{ item }}
-    changed_when: false
-    with_items: "{{ devices }}"
-    register: purge_devices_links
-
-  - name: set_fact devices generate device list when osd_auto_discovery
-    set_fact:
-      devices: "{{ devices | default([]) + [ item.stdout ] }}"
-    with_items: "{{ purge_devices_links.results }}"
+  - name: get prepare container
+    command: "docker ps -a -q --filter='name=ceph-osd-prepare'"
+    register: prepare_containers
 
   - name: remove ceph osd prepare container
-    docker:
-      image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "ceph-osd-prepare-{{ ansible_hostname }}{{ item | regex_replace('/dev/', '') }}"
+    command: "docker rm -f {{ item }}"
+    with_items: "{{ prepare_containers.stdout_lines }}"
+
+  - name: see if ceph-disk-created data partitions are present
+    shell: |
+      ls /dev/disk/by-partlabel | grep -q "ceph.*.data"
+    failed_when: false
+    register: ceph_data_partlabels
+
+  - name: see if ceph-disk-created block partitions are present
+    shell: |
+      ls /dev/disk/by-partlabel | grep -q "ceph.*block$"
+    failed_when: false
+    register: ceph_block_partlabels
+
+  - name: see if ceph-disk-created journal partitions are present
+    shell: |
+      ls /dev/disk/by-partlabel | grep -q "ceph.*.journal"
+    failed_when: false
+    register: ceph_journal_partlabels
+
+  - name: see if ceph-disk-created block db partitions are present
+    shell: |
+      ls /dev/disk/by-partlabel | grep -q "ceph.*.block.db"
+    failed_when: false
+    register: ceph_db_partlabels
+
+  - name: see if ceph-disk-created block wal partitions are present
+    shell: |
+      ls /dev/disk/by-partlabel | grep -q "ceph.*.block.wal"
+    failed_when: false
+    register: ceph_wal_partlabels
+
+  - name: see if ceph-disk-created lockbox partitions are present
+    shell: |
+      ls /dev/disk/by-partlabel | grep -q "ceph.*.lockbox"
+    failed_when: false
+    register: ceph_lockbox_partlabels
+
+  # NOTE(leseb): hope someone will find a more elegant way one day...
+  - name: see if encrypted partitions are present
+    shell: |
+      blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2
+    register: encrypted_ceph_partuuid
+
+  - name: remove osd mountpoint tree
+    file:
+      path: /var/lib/ceph/osd/
       state: absent
-    with_items: "{{ devices }}"
+    register: remove_osd_mountpoints
     ignore_errors: true
 
-  - name: remove ceph osd container
-    docker:
-      image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "ceph-osd-{{ ansible_hostname }}-{{ item | regex_replace('/dev/', '') }}"
-      state: absent
-    with_items: "{{ devices }}"
-    ignore_errors: true
+  - name: get ceph data partitions
+    shell: |
+      blkid | awk -F: '/ceph data/ { print $1 }'
+    when: ceph_data_partlabels.rc == 0
+    failed_when: false
+    register: ceph_data_partition_to_erase_path
+
+  - name: get ceph lockbox partitions
+    shell: |
+      blkid | awk '/ceph lockbox/ { sub (":", "", $1); print $1 }'
+    when: ceph_lockbox_partlabels.rc == 0
+    failed_when: false
+    register: ceph_lockbox_partition_to_erase_path
+
+  - name: get ceph block partitions
+    shell: |
+      blkid | awk '/ceph block"/ { sub (":", "", $1); print $1 }'
+    when: ceph_block_partlabels.rc == 0
+    failed_when: false
+    register: ceph_block_partition_to_erase_path
+
+  - name: get ceph journal partitions
+    shell: |
+      blkid | awk '/ceph journal/ { sub (":", "", $1); print $1 }'
+    when: ceph_journal_partlabels.rc == 0
+    failed_when: false
+    register: ceph_journal_partition_to_erase_path
+
+  - name: get ceph db partitions
+    shell: |
+      blkid | awk '/ceph block.db/ { sub (":", "", $1); print $1 }'
+    when: ceph_db_partlabels.rc == 0
+    failed_when: false
+    register: ceph_db_partition_to_erase_path
+
+  - name: get ceph wal partitions
+    shell: |
+      blkid | awk '/ceph block.wal/ { sub (":", "", $1); print $1 }'
+    when: ceph_wal_partlabels.rc == 0
+    failed_when: false
+    register: ceph_wal_partition_to_erase_path
 
   - name: zap ceph osd disks
     shell: |
-      docker run \
+      docker run --rm \
       --privileged=true \
-      --name ceph-osd-zap-{{ ansible_hostname }}-{{ item | regex_replace('/dev/', '') }} \
+      --name ceph-osd-zap-{{ ansible_hostname }}-{{ item[:-1] | regex_replace('/dev/', '') }} \
       -v /dev/:/dev/ \
-      -e OSD_DEVICE={{ item }} \
+      -e OSD_DEVICE={{ item[:-1] }} \
       {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
       zap_device
     with_items:
-      - "{{ devices }}"
-      - "{{ dedicated_devices|default([]) }}"
+      - "{{ ceph_data_partition_to_erase_path.stdout_lines | default([]) }}"
+      - "{{ ceph_lockbox_partition_to_erase_path.stdout_lines | default([]) }}"
+      - "{{ ceph_block_partition_to_erase_path.stdout_lines | default([]) }}"
+      - "{{ ceph_journal_partition_to_erase_path.stdout_lines | default([]) }}"
+      - "{{ ceph_db_partition_to_erase_path.stdout_lines | default([]) }}"
+      - "{{ ceph_wal_partition_to_erase_path.stdout_lines | default([]) }}"
+    when:
+      - (ceph_data_partlabels.rc == 0 or ceph_block_partlabels.rc == 0 or ceph_journal_partlabels.rc == 0 or ceph_db_partlabels.rc == 0 or ceph_wal_partlabels.rc == 0)
 
   - name: wait until the zap containers die
     shell: |
@@ -330,11 +412,15 @@
 
   - name: remove ceph osd zap disk container
     docker:
       image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "ceph-osd-zap-{{ ansible_hostname }}-{{ item | regex_replace('/dev/', '') }}"
+      name: "ceph-osd-zap-{{ ansible_hostname }}-{{ item[:-1] | regex_replace('/dev/', '') }}"
       state: absent
     with_items:
-      - "{{ devices }}"
-      - "{{ dedicated_devices|default([]) }}"
+      - "{{ ceph_data_partition_to_erase_path.stdout_lines | default([]) }}"
+      - "{{ ceph_lockbox_partition_to_erase_path.stdout_lines | default([]) }}"
+      - "{{ ceph_block_partition_to_erase_path.stdout_lines | default([]) }}"
+      - "{{ ceph_journal_partition_to_erase_path.stdout_lines | default([]) }}"
+      - "{{ ceph_db_partition_to_erase_path.stdout_lines | default([]) }}"
+      - "{{ ceph_wal_partition_to_erase_path.stdout_lines | default([]) }}"
 
   - name: remove ceph osd service
     file:
diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml
index c08dd98c5..6690108db 100644
--- a/infrastructure-playbooks/rolling_update.yml
+++ b/infrastructure-playbooks/rolling_update.yml
@@ -147,7 +147,21 @@
       when:
         - mon_host_count | int == 1
 
-    - name: waiting for the monitor to join the quorum...
+    - name: get num_pgs - non container
+      command: ceph --cluster "{{ cluster }}" -s --format json
+      register: ceph_pgs
+      delegate_to: "{{ mon_host }}"
+      when:
+        - not containerized_deployment
+
+    - name: get num_pgs - container
+      command: docker exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }} ceph --cluster "{{ cluster }}" -s --format json
+      register: ceph_pgs
+      delegate_to: "{{ mon_host }}"
+      when:
+        - containerized_deployment
+
+    - name: non container | waiting for the monitor to join the quorum...
       command: ceph --cluster "{{ cluster }}" -s --format json
       register: ceph_health_raw
       until: >
@@ -157,8 +171,9 @@
       delegate_to: "{{ mon_host }}"
       when:
         - not containerized_deployment
+        - (ceph_pgs.stdout | from_json).pgmap.num_pgs != "0"
 
-    - name: waiting for the containerized monitor to join the quorum...
+    - name: container | waiting for the containerized monitor to join the quorum...
       command: docker exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }} ceph --cluster "{{ cluster }}" -s --format json
       register: ceph_health_raw
       until: >
@@ -168,6 +183,7 @@
       delegate_to: "{{ mon_host }}"
       when:
         - containerized_deployment
+        - (ceph_pgs.stdout | from_json).pgmap.num_pgs != "0"
 
     - name: set osd flags
       command: ceph osd set {{ item }} --cluster {{ cluster }}
diff --git a/infrastructure-playbooks/shrink-osd.yml b/infrastructure-playbooks/shrink-osd.yml
index 842c79cfd..6e8b29c4f 100644
--- a/infrastructure-playbooks/shrink-osd.yml
+++ b/infrastructure-playbooks/shrink-osd.yml
@@ -91,13 +91,16 @@
       with_items: "{{ osd_hosts }}"
       delegate_to: "{{ item }}"
       failed_when: false
+      when:
+        - not containerized_deployment
 
     - name: fail when admin key is not present
       fail:
         msg: "The Ceph admin key is not present on the OSD node, please add it and remove it after the playbook is done."
with_items: "{{ ceph_admin_key.results }}" when: - - item.stat.exists == false + - not containerized_deployment + - item.stat.exists == false # NOTE(leseb): using '>' is the only way I could have the command working - name: find osd device based on the id @@ -127,8 +130,6 @@ - name: deactivating osd(s) command: ceph-disk deactivate --cluster {{ cluster }} --deactivate-by-id {{ item.0 }} --mark-out - register: deactivate - ignore_errors: yes run_once: true with_together: - "{{ osd_to_kill.split(',') }}" @@ -137,21 +138,8 @@ when: - not containerized_deployment - - name: set osd(s) out when ceph-disk deactivating fail - command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd out osd.{{ item.0 }}" - delegate_to: "{{ groups[mon_group_name][0] }}" - with_together: - - "{{ osd_to_kill.split(',') }}" - - "{{ deactivate.results }}" - when: - - not containerized_deployment - - not item.1.get("skipped") - - item.1.stderr|length > 0 - - name: destroying osd(s) command: ceph-disk destroy --cluster {{ cluster }} --destroy-by-id {{ item.0 }} --zap - register: destroy - ignore_errors: yes run_once: true with_together: - "{{ osd_to_kill.split(',') }}" @@ -161,32 +149,20 @@ - not containerized_deployment - name: remove osd(s) from crush_map when ceph-disk destroy fail - command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd crush remove osd.{{ item.0 }}" + command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd crush remove osd.{{ item }}" run_once: true delegate_to: "{{ groups[mon_group_name][0] }}" - with_together: - - "{{ osd_to_kill.split(',') }}" - - "{{ destroy.results }}" - when: - - (item.1.get("skipped") or item.1.stderr|length > 0) + with_items: "{{ osd_to_kill.split(',') }}" - name: delete osd(s) auth key when ceph-disk destroy fail - command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} auth del osd.{{ item.0 }}" + command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} auth del osd.{{ item }}" delegate_to: "{{ groups[mon_group_name][0] }}" - with_together: - - "{{ osd_to_kill.split(',') }}" - - "{{ destroy.results }}" - when: - - (item.1.get("skipped") or item.1.stderr|length > 0) + with_items: "{{ osd_to_kill.split(',') }}" - name: deallocate osd(s) id when ceph-disk destroy fail - command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd rm {{ item.0 }}" + command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd rm {{ item }}" delegate_to: "{{ groups[mon_group_name][0] }}" - with_together: - - "{{ osd_to_kill.split(',') }}" - - "{{ destroy.results }}" - when: - - (item.1.get("skipped") or item.1.stderr|length > 0) + with_items: "{{ osd_to_kill.split(',') }}" - name: show ceph health command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s" diff --git a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml index a924d5eed..e0e7b2ac8 100644 --- a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml +++ b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml @@ -280,14 +280,16 @@ - ceph-osd post_tasks: - - name: waiting for clean pgs... 
-      shell: |
-        test "[""$(docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')""]" = "$(docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print [ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if i["state_name"] == "active+clean"]')"
-      register: result
-      until: result.rc == 0
+    - name: container - waiting for clean pgs...
+      command: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s --format json"
+      register: ceph_health_post
+      until: >
+        ((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | length) == 1
+        and
+        (ceph_health_post.stdout | from_json).pgmap.pgs_by_state.0.state_name == "active+clean"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
       retries: "{{ health_osd_check_retries }}"
       delay: "{{ health_osd_check_delay }}"
-      delegate_to: "{{ groups[mon_group_name][0] }}"
 
 
 - name: switching from non-containerized to containerized ceph mds
diff --git a/roles/ceph-common/tasks/installs/prerequisite_rhcs_iso_install_debian.yml b/roles/ceph-common/tasks/installs/prerequisite_rhcs_iso_install_debian.yml
index 1d35ab281..32370f8e3 100644
--- a/roles/ceph-common/tasks/installs/prerequisite_rhcs_iso_install_debian.yml
+++ b/roles/ceph-common/tasks/installs/prerequisite_rhcs_iso_install_debian.yml
@@ -45,13 +45,15 @@
 
 - name: install red hat storage repository key for debian systems
   apt_key:
-    file: "{{ ceph_rhcs_repository_path }}/RPM-GPG-KEY-redhat-release"
+    file: "{{ ceph_rhcs_repository_path }}/MON/release.asc"
     state: present
 
 - name: add red hat storage repository for debian systems
-  template:
-    src: "{{ role_path }}/templates/redhat_storage_repo.j2"
-    dest: /etc/apt/sources.list.d/rh_storage.list
-    owner: root
-    group: root
-    mode: 0644
+  apt_repository:
+    repo: "deb file://{{ ceph_rhcs_repository_path }}/{{ item }} {{ ansible_lsb.codename }} main"
+    state: present
+  changed_when: false
+  with_items:
+    - MON
+    - OSD
+    - Tools
diff --git a/roles/ceph-defaults/handlers/main.yml b/roles/ceph-defaults/handlers/main.yml
index d3a4629e9..e0e9ca4e4 100644
--- a/roles/ceph-defaults/handlers/main.yml
+++ b/roles/ceph-defaults/handlers/main.yml
@@ -25,6 +25,9 @@
     - mon_group_name in group_names
     - not containerized_deployment
     - mon_socket_stat.rc == 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: restart ceph mon daemon(s) - container
   command: /tmp/restart_mon_daemon.sh
@@ -33,7 +36,10 @@
     # We do not want to run these checks on initial deployment (`socket.rc == 0`)
     - mon_group_name in group_names
     - containerized_deployment
-    - ceph_mon_container_stat.stdout_lines|length != 0
+    - ceph_mon_container_stat.get('stdout_lines', [])|length != 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 # This does not just restart OSDs but everything else too. Unfortunately
 # at this time the ansible role does not have an OSD id list to use
@@ -63,6 +69,9 @@
     - handler_health_osd_check
     # See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
    - inventory_hostname in play_hosts
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: restart ceph osds daemon(s) - container
   command: /tmp/restart_osd_daemon.sh
@@ -72,10 +81,13 @@
     # except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
     - osd_group_name in group_names
     - containerized_deployment
-    - ((crush_location is defined and crush_location) or ceph_osd_container_stat.stdout_lines|length != 0)
+    - ((crush_location is defined and crush_location) or ceph_osd_container_stat.get('stdout_lines', [])|length != 0)
     - handler_health_osd_check
     # See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
     - inventory_hostname in play_hosts
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: copy mds restart script
   template:
@@ -97,6 +109,9 @@
     - mds_group_name in group_names
     - not containerized_deployment
     - mds_socket_stat.rc == 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: restart ceph mds daemon(s) - container
   command: /tmp/restart_mds_daemon.sh
@@ -105,7 +120,10 @@
     # We do not want to run these checks on initial deployment (`socket.rc == 0`)
     - mds_group_name in group_names
     - containerized_deployment
-    - ceph_mds_container_stat.stdout_lines|length != 0
+    - ceph_mds_container_stat.get('stdout_lines', [])|length != 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: copy rgw restart script
   template:
@@ -127,6 +145,9 @@
     - rgw_group_name in group_names
     - not containerized_deployment
     - rgw_socket_stat.rc == 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: restart ceph rgw daemon(s) - container
   command: /tmp/restart_rgw_daemon.sh
@@ -135,7 +156,10 @@
     # We do not want to run these checks on initial deployment (`socket.rc == 0`)
     - rgw_group_name in group_names
     - containerized_deployment
-    - ceph_rgw_container_stat.stdout_lines|length != 0
+    - ceph_rgw_container_stat.get('stdout_lines', [])|length != 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: copy nfs restart script
   template:
@@ -157,6 +181,9 @@
     - nfs_group_name in group_names
     - not containerized_deployment
     - nfs_socket_stat.rc == 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: restart ceph nfs daemon(s) - container
   command: /tmp/restart_nfs_daemon.sh
@@ -165,7 +192,10 @@
     # We do not want to run these checks on initial deployment (`socket.rc == 0`)
     - nfs_group_name in group_names
     - containerized_deployment
-    - ceph_nfs_container_stat.stdout_lines|length != 0
+    - ceph_nfs_container_stat.get('stdout_lines', [])|length != 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: copy rbd mirror restart script
   template:
@@ -187,6 +217,9 @@
     - rbdmirror_group_name in group_names
     - not containerized_deployment
     - rbd_mirror_socket_stat.rc == 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: restart ceph rbd mirror daemon(s) - container
   command: /tmp/restart_rbd_mirror_daemon.sh
@@ -195,7 +228,10 @@
     # We do not want to run these checks on initial deployment (`socket.rc == 0`)
     - rbdmirror_group_name in group_names
     - containerized_deployment
-    - ceph_rbd_mirror_container_stat.stdout_lines|length != 0
+    - ceph_rbd_mirror_container_stat.get('stdout_lines', [])|length != 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: copy mgr restart script
   template:
@@ -217,6 +253,9 @@
     - mgr_group_name in group_names
     - not containerized_deployment
     - mgr_socket_stat.rc == 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: restart ceph mgr daemon(s) - container
   command: /tmp/restart_mgr_daemon.sh
@@ -225,4 +264,7 @@
     # We do not want to run these checks on initial deployment (`socket.rc == 0`)
     - mgr_group_name in group_names
     - containerized_deployment
-    - ceph_mgr_container_stat.stdout_lines|length != 0
+    - ceph_mgr_container_stat.get('stdout_lines', [])|length != 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
diff --git a/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2 b/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2
index 856464ffe..4ad28c521 100644
--- a/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2
+++ b/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2
@@ -5,6 +5,10 @@ DELAY="{{ handler_health_osd_check_delay }}"
 CEPH_CLI="--name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring --cluster {{ cluster }}"
 
 check_pgs() {
+  num_pgs=$($docker_exec ceph $CEPH_CLI -s -f json|python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')
+  if [[ "$num_pgs" == "0" ]]; then
+    return 0
+  fi
   while [ $RETRIES -ne 0 ]; do
     test "[""$($docker_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')""]" = "$($docker_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print [ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if i["state_name"] == "active+clean"]')"
     RET=$?
diff --git a/roles/ceph-osd/tasks/scenarios/collocated.yml b/roles/ceph-osd/tasks/scenarios/collocated.yml
index a9b84baa0..4d1a92088 100644
--- a/roles/ceph-osd/tasks/scenarios/collocated.yml
+++ b/roles/ceph-osd/tasks/scenarios/collocated.yml
@@ -5,6 +5,7 @@
 - name: prepare ceph containerized osd disk collocated
   shell: |
     docker run --net=host \
+    --rm \
     --pid=host \
     --privileged=true \
     --name=ceph-osd-prepare-{{ ansible_hostname }}-{{ item.1 | regex_replace('/dev/', '') }} \
@@ -30,6 +31,7 @@
 - name: automatic prepare ceph containerized osd disk collocated
   shell: |
     docker run --net=host \
+    --rm \
     --pid=host \
     --privileged=true \
     --name=ceph-osd-prepare-{{ ansible_hostname }}-{{ item.key }} \
diff --git a/roles/ceph-osd/tasks/scenarios/non-collocated.yml b/roles/ceph-osd/tasks/scenarios/non-collocated.yml
index 22a8e0e72..b7cce9120 100644
--- a/roles/ceph-osd/tasks/scenarios/non-collocated.yml
+++ b/roles/ceph-osd/tasks/scenarios/non-collocated.yml
@@ -5,6 +5,7 @@
 - name: prepare ceph "{{ osd_objectstore }}" containerized osd disk(s) non-collocated
   shell: |
     docker run --net=host \
+    --rm \
     --pid=host \
     --privileged=true \
     --name=ceph-osd-prepare-{{ ansible_hostname }}-{{ item.1 | regex_replace('/dev/', '') }} \
@@ -32,6 +33,7 @@
 - name: prepare ceph "{{ osd_objectstore }}" containerized osd disk(s) non-collocated with a dedicated device for db and wal
   shell: |
     docker run --net=host \
+    --rm \
     --pid=host \
     --privileged=true \
     --name=ceph-osd-prepare-{{ ansible_hostname }}-{{ item.1 | regex_replace('/dev/', '') }} \
diff --git a/tests/functional/centos/7/bluestore/group_vars/all b/tests/functional/centos/7/bluestore/group_vars/all
index 518170988..cf761a178 100644
--- a/tests/functional/centos/7/bluestore/group_vars/all
+++ b/tests/functional/centos/7/bluestore/group_vars/all
@@ -10,7 +10,7 @@ cluster_network: "192.168.36.0/24"
 journal_size: 100
 osd_objectstore: "bluestore"
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - '/dev/sdb'
 dedicated_devices:
   - '/dev/sdc'
diff --git a/tests/functional/centos/7/bs-crypt-ded-jrn/group_vars/all b/tests/functional/centos/7/bs-crypt-ded-jrn/group_vars/all
index 4d9eb02b4..1a4e31c3e 100644
--- a/tests/functional/centos/7/bs-crypt-ded-jrn/group_vars/all
+++ b/tests/functional/centos/7/bs-crypt-ded-jrn/group_vars/all
@@ -11,7 +11,7 @@ osd_scenario: non-collocated
 dmcrypt: true
 osd_objectstore: "bluestore"
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - '/dev/sdb'
 dedicated_devices:
   - '/dev/sdc'
diff --git a/tests/functional/centos/7/bs-crypt-jrn-col/group_vars/all b/tests/functional/centos/7/bs-crypt-jrn-col/group_vars/all
index 3ecc8bd43..2a47dd394 100644
--- a/tests/functional/centos/7/bs-crypt-jrn-col/group_vars/all
+++ b/tests/functional/centos/7/bs-crypt-jrn-col/group_vars/all
@@ -11,7 +11,7 @@ osd_scenario: collocated
 dmcrypt: true
 osd_objectstore: bluestore
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - '/dev/sdb'
 os_tuning_params:
   - { name: kernel.pid_max, value: 4194303 }
diff --git a/tests/functional/centos/7/bs-dock-crypt-jrn-col/group_vars/all b/tests/functional/centos/7/bs-dock-crypt-jrn-col/group_vars/all
index 2380f8c5a..96be81a58 100644
--- a/tests/functional/centos/7/bs-dock-crypt-jrn-col/group_vars/all
+++ b/tests/functional/centos/7/bs-dock-crypt-jrn-col/group_vars/all
@@ -11,7 +11,7 @@ osd_scenario: collocated
 dmcrypt: true
 osd_objectstore: bluestore
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - '/dev/sdb'
 os_tuning_params:
   - { name: kernel.pid_max, value: 4194303 }
diff --git a/tests/functional/centos/7/bs-dock-ded-jrn/group_vars/all b/tests/functional/centos/7/bs-dock-ded-jrn/group_vars/all
index 902bb4f93..0b31b8e8b 100644
--- a/tests/functional/centos/7/bs-dock-ded-jrn/group_vars/all
+++ b/tests/functional/centos/7/bs-dock-ded-jrn/group_vars/all
@@ -16,7 +16,7 @@ ceph_rgw_civetweb_port: 8080
 osd_scenario: non-collocated
 osd_objectstore: bluestore
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - /dev/sdb
 dedicated_devices:
   - /dev/sdc
diff --git a/tests/functional/centos/7/bs-jrn-col/group_vars/all b/tests/functional/centos/7/bs-jrn-col/group_vars/all
index 29c887a8b..2fb10a77b 100644
--- a/tests/functional/centos/7/bs-jrn-col/group_vars/all
+++ b/tests/functional/centos/7/bs-jrn-col/group_vars/all
@@ -9,7 +9,7 @@ public_network: "192.168.3.0/24"
 cluster_network: "192.168.4.0/24"
 journal_size: 100
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - '/dev/sdb'
 osd_scenario: collocated
 osd_objectstore: "bluestore"
diff --git a/tests/functional/centos/7/cluster/group_vars/all b/tests/functional/centos/7/cluster/group_vars/all
index eb589f77d..8cb595495 100644
--- a/tests/functional/centos/7/cluster/group_vars/all
+++ b/tests/functional/centos/7/cluster/group_vars/all
@@ -9,7 +9,7 @@ journal_size: 100
 radosgw_interface: eth1
 osd_objectstore: filestore
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - '/dev/sdb'
 dedicated_devices:
   - '/dev/sdc'
diff --git a/tests/functional/centos/7/crypt-ded-jrn/group_vars/all b/tests/functional/centos/7/crypt-ded-jrn/group_vars/all
index 6fe0e959c..302d09f7d 100644
--- a/tests/functional/centos/7/crypt-ded-jrn/group_vars/all
+++ b/tests/functional/centos/7/crypt-ded-jrn/group_vars/all
@@ -11,7 +11,7 @@ osd_scenario: non-collocated
 dmcrypt: true
 osd_objectstore: filestore
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - '/dev/sdb'
 dedicated_devices:
   - '/dev/sdc'
diff --git a/tests/functional/centos/7/crypt-jrn-col/group_vars/all b/tests/functional/centos/7/crypt-jrn-col/group_vars/all
index 36cd697b7..d2c3ec381 100644
--- a/tests/functional/centos/7/crypt-jrn-col/group_vars/all
+++ b/tests/functional/centos/7/crypt-jrn-col/group_vars/all
@@ -11,7 +11,7 @@ osd_scenario: collocated
 osd_objectstore: filestore
 dmcrypt: true
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - '/dev/sdb'
 os_tuning_params:
   - { name: kernel.pid_max, value: 4194303 }
diff --git a/tests/functional/centos/7/docker-crypt-jrn-col/group_vars/all b/tests/functional/centos/7/docker-crypt-jrn-col/group_vars/all
index 983f1c265..b0093cbea 100644
--- a/tests/functional/centos/7/docker-crypt-jrn-col/group_vars/all
+++ b/tests/functional/centos/7/docker-crypt-jrn-col/group_vars/all
@@ -17,6 +17,6 @@ osd_objectstore: filestore
 dmcrypt: true
 ceph_rgw_civetweb_port: 8080
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - /dev/sdb
 ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
diff --git a/tests/functional/centos/7/docker-ded-jrn/group_vars/all b/tests/functional/centos/7/docker-ded-jrn/group_vars/all
index b719d082b..722c9e528 100644
--- a/tests/functional/centos/7/docker-ded-jrn/group_vars/all
+++ b/tests/functional/centos/7/docker-ded-jrn/group_vars/all
@@ -16,7 +16,7 @@ ceph_rgw_civetweb_port: 8080
 osd_objectstore: filestore
 osd_scenario: non-collocated
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - /dev/sdb
 dedicated_devices:
   - /dev/sdc
diff --git a/tests/functional/centos/7/docker/group_vars/all b/tests/functional/centos/7/docker/group_vars/all
index 7603b20af..5fe27d989 100644
--- a/tests/functional/centos/7/docker/group_vars/all
+++ b/tests/functional/centos/7/docker/group_vars/all
@@ -17,7 +17,7 @@ ceph_rgw_civetweb_port: 8080
 osd_objectstore: filestore
 ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - /dev/sdb
 ceph_osd_docker_run_script_path: /var/tmp
 rgw_override_bucket_index_max_shards: 16
diff --git a/tests/functional/centos/7/jrn-col/group_vars/all b/tests/functional/centos/7/jrn-col/group_vars/all
index 17340d4ed..392e9c194 100644
--- a/tests/functional/centos/7/jrn-col/group_vars/all
+++ b/tests/functional/centos/7/jrn-col/group_vars/all
@@ -10,7 +10,7 @@ radosgw_interface: eth1
 journal_size: 100
 osd_objectstore: filestore
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - '/dev/sdb'
 osd_scenario: collocated
 os_tuning_params:
diff --git a/tests/functional/ubuntu/16.04/cluster/group_vars/all b/tests/functional/ubuntu/16.04/cluster/group_vars/all
index 9548e2293..d3574a191 100644
--- a/tests/functional/ubuntu/16.04/cluster/group_vars/all
+++ b/tests/functional/ubuntu/16.04/cluster/group_vars/all
@@ -9,7 +9,7 @@ monitor_interface: eth1
 radosgw_interface: eth1
 journal_size: 100
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - '/dev/sdb'
 dedicated_devices:
   - '/dev/sdc'