mirror of https://github.com/ceph/ceph-ansible.git
Merge pull request #2009 from ceph/fix-clean-pg
[skip ci] handler: do not test if pgs_num = 0
commit 7054abef99

@@ -270,53 +270,135 @@
   tasks:
 
+  - name: get all the running osds
+    shell: |
+      systemctl list-units | grep "loaded active" | grep -oE "ceph-osd@([0-9]{1,2}|[a-z]+).service"
+    register: osd_units
+
   - name: disable ceph osd service
     service:
-      name: "ceph-osd@{{ item | basename }}"
+      name: "ceph-osd@{{ item }}"
       state: stopped
       enabled: no
-    with_items: "{{ devices }}"
-    ignore_errors: true
+    with_items: "{{ osd_units.stdout_lines }}"
 
-  - name: resolve device link
-    command: readlink -f {{ item }}
-    changed_when: false
-    with_items: "{{ devices }}"
-    register: purge_devices_links
-
-  - name: set_fact devices generate device list when osd_auto_discovery
-    set_fact:
-      devices: "{{ devices | default([]) + [ item.stdout ] }}"
-    with_items: "{{ purge_devices_links.results }}"
+  - name: get prepare container
+    command: "docker ps -a -q --filter='name=ceph-osd-prepare'"
+    register: prepare_containers
 
   - name: remove ceph osd prepare container
-    docker:
-      image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "ceph-osd-prepare-{{ ansible_hostname }}{{ item | regex_replace('/dev/', '') }}"
+    command: "docker rm -f {{ item }}"
+    with_items: "{{ prepare_containers.stdout_lines }}"
+
+  - name: see if ceph-disk-created data partitions are present
+    shell: |
+      ls /dev/disk/by-partlabel | grep -q "ceph.*.data"
+    failed_when: false
+    register: ceph_data_partlabels
+
+  - name: see if ceph-disk-created block partitions are present
+    shell: |
+      ls /dev/disk/by-partlabel | grep -q "ceph.*block$"
+    failed_when: false
+    register: ceph_block_partlabels
+
+  - name: see if ceph-disk-created journal partitions are present
+    shell: |
+      ls /dev/disk/by-partlabel | grep -q "ceph.*.journal"
+    failed_when: false
+    register: ceph_journal_partlabels
+
+  - name: see if ceph-disk-created block db partitions are present
+    shell: |
+      ls /dev/disk/by-partlabel | grep -q "ceph.*.block.db"
+    failed_when: false
+    register: ceph_db_partlabels
+
+  - name: see if ceph-disk-created block wal partitions are present
+    shell: |
+      ls /dev/disk/by-partlabel | grep -q "ceph.*.block.wal"
+    failed_when: false
+    register: ceph_wal_partlabels
+
+  - name: see if ceph-disk-created lockbox partitions are present
+    shell: |
+      ls /dev/disk/by-partlabel | grep -q "ceph.*.lockbox"
+    failed_when: false
+    register: ceph_lockbox_partlabels
+
+  # NOTE(leseb): hope someone will find a more elegant way one day...
+  - name: see if encrypted partitions are present
+    shell: |
+      blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2
+    register: encrypted_ceph_partuuid
+
+  - name: remove osd mountpoint tree
+    file:
+      path: /var/lib/ceph/osd/
       state: absent
-    with_items: "{{ devices }}"
+    register: remove_osd_mountpoints
     ignore_errors: true
 
-  - name: remove ceph osd container
-    docker:
-      image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "ceph-osd-{{ ansible_hostname }}-{{ item | regex_replace('/dev/', '') }}"
-      state: absent
-    with_items: "{{ devices }}"
-    ignore_errors: true
+  - name: get ceph data partitions
+    shell: |
+      blkid | awk -F: '/ceph data/ { print $1 }'
+    when: ceph_data_partlabels.rc == 0
+    failed_when: false
+    register: ceph_data_partition_to_erase_path
+
+  - name: get ceph lockbox partitions
+    shell: |
+      blkid | awk '/ceph lockbox/ { sub (":", "", $1); print $1 }'
+    when: ceph_lockbox_partlabels.rc == 0
+    failed_when: false
+    register: ceph_lockbox_partition_to_erase_path
+
+  - name: get ceph block partitions
+    shell: |
+      blkid | awk '/ceph block"/ { sub (":", "", $1); print $1 }'
+    when: ceph_block_partlabels.rc == 0
+    failed_when: false
+    register: ceph_block_partition_to_erase_path
+
+  - name: get ceph journal partitions
+    shell: |
+      blkid | awk '/ceph journal/ { sub (":", "", $1); print $1 }'
+    when: ceph_journal_partlabels.rc == 0
+    failed_when: false
+    register: ceph_journal_partition_to_erase_path
+
+  - name: get ceph db partitions
+    shell: |
+      blkid | awk '/ceph block.db/ { sub (":", "", $1); print $1 }'
+    when: ceph_db_partlabels.rc == 0
+    failed_when: false
+    register: ceph_db_partition_to_erase_path
+
+  - name: get ceph wal partitions
+    shell: |
+      blkid | awk '/ceph block.wal/ { sub (":", "", $1); print $1 }'
+    when: ceph_wal_partlabels.rc == 0
+    failed_when: false
+    register: ceph_wal_partition_to_erase_path
 
   - name: zap ceph osd disks
     shell: |
-      docker run \
+      docker run --rm \
       --privileged=true \
-      --name ceph-osd-zap-{{ ansible_hostname }}-{{ item | regex_replace('/dev/', '') }} \
+      --name ceph-osd-zap-{{ ansible_hostname }}-{{ item[:-1] | regex_replace('/dev/', '') }} \
       -v /dev/:/dev/ \
-      -e OSD_DEVICE={{ item }} \
+      -e OSD_DEVICE={{ item[:-1] }} \
       {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
       zap_device
     with_items:
-      - "{{ devices }}"
-      - "{{ dedicated_devices|default([]) }}"
+      - "{{ ceph_data_partition_to_erase_path.stdout_lines | default([]) }}"
+      - "{{ ceph_lockbox_partition_to_erase_path.stdout_lines | default([]) }}"
+      - "{{ ceph_block_partition_to_erase_path.stdout_lines | default([]) }}"
+      - "{{ ceph_journal_partition_to_erase_path.stdout_lines | default([]) }}"
+      - "{{ ceph_db_partition_to_erase_path.stdout_lines | default([]) }}"
+      - "{{ ceph_wal_partition_to_erase_path.stdout_lines | default([]) }}"
+    when:
+      - (ceph_data_partlabels.rc == 0 or ceph_block_partlabels.rc == 0 or ceph_journal_partlabels.rc == 0 or ceph_db_partlabels.rc == 0 or ceph_wal_partlabels.rc == 0)
 
   - name: wait until the zap containers die
     shell: |
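The hunk above replaces the old fixed `devices` loops with runtime discovery: the playbook now asks the host which ceph-disk-created partitions actually exist (by partlabel) and feeds only those paths to the zap container. A minimal Python sketch of that flow, assuming udev's /dev/disk/by-partlabel layout; the `ceph_partitions` helper is hypothetical and only mirrors the `ls | grep` and `blkid | awk` pipelines in the diff (real partlabels may encode spaces, e.g. as \x20, which the loose `ceph.*` match tolerates):

import os
import re

BY_PARTLABEL = "/dev/disk/by-partlabel"

def ceph_partitions(kind):
    # Resolve every partlabel matching e.g. "ceph.*data" to its device path,
    # like the grep/awk filters above.
    if not os.path.isdir(BY_PARTLABEL):
        return []
    pattern = re.compile(r"ceph.*" + re.escape(kind))
    return [os.path.realpath(os.path.join(BY_PARTLABEL, name))
            for name in os.listdir(BY_PARTLABEL)
            if pattern.search(name)]

for kind in ("data", "block", "journal", "block.db", "block.wal", "lockbox"):
    for dev in ceph_partitions(kind):
        print("would zap", dev)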
@@ -330,11 +412,15 @@
   - name: remove ceph osd zap disk container
     docker:
       image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "ceph-osd-zap-{{ ansible_hostname }}-{{ item | regex_replace('/dev/', '') }}"
+      name: "ceph-osd-zap-{{ ansible_hostname }}-{{ item[:-1] | regex_replace('/dev/', '') }}"
       state: absent
     with_items:
-      - "{{ devices }}"
-      - "{{ dedicated_devices|default([]) }}"
+      - "{{ ceph_data_partition_to_erase_path.stdout_lines | default([]) }}"
+      - "{{ ceph_lockbox_partition_to_erase_path.stdout_lines | default([]) }}"
+      - "{{ ceph_block_partition_to_erase_path.stdout_lines | default([]) }}"
+      - "{{ ceph_journal_partition_to_erase_path.stdout_lines | default([]) }}"
+      - "{{ ceph_db_partition_to_erase_path.stdout_lines | default([]) }}"
+      - "{{ ceph_wal_partition_to_erase_path.stdout_lines | default([]) }}"
 
   - name: remove ceph osd service
     file:
@@ -147,7 +147,21 @@
     when:
       - mon_host_count | int == 1
 
-  - name: waiting for the monitor to join the quorum...
+  - name: get num_pgs - non container
+    command: ceph --cluster "{{ cluster }}" -s --format json
+    register: ceph_pgs
+    delegate_to: "{{ mon_host }}"
+    when:
+      - not containerized_deployment
+
+  - name: get num_pgs - container
+    command: docker exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }} ceph --cluster "{{ cluster }}" -s --format json
+    register: ceph_pgs
+    delegate_to: "{{ mon_host }}"
+    when:
+      - containerized_deployment
+
+  - name: non container | waiting for the monitor to join the quorum...
     command: ceph --cluster "{{ cluster }}" -s --format json
     register: ceph_health_raw
     until: >
@@ -157,8 +171,9 @@
     delegate_to: "{{ mon_host }}"
     when:
       - not containerized_deployment
+      - (ceph_pgs.stdout | from_json).pgmap.num_pgs != "0"
 
-  - name: waiting for the containerized monitor to join the quorum...
+  - name: container | waiting for the containerized monitor to join the quorum...
     command: docker exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }} ceph --cluster "{{ cluster }}" -s --format json
     register: ceph_health_raw
     until: >
@@ -168,6 +183,7 @@
     delegate_to: "{{ mon_host }}"
     when:
       - containerized_deployment
+      - (ceph_pgs.stdout | from_json).pgmap.num_pgs != "0"
 
   - name: set osd flags
     command: ceph osd set {{ item }} --cluster {{ cluster }}
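The `get num_pgs` tasks and the added `when` conditions implement the fix named in the commit title: on a cluster with no pools, `ceph -s` reports `num_pgs: 0`, and there is no placement-group state to converge on, so the quorum/clean-PG wait is skipped. A sketch of the gate in plain Python; the subprocess call stands in for the `command` task, and the field names come from the same `ceph -s --format json` output the tasks register:

import json
import subprocess

def cluster_has_pgs(cluster="ceph"):
    # Same data the playbook stores in ceph_pgs and filters with from_json
    out = subprocess.check_output(
        ["ceph", "--cluster", cluster, "-s", "--format", "json"])
    return json.loads(out)["pgmap"]["num_pgs"] != 0

# The waiting tasks only run when this is true; with zero PGs the wait
# would never complete, so it is skipped entirely.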
@@ -91,13 +91,16 @@
     with_items: "{{ osd_hosts }}"
     delegate_to: "{{ item }}"
     failed_when: false
+    when:
+      - not containerized_deployment
 
   - name: fail when admin key is not present
     fail:
       msg: "The Ceph admin key is not present on the OSD node, please add it and remove it after the playbook is done."
     with_items: "{{ ceph_admin_key.results }}"
     when:
+      - not containerized_deployment
       - item.stat.exists == false
 
   # NOTE(leseb): using '>' is the only way I could have the command working
   - name: find osd device based on the id
@@ -127,8 +130,6 @@
 
   - name: deactivating osd(s)
     command: ceph-disk deactivate --cluster {{ cluster }} --deactivate-by-id {{ item.0 }} --mark-out
-    register: deactivate
-    ignore_errors: yes
     run_once: true
     with_together:
       - "{{ osd_to_kill.split(',') }}"
@@ -137,21 +138,8 @@
     when:
       - not containerized_deployment
 
-  - name: set osd(s) out when ceph-disk deactivating fail
-    command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd out osd.{{ item.0 }}"
-    delegate_to: "{{ groups[mon_group_name][0] }}"
-    with_together:
-      - "{{ osd_to_kill.split(',') }}"
-      - "{{ deactivate.results }}"
-    when:
-      - not containerized_deployment
-      - not item.1.get("skipped")
-      - item.1.stderr|length > 0
-
   - name: destroying osd(s)
     command: ceph-disk destroy --cluster {{ cluster }} --destroy-by-id {{ item.0 }} --zap
-    register: destroy
-    ignore_errors: yes
     run_once: true
     with_together:
       - "{{ osd_to_kill.split(',') }}"
@@ -161,32 +149,20 @@
       - not containerized_deployment
 
   - name: remove osd(s) from crush_map when ceph-disk destroy fail
-    command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd crush remove osd.{{ item.0 }}"
+    command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd crush remove osd.{{ item }}"
     run_once: true
     delegate_to: "{{ groups[mon_group_name][0] }}"
-    with_together:
-      - "{{ osd_to_kill.split(',') }}"
-      - "{{ destroy.results }}"
-    when:
-      - (item.1.get("skipped") or item.1.stderr|length > 0)
+    with_items: "{{ osd_to_kill.split(',') }}"
 
   - name: delete osd(s) auth key when ceph-disk destroy fail
-    command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} auth del osd.{{ item.0 }}"
+    command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} auth del osd.{{ item }}"
     delegate_to: "{{ groups[mon_group_name][0] }}"
-    with_together:
-      - "{{ osd_to_kill.split(',') }}"
-      - "{{ destroy.results }}"
-    when:
-      - (item.1.get("skipped") or item.1.stderr|length > 0)
+    with_items: "{{ osd_to_kill.split(',') }}"
 
   - name: deallocate osd(s) id when ceph-disk destroy fail
-    command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd rm {{ item.0 }}"
+    command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd rm {{ item }}"
     delegate_to: "{{ groups[mon_group_name][0] }}"
-    with_together:
-      - "{{ osd_to_kill.split(',') }}"
-      - "{{ destroy.results }}"
-    when:
-      - (item.1.get("skipped") or item.1.stderr|length > 0)
+    with_items: "{{ osd_to_kill.split(',') }}"
 
   - name: show ceph health
     command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s"
@@ -280,14 +280,16 @@
     - ceph-osd
 
   post_tasks:
-    - name: waiting for clean pgs...
-      shell: |
-        test "[""$(docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')""]" = "$(docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print [ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if i["state_name"] == "active+clean"]')"
-      register: result
-      until: result.rc == 0
+    - name: container - waiting for clean pgs...
+      command: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s --format json"
+      register: ceph_health_post
+      until: >
+        ((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | length) == 1
+        and
+        (ceph_health_post.stdout | from_json).pgmap.pgs_by_state.0.state_name == "active+clean"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
       retries: "{{ health_osd_check_retries }}"
       delay: "{{ health_osd_check_delay }}"
-      delegate_to: "{{ groups[mon_group_name][0] }}"
 
 
   - name: switching from non-containerized to containerized ceph mds
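The new `until:` condition parses the registered JSON instead of driving the old shell `test` one-liner: the cluster counts as clean when `pgs_by_state` holds exactly one bucket and that bucket is `active+clean`. Equivalent logic as a small Python sketch; the sample document is illustrative, with only the fields the task inspects:

import json

def all_pgs_active_clean(status_json):
    # Mirrors the until: condition above on the ceph -s --format json output
    states = json.loads(status_json)["pgmap"]["pgs_by_state"]
    return len(states) == 1 and states[0]["state_name"] == "active+clean"

sample = ('{"pgmap": {"num_pgs": 64, "pgs_by_state":'
          ' [{"state_name": "active+clean", "count": 64}]}}')
assert all_pgs_active_clean(sample)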
@@ -45,13 +45,15 @@
 
 - name: install red hat storage repository key for debian systems
   apt_key:
-    file: "{{ ceph_rhcs_repository_path }}/RPM-GPG-KEY-redhat-release"
+    file: "{{ ceph_rhcs_repository_path }}/MON/release.asc"
     state: present
 
 - name: add red hat storage repository for debian systems
-  template:
-    src: "{{ role_path }}/templates/redhat_storage_repo.j2"
-    dest: /etc/apt/sources.list.d/rh_storage.list
-    owner: root
-    group: root
-    mode: 0644
+  apt_repository:
+    repo: "deb file://{{ ceph_rhcs_repository_path }}/{{ item }} {{ ansible_lsb.codename }} main"
+    state: present
+  changed_when: false
+  with_items:
+    - MON
+    - OSD
+    - Tools
@@ -25,6 +25,9 @@
     - mon_group_name in group_names
     - not containerized_deployment
     - mon_socket_stat.rc == 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: restart ceph mon daemon(s) - container
   command: /tmp/restart_mon_daemon.sh
@@ -33,7 +36,10 @@
     # We do not want to run these checks on initial deployment (`socket.rc == 0`)
     - mon_group_name in group_names
     - containerized_deployment
-    - ceph_mon_container_stat.stdout_lines|length != 0
+    - ceph_mon_container_stat.get('stdout_lines', [])|length != 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 # This does not just restart OSDs but everything else too. Unfortunately
 # at this time the ansible role does not have an OSD id list to use
@@ -63,6 +69,9 @@
     - handler_health_osd_check
     # See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
     - inventory_hostname in play_hosts
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: restart ceph osds daemon(s) - container
   command: /tmp/restart_osd_daemon.sh
@@ -72,10 +81,13 @@
     # except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
     - osd_group_name in group_names
     - containerized_deployment
-    - ((crush_location is defined and crush_location) or ceph_osd_container_stat.stdout_lines|length != 0)
+    - ((crush_location is defined and crush_location) or ceph_osd_container_stat.get('stdout_lines', [])|length != 0)
     - handler_health_osd_check
     # See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
     - inventory_hostname in play_hosts
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: copy mds restart script
   template:
@@ -97,6 +109,9 @@
     - mds_group_name in group_names
     - not containerized_deployment
     - mds_socket_stat.rc == 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: restart ceph mds daemon(s) - container
   command: /tmp/restart_mds_daemon.sh
@@ -105,7 +120,10 @@
     # We do not want to run these checks on initial deployment (`socket.rc == 0`)
     - mds_group_name in group_names
     - containerized_deployment
-    - ceph_mds_container_stat.stdout_lines|length != 0
+    - ceph_mds_container_stat.get('stdout_lines', [])|length != 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: copy rgw restart script
   template:
@@ -127,6 +145,9 @@
     - rgw_group_name in group_names
     - not containerized_deployment
     - rgw_socket_stat.rc == 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: restart ceph rgw daemon(s) - container
   command: /tmp/restart_rgw_daemon.sh
@@ -135,7 +156,10 @@
     # We do not want to run these checks on initial deployment (`socket.rc == 0`)
     - rgw_group_name in group_names
     - containerized_deployment
-    - ceph_rgw_container_stat.stdout_lines|length != 0
+    - ceph_rgw_container_stat.get('stdout_lines', [])|length != 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: copy nfs restart script
   template:
@@ -157,6 +181,9 @@
     - nfs_group_name in group_names
     - not containerized_deployment
     - nfs_socket_stat.rc == 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: restart ceph nfs daemon(s) - container
   command: /tmp/restart_nfs_daemon.sh
@@ -165,7 +192,10 @@
     # We do not want to run these checks on initial deployment (`socket.rc == 0`)
     - nfs_group_name in group_names
     - containerized_deployment
-    - ceph_nfs_container_stat.stdout_lines|length != 0
+    - ceph_nfs_container_stat.get('stdout_lines', [])|length != 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: copy rbd mirror restart script
   template:
@@ -187,6 +217,9 @@
     - rbdmirror_group_name in group_names
     - not containerized_deployment
     - rbd_mirror_socket_stat.rc == 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: restart ceph rbd mirror daemon(s) - container
   command: /tmp/restart_rbd_mirror_daemon.sh
@@ -195,7 +228,10 @@
     # We do not want to run these checks on initial deployment (`socket.rc == 0`)
     - rbdmirror_group_name in group_names
     - containerized_deployment
-    - ceph_rbd_mirror_container_stat.stdout_lines|length != 0
+    - ceph_rbd_mirror_container_stat.get('stdout_lines', [])|length != 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: copy mgr restart script
   template:
@@ -217,6 +253,9 @@
     - mgr_group_name in group_names
     - not containerized_deployment
     - mgr_socket_stat.rc == 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
 
 - name: restart ceph mgr daemon(s) - container
   command: /tmp/restart_mgr_daemon.sh
@@ -225,4 +264,7 @@
     # We do not want to run these checks on initial deployment (`socket.rc == 0`)
     - mgr_group_name in group_names
     - containerized_deployment
-    - ceph_mgr_container_stat.stdout_lines|length != 0
+    - ceph_mgr_container_stat.get('stdout_lines', [])|length != 0
+  with_items: "{{ ansible_play_batch }}"
+  run_once: true
+  delegate_to: "{{ item }}"
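All of the `.get('stdout_lines', [])` changes in these handler hunks guard against registered results from skipped tasks, which are plain dicts without a `stdout_lines` key. A short Python illustration of why the lookup style matters; the variable is hypothetical:

skipped = {"skipped": True, "changed": False}   # no stdout_lines key present
assert skipped.get("stdout_lines", []) == []    # condition evaluates, harmlessly false
# A direct skipped["stdout_lines"] lookup (or attribute access in the Jinja
# condition) would instead raise an error and abort the handler.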
@@ -5,6 +5,10 @@ DELAY="{{ handler_health_osd_check_delay }}"
 CEPH_CLI="--name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring --cluster {{ cluster }}"
 
 check_pgs() {
+  num_pgs=$($docker_exec ceph $CEPH_CLI -s -f json|python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')
+  if [[ "$num_pgs" == "0" ]]; then
+    return 0
+  fi
   while [ $RETRIES -ne 0 ]; do
     test "[""$($docker_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')""]" = "$($docker_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print [ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if i["state_name"] == "active+clean"]')"
     RET=$?
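Taken together, the new guard and the existing loop make `check_pgs()` succeed immediately on a PG-less cluster and otherwise keep polling until every PG is active+clean. The decision for a single poll, restated as a Python sketch of the shell logic above (the script itself retries in its while loop):

import json

def pgs_ok(status_json):
    # Early exit when num_pgs is 0, as the added shell guard does
    pgmap = json.loads(status_json)["pgmap"]
    if pgmap["num_pgs"] == 0:
        return True
    # Otherwise require every PG to be counted under active+clean
    clean = sum(state["count"] for state in pgmap.get("pgs_by_state", [])
                if state["state_name"] == "active+clean")
    return clean == pgmap["num_pgs"]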
@@ -5,6 +5,7 @@
 - name: prepare ceph containerized osd disk collocated
   shell: |
     docker run --net=host \
+    --rm \
     --pid=host \
     --privileged=true \
     --name=ceph-osd-prepare-{{ ansible_hostname }}-{{ item.1 | regex_replace('/dev/', '') }} \
@@ -30,6 +31,7 @@
 - name: automatic prepare ceph containerized osd disk collocated
   shell: |
     docker run --net=host \
+    --rm \
     --pid=host \
     --privileged=true \
     --name=ceph-osd-prepare-{{ ansible_hostname }}-{{ item.key }} \
@@ -5,6 +5,7 @@
 - name: prepare ceph "{{ osd_objectstore }}" containerized osd disk(s) non-collocated
   shell: |
     docker run --net=host \
+    --rm \
     --pid=host \
     --privileged=true \
     --name=ceph-osd-prepare-{{ ansible_hostname }}-{{ item.1 | regex_replace('/dev/', '') }} \
@@ -32,6 +33,7 @@
 - name: prepare ceph "{{ osd_objectstore }}" containerized osd disk(s) non-collocated with a dedicated device for db and wal
   shell: |
     docker run --net=host \
+    --rm \
     --pid=host \
     --privileged=true \
     --name=ceph-osd-prepare-{{ ansible_hostname }}-{{ item.1 | regex_replace('/dev/', '') }} \
@@ -10,7 +10,7 @@ cluster_network: "192.168.36.0/24"
 journal_size: 100
 osd_objectstore: "bluestore"
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - '/dev/sdb'
 dedicated_devices:
   - '/dev/sdc'
@@ -11,7 +11,7 @@ osd_scenario: non-collocated
 dmcrypt: true
 osd_objectstore: "bluestore"
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - '/dev/sdb'
 dedicated_devices:
   - '/dev/sdc'
@@ -11,7 +11,7 @@ osd_scenario: collocated
 dmcrypt: true
 osd_objectstore: bluestore
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - '/dev/sdb'
 os_tuning_params:
   - { name: kernel.pid_max, value: 4194303 }
@@ -11,7 +11,7 @@ osd_scenario: collocated
 dmcrypt: true
 osd_objectstore: bluestore
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - '/dev/sdb'
 os_tuning_params:
   - { name: kernel.pid_max, value: 4194303 }
@@ -16,7 +16,7 @@ ceph_rgw_civetweb_port: 8080
 osd_scenario: non-collocated
 osd_objectstore: bluestore
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - /dev/sdb
 dedicated_devices:
   - /dev/sdc
@@ -9,7 +9,7 @@ public_network: "192.168.3.0/24"
 cluster_network: "192.168.4.0/24"
 journal_size: 100
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - '/dev/sdb'
 osd_scenario: collocated
 osd_objectstore: "bluestore"
@@ -9,7 +9,7 @@ journal_size: 100
 radosgw_interface: eth1
 osd_objectstore: filestore
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - '/dev/sdb'
 dedicated_devices:
   - '/dev/sdc'
@@ -11,7 +11,7 @@ osd_scenario: non-collocated
 dmcrypt: true
 osd_objectstore: filestore
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - '/dev/sdb'
 dedicated_devices:
   - '/dev/sdc'
@@ -11,7 +11,7 @@ osd_scenario: collocated
 osd_objectstore: filestore
 dmcrypt: true
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - '/dev/sdb'
 os_tuning_params:
   - { name: kernel.pid_max, value: 4194303 }
@@ -17,6 +17,6 @@ osd_objectstore: filestore
 dmcrypt: true
 ceph_rgw_civetweb_port: 8080
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - /dev/sdb
 ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
@@ -16,7 +16,7 @@ ceph_rgw_civetweb_port: 8080
 osd_objectstore: filestore
 osd_scenario: non-collocated
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - /dev/sdb
 dedicated_devices:
   - /dev/sdc
@@ -17,7 +17,7 @@ ceph_rgw_civetweb_port: 8080
 osd_objectstore: filestore
 ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - /dev/sdb
 ceph_osd_docker_run_script_path: /var/tmp
 rgw_override_bucket_index_max_shards: 16
@@ -10,7 +10,7 @@ radosgw_interface: eth1
 journal_size: 100
 osd_objectstore: filestore
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1.0'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - '/dev/sdb'
 osd_scenario: collocated
 os_tuning_params:
@@ -9,7 +9,7 @@ monitor_interface: eth1
 radosgw_interface: eth1
 journal_size: 100
 devices:
-  - '/dev/disk/by-path/pci-0000:00:01.1-ata-1'
+  - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
   - '/dev/sdb'
 dedicated_devices:
   - '/dev/sdc'