purge-container: use lsblk to resolve parent device

Using `lsblk` to resolve the parent device is more reliable than simply
stripping the last character of the partition path before passing it to the
zap container.
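
To illustrate the problem (the device names here are hypothetical examples,
not taken from the playbook): stripping the last character only works for
single-digit partitions on simple device names, whereas `lsblk` asks the
kernel for the actual parent:

    # naive: drop the last character of the partition path
    #   /dev/sdb1      -> /dev/sdb       (happens to work)
    #   /dev/sdb12     -> /dev/sdb1      (wrong device)
    #   /dev/nvme0n1p1 -> /dev/nvme0n1p  (no such node)
    # lsblk resolves the real parent regardless of the naming scheme:
    lsblk --nodeps -no pkname /dev/nvme0n1p1
    # -> nvme0n1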

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
Guillaume Abrioux 2018-01-17 09:08:16 +01:00 committed by Sébastien Han
parent 58eb045d2f
commit 55298fa80c
1 changed file with 24 additions and 59 deletions

@@ -290,42 +290,6 @@
       command: "docker rm -f {{ item }}"
       with_items: "{{ prepare_containers.stdout_lines }}"
 
-    - name: see if ceph-disk-created data partitions are present
-      shell: |
-        ls /dev/disk/by-partlabel | grep -q "ceph.*.data"
-      failed_when: false
-      register: ceph_data_partlabels
-
-    - name: see if ceph-disk-created block partitions are present
-      shell: |
-        ls /dev/disk/by-partlabel | grep -q "ceph.*block$"
-      failed_when: false
-      register: ceph_block_partlabels
-
-    - name: see if ceph-disk-created journal partitions are present
-      shell: |
-        ls /dev/disk/by-partlabel | grep -q "ceph.*.journal"
-      failed_when: false
-      register: ceph_journal_partlabels
-
-    - name: see if ceph-disk-created block db partitions are present
-      shell: |
-        ls /dev/disk/by-partlabel | grep -q "ceph.*.block.db"
-      failed_when: false
-      register: ceph_db_partlabels
-
-    - name: see if ceph-disk-created block wal partitions are present
-      shell: |
-        ls /dev/disk/by-partlabel | grep -q "ceph.*.block.wal"
-      failed_when: false
-      register: ceph_wal_partlabels
-
-    - name: see if ceph-disk-created lockbox partitions are present
-      shell: |
-        ls /dev/disk/by-partlabel | grep -q "ceph.*.lockbox"
-      failed_when: false
-      register: ceph_lockbox_partlabels
-
     # NOTE(leseb): hope someone will find a more elegant way one day...
     - name: see if encrypted partitions are present
       shell: |
@@ -342,63 +306,69 @@
     - name: get ceph data partitions
       command: |
         blkid -o device -t PARTLABEL="ceph data"
-      when: ceph_data_partlabels.rc == 0
       failed_when: false
       register: ceph_data_partition_to_erase_path
 
     - name: get ceph lockbox partitions
       command: |
         blkid -o device -t PARTLABEL="ceph lockbox"
-      when: ceph_lockbox_partlabels.rc == 0
       failed_when: false
       register: ceph_lockbox_partition_to_erase_path
 
     - name: get ceph block partitions
       command: |
         blkid -o device -t PARTLABEL="ceph block"
-      when: ceph_block_partlabels.rc == 0
       failed_when: false
       register: ceph_block_partition_to_erase_path
 
     - name: get ceph journal partitions
       command: |
         blkid -o device -t PARTLABEL="ceph journal"
-      when: ceph_journal_partlabels.rc == 0
       failed_when: false
       register: ceph_journal_partition_to_erase_path
 
     - name: get ceph db partitions
       command: |
         blkid -o device -t PARTLABEL="ceph block.db"
-      when: ceph_db_partlabels.rc == 0
       failed_when: false
       register: ceph_db_partition_to_erase_path
 
     - name: get ceph wal partitions
       command: |
         blkid -o device -t PARTLABEL="ceph block.wal"
-      when: ceph_wal_partlabels.rc == 0
       failed_when: false
       register: ceph_wal_partition_to_erase_path
 
+    - name: set_fact combined_devices_list
+      set_fact:
+        combined_devices_list: "{{ ceph_data_partition_to_erase_path.get('stdout_lines', []) +
+                                   ceph_lockbox_partition_to_erase_path.get('stdout_lines', []) +
+                                   ceph_block_partition_to_erase_path.get('stdout_lines', []) +
+                                   ceph_journal_partition_to_erase_path.get('stdout_lines', []) +
+                                   ceph_db_partition_to_erase_path.get('stdout_lines', []) +
+                                   ceph_wal_partition_to_erase_path.get('stdout_lines', []) }}"
+
+    - name: resolve parent device
+      shell: lsblk --nodeps -no pkname "{{ item }}"
+      register: tmp_resolved_parent_device
+      with_items:
+        - "{{ combined_devices_list }}"
+
+    - name: set_fact resolved_parent_device
+      set_fact:
+        resolved_parent_device: "{{ tmp_resolved_parent_device.results | map(attribute='stdout') | list | unique }}"
+
     - name: zap ceph osd disks
       shell: |
         docker run --rm \
         --privileged=true \
-        --name ceph-osd-zap-{{ ansible_hostname }}-{{ item[:-1] | regex_replace('/dev/', '') }} \
+        --name ceph-osd-zap-{{ ansible_hostname }}-{{ item }} \
         -v /dev/:/dev/ \
-        -e OSD_DEVICE={{ item[:-1] }} \
+        -e OSD_DEVICE=/dev/{{ item }} \
         {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
         zap_device
       with_items:
-        - "{{ ceph_data_partition_to_erase_path.stdout_lines | default([]) }}"
-        - "{{ ceph_lockbox_partition_to_erase_path.stdout_lines | default([]) }}"
-        - "{{ ceph_block_partition_to_erase_path.stdout_lines | default([]) }}"
-        - "{{ ceph_journal_partition_to_erase_path.stdout_lines | default([]) }}"
-        - "{{ ceph_db_partition_to_erase_path.stdout_lines | default([]) }}"
-        - "{{ ceph_wal_partition_to_erase_path.stdout_lines | default([]) }}"
-      when:
-        - (ceph_data_partlabels.rc == 0 or ceph_block_partlabels.rc == 0 or ceph_journal_partlabels.rc == 0 or ceph_db_partlabels.rc == 0 or ceph_wal_partlabels.rc == 0)
+        - "{{ resolved_parent_device }}"
 
     - name: wait until the zap containers die
       shell: |
@@ -412,15 +382,10 @@
     - name: remove ceph osd zap disk container
       docker:
         image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-        name: "ceph-osd-zap-{{ ansible_hostname }}-{{ item[:-1] | regex_replace('/dev/', '') }}"
+        name: "ceph-osd-zap-{{ ansible_hostname }}-{{ item }}"
         state: absent
       with_items:
-        - "{{ ceph_data_partition_to_erase_path.stdout_lines | default([]) }}"
-        - "{{ ceph_lockbox_partition_to_erase_path.stdout_lines | default([]) }}"
-        - "{{ ceph_block_partition_to_erase_path.stdout_lines | default([]) }}"
-        - "{{ ceph_journal_partition_to_erase_path.stdout_lines | default([]) }}"
-        - "{{ ceph_db_partition_to_erase_path.stdout_lines | default([]) }}"
-        - "{{ ceph_wal_partition_to_erase_path.stdout_lines | default([]) }}"
+        - "{{ resolved_parent_device }}"
 
     - name: remove ceph osd service
       file:
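
End-to-end, the purge flow after this change looks roughly as follows; the
hostname `osd0`, the partition `/dev/sdb1`, and the image reference below are
hypothetical placeholders, not values from the playbook:

    # 1. blkid collects the ceph-disk partitions into combined_devices_list
    blkid -o device -t PARTLABEL="ceph data"    # -> /dev/sdb1
    # 2. lsblk resolves each partition to its parent device
    lsblk --nodeps -no pkname /dev/sdb1         # -> sdb
    # 3. one zap container runs per unique parent device
    docker run --rm --privileged=true \
      --name ceph-osd-zap-osd0-sdb \
      -v /dev/:/dev/ \
      -e OSD_DEVICE=/dev/sdb \
      ceph/daemon:latest zap_device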