purge-container: use lsblk to resolve parent device

Using `lsblk` to resolve the parent device is better than just removing the last
char when passing it to the zap container.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/2329/head
Guillaume Abrioux 2018-01-17 09:08:16 +01:00 committed by Sébastien Han
parent 58eb045d2f
commit 55298fa80c
1 changed file with 24 additions and 59 deletions

View File

@ -290,42 +290,6 @@
command: "docker rm -f {{ item }}" command: "docker rm -f {{ item }}"
with_items: "{{ prepare_containers.stdout_lines }}" with_items: "{{ prepare_containers.stdout_lines }}"
- name: see if ceph-disk-created data partitions are present
shell: |
ls /dev/disk/by-partlabel | grep -q "ceph.*.data"
failed_when: false
register: ceph_data_partlabels
- name: see if ceph-disk-created block partitions are present
shell: |
ls /dev/disk/by-partlabel | grep -q "ceph.*block$"
failed_when: false
register: ceph_block_partlabels
- name: see if ceph-disk-created journal partitions are present
shell: |
ls /dev/disk/by-partlabel | grep -q "ceph.*.journal"
failed_when: false
register: ceph_journal_partlabels
- name: see if ceph-disk-created block db partitions are present
shell: |
ls /dev/disk/by-partlabel | grep -q "ceph.*.block.db"
failed_when: false
register: ceph_db_partlabels
- name: see if ceph-disk-created block wal partitions are present
shell: |
ls /dev/disk/by-partlabel | grep -q "ceph.*.block.wal"
failed_when: false
register: ceph_wal_partlabels
- name: see if ceph-disk-created lockbox partitions are present
shell: |
ls /dev/disk/by-partlabel | grep -q "ceph.*.lockbox"
failed_when: false
register: ceph_lockbox_partlabels
# NOTE(leseb): hope someone will find a more elegant way one day... # NOTE(leseb): hope someone will find a more elegant way one day...
- name: see if encrypted partitions are present - name: see if encrypted partitions are present
shell: | shell: |
@ -342,63 +306,69 @@
- name: get ceph data partitions - name: get ceph data partitions
command: | command: |
blkid -o device -t PARTLABEL="ceph data" blkid -o device -t PARTLABEL="ceph data"
when: ceph_data_partlabels.rc == 0
failed_when: false failed_when: false
register: ceph_data_partition_to_erase_path register: ceph_data_partition_to_erase_path
- name: get ceph lockbox partitions - name: get ceph lockbox partitions
command: | command: |
blkid -o device -t PARTLABEL="ceph lockbox" blkid -o device -t PARTLABEL="ceph lockbox"
when: ceph_lockbox_partlabels.rc == 0
failed_when: false failed_when: false
register: ceph_lockbox_partition_to_erase_path register: ceph_lockbox_partition_to_erase_path
- name: get ceph block partitions - name: get ceph block partitions
command: | command: |
blkid -o device -t PARTLABEL="ceph block" blkid -o device -t PARTLABEL="ceph block"
when: ceph_block_partlabels.rc == 0
failed_when: false failed_when: false
register: ceph_block_partition_to_erase_path register: ceph_block_partition_to_erase_path
- name: get ceph journal partitions - name: get ceph journal partitions
command: | command: |
blkid -o device -t PARTLABEL="ceph journal" blkid -o device -t PARTLABEL="ceph journal"
when: ceph_journal_partlabels.rc == 0
failed_when: false failed_when: false
register: ceph_journal_partition_to_erase_path register: ceph_journal_partition_to_erase_path
- name: get ceph db partitions - name: get ceph db partitions
command: | command: |
blkid -o device -t PARTLABEL="ceph block.db" blkid -o device -t PARTLABEL="ceph block.db"
when: ceph_db_partlabels.rc == 0
failed_when: false failed_when: false
register: ceph_db_partition_to_erase_path register: ceph_db_partition_to_erase_path
- name: get ceph wal partitions - name: get ceph wal partitions
command: | command: |
blkid -o device -t PARTLABEL="ceph block.wal" blkid -o device -t PARTLABEL="ceph block.wal"
when: ceph_wal_partlabels.rc == 0
failed_when: false failed_when: false
register: ceph_wal_partition_to_erase_path register: ceph_wal_partition_to_erase_path
- name: set_fact combined_devices_list
set_fact:
combined_devices_list: "{{ ceph_data_partition_to_erase_path.get('stdout_lines', []) +
ceph_lockbox_partition_to_erase_path.get('stdout_lines', []) +
ceph_block_partition_to_erase_path.get('stdout_lines', []) +
ceph_journal_partition_to_erase_path.get('stdout_lines', []) +
ceph_db_partition_to_erase_path.get('stdout_lines', []) +
ceph_wal_partition_to_erase_path.get('stdout_lines', []) }}"
- name: resolve parent device
shell: $(lsblk --nodeps -no pkname "{{ item }}")
register: tmp_resolved_parent_device
with_items:
- "{{ combined_devices_list }}"
- name: set_fact resolved_parent_device
set_fact:
resolved_parent_device: "{{ tmp_resolved_parent_device.results | map(attribute='stdout') | list | unique }}"
- name: zap ceph osd disks - name: zap ceph osd disks
shell: | shell: |
docker run --rm \ docker run --rm \
--privileged=true \ --privileged=true \
--name ceph-osd-zap-{{ ansible_hostname }}-{{ item[:-1] | regex_replace('/dev/', '') }} \ --name ceph-osd-zap-{{ ansible_hostname }}-{{ item }} \
-v /dev/:/dev/ \ -v /dev/:/dev/ \
-e OSD_DEVICE={{ item[:-1] }} \ -e OSD_DEVICE=/dev/{{ item }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \ {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
zap_device zap_device
with_items: with_items:
- "{{ ceph_data_partition_to_erase_path.stdout_lines | default([]) }}" - "{{ combined_devices_list }}"
- "{{ ceph_lockbox_partition_to_erase_path.stdout_lines | default([]) }}"
- "{{ ceph_block_partition_to_erase_path.stdout_lines | default([]) }}"
- "{{ ceph_journal_partition_to_erase_path.stdout_lines | default([]) }}"
- "{{ ceph_db_partition_to_erase_path.stdout_lines | default([]) }}"
- "{{ ceph_wal_partition_to_erase_path.stdout_lines | default([]) }}"
when:
- (ceph_data_partlabels.rc == 0 or ceph_block_partlabels.rc == 0 or ceph_journal_partlabels.rc == 0 or ceph_db_partlabels.rc == 0 or ceph_wal_partlabels.rc == 0)
- name: wait until the zap containers die - name: wait until the zap containers die
shell: | shell: |
@ -412,15 +382,10 @@
- name: remove ceph osd zap disk container - name: remove ceph osd zap disk container
docker: docker:
image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
name: "ceph-osd-zap-{{ ansible_hostname }}-{{ item[:-1] | regex_replace('/dev/', '') }}" name: "ceph-osd-zap-{{ ansible_hostname }}-{{ item }}"
state: absent state: absent
with_items: with_items:
- "{{ ceph_data_partition_to_erase_path.stdout_lines | default([]) }}" - "{{ combined_devices_list }}"
- "{{ ceph_lockbox_partition_to_erase_path.stdout_lines | default([]) }}"
- "{{ ceph_block_partition_to_erase_path.stdout_lines | default([]) }}"
- "{{ ceph_journal_partition_to_erase_path.stdout_lines | default([]) }}"
- "{{ ceph_db_partition_to_erase_path.stdout_lines | default([]) }}"
- "{{ ceph_wal_partition_to_erase_path.stdout_lines | default([]) }}"
- name: remove ceph osd service - name: remove ceph osd service
file: file: