Merge pull request #1178 from zhsj/dev-partition

Add prepare osd with partition devices in raw_multi_journal
Sébastien Han 2016-12-15 22:50:23 +01:00 committed by GitHub
commit faabfdcefe
4 changed files with 82 additions and 18 deletions

View File

@@ -0,0 +1,49 @@
---
# This playbook creates a custom partition layout on your OSD hosts.
# You should define the `devices` variable for every host.
#
# For example, in host_vars/hostname1
#
# devices:
#   - device_name: sdb
#     partitions:
#       - index: 1
#         size: 10G
#         type: data
#       - index: 2
#         size: 5G
#         type: journal
#   - device_name: sdc
#     partitions:
#       - index: 1
#         size: 10G
#         type: data
#       - index: 2
#         size: 5G
#         type: journal
#

- vars:
    osd_group_name: osds
    journal_typecode: 45b0969e-9b03-4f30-b4c6-b4b80ceff106
    data_typecode: 4fbd7e29-9d25-41b8-afd0-062c0ceff05d
  hosts:
    - "{{ osd_group_name }}"
  tasks:
    - name: install sgdisk (gdisk)
      package:
        name: gdisk
        state: present

    - name: erase all previous partitions (dangerous!!!)
      shell: sgdisk --zap-all -- /dev/{{item.device_name}}
      with_items: "{{ devices }}"

    - name: make osd partitions
      shell: >
        sgdisk --new={{item.1.index}}:0:+{{item.1.size}} "--change-name={{item.1.index}}:ceph {{item.1.type}}"
        "--typecode={{item.1.index}}:{% if item.1.type=='data' %}{{data_typecode}}{% else %}{{journal_typecode}}{% endif %}"
        --mbrtogpt -- /dev/{{item.0.device_name}}
      with_subelements:
        - "{{ devices }}"
        - partitions
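
For the example `devices` definition in the header comment, the "make osd partitions" task expands to two sgdisk calls per disk, one per partition. A rough sketch of what gets run for /dev/sdb (the /dev/sdc disk is handled identically):

  sgdisk --new=1:0:+10G "--change-name=1:ceph data" "--typecode=1:4fbd7e29-9d25-41b8-afd0-062c0ceff05d" --mbrtogpt -- /dev/sdb
  sgdisk --new=2:0:+5G "--change-name=2:ceph journal" "--typecode=2:45b0969e-9b03-4f30-b4c6-b4b80ceff106" --mbrtogpt -- /dev/sdb

The two typecode GUIDs are the well-known GPT partition type codes for Ceph OSD data and Ceph journal partitions, which is presumably why the playbook tags the partitions with them up front.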

View File

@@ -24,14 +24,26 @@
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
 
+- name: check the journal device is partition
+  shell: "readlink -f {{ item }} | egrep '/dev/([hsv]d[a-z]{1,2}|cciss/c[0-9]d[0-9]p|nvme[0-9]n[0-9]p)[0-9]{1,2}$'"
+  with_items: "{{ raw_journal_devices }}"
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: journal_ispartition_results
+
 - name: check the partition status of the journal devices
-  shell: "parted --script {{ item }} print > /dev/null 2>&1"
-  with_items: "{{ raw_journal_devices|unique }}"
+  shell: "parted --script {{ item.1 }} print > /dev/null 2>&1"
+  with_together:
+    - "{{ journal_ispartition_results.results }}"
+    - "{{ raw_journal_devices|unique }}"
   changed_when: false
   failed_when: false
   always_run: true
   register: journal_partition_status
-  when: raw_multi_journal or dmcrypt_dedicated_journal
+  when:
+    - (raw_multi_journal or dmcrypt_dedicated_journal)
+    - item.0.rc != 0
 
 - name: fix partitions gpt header or labels of the journal devices
   shell: "sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }} || sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }}"
@@ -40,5 +52,6 @@
     - "{{ raw_journal_devices|unique }}"
   changed_when: false
   when:
-    - raw_multi_journal or dmcrypt_dedicated_journal
-    - item.0.rc != 0
+    - (raw_multi_journal or dmcrypt_dedicated_journal)
+    - not item.0.get("skipped")
+    - item.0.get("rc", 0) != 0
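
Taken together with the playbook above, these changes let `raw_journal_devices` point at existing partitions instead of whole disks: the new readlink/egrep task records which entries are partitions, and the parted check (and the GPT-header zap further down) only runs for entries that are not. A minimal host_vars sketch of the setup this targets, assuming the sdb/sdc layout from the example playbook (the device paths are illustrative, not taken from the diff):

  raw_multi_journal: true
  devices:
    - /dev/sdb1
    - /dev/sdc1
  raw_journal_devices:
    - /dev/sdb2
    - /dev/sdc2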

View File

@@ -8,12 +8,15 @@
   register: ispartition_results
 
 - name: check the partition status of the osd disks
-  shell: "parted --script {{ item }} print > /dev/null 2>&1"
-  with_items: "{{ devices }}"
+  shell: "parted --script {{ item.1 }} print > /dev/null 2>&1"
+  with_together:
+    - "{{ ispartition_results.results }}"
+    - "{{ devices }}"
   changed_when: false
   failed_when: false
   always_run: true
   register: osd_partition_status_results
+  when: item.0.rc != 0
 
 # NOTE: The following calls to sgdisk are retried because sgdisk is known to
 # not fully wipe a device the first time around. There is no need to halt execution
@@ -26,12 +29,17 @@
     - "{{ osd_partition_status_results.results }}"
     - "{{ devices }}"
   changed_when: false
-  when: item.0.rc != 0
+  when:
+    - not item.0.get("skipped")
+    - item.0.get("rc", 0) != 0
 
 - name: check if a partition named 'ceph' exists
-  shell: "parted --script {{ item }} print | egrep -sq '^ 1.*ceph'"
-  with_items: "{{ devices }}"
+  shell: "parted --script {{ item.1 }} print | egrep -sq '^ 1.*ceph'"
+  with_together:
+    - "{{ ispartition_results.results }}"
+    - "{{ devices }}"
   changed_when: false
   failed_when: false
   always_run: true
   register: parted_results
+  when: item.0.rc != 0
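
The skipped/rc pattern used throughout this file exists because a loop iteration skipped by its `when:` clause still registers a result, but one without an `rc` key, so the old `item.0.rc != 0` test would fail on it with an undefined-attribute error. The pair of guards `not item.0.get("skipped")` and `item.0.get("rc", 0) != 0` handles both shapes. Roughly, the per-item registered results look like this (an illustrative sketch, not captured ceph-ansible output):

  executed iteration: {"item": "/dev/sdb", "rc": 1, "changed": false, ...}
  skipped iteration:  {"item": "/dev/sdb1", "skipped": true, "changed": false}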

View File

@@ -7,21 +7,15 @@
 # if you have 64 disks with 4TB each, this will take a while
 # since Ansible will process the loop sequentially
-# NOTE (alahouze): if the device is a partition, the parted command below has
-# failed, this is why we check if the device is a partition too.
 - name: prepare osd disk(s)
-  command: "ceph-disk prepare --cluster {{ cluster }} {{ item.2 }} {{ item.3 }}"
+  command: "ceph-disk prepare --cluster {{ cluster }} {{ item.1 }} {{ item.2 }}"
   with_together:
     - "{{ parted_results.results }}"
-    - "{{ ispartition_results.results }}"
     - "{{ devices }}"
     - "{{ raw_journal_devices }}"
   changed_when: false
   when:
-    - not item.0.get("skipped")
-    - not item.1.get("skipped")
-    - item.0.get("rc", 0) != 0
-    - item.1.get("rc", 0) != 0
+    - item.0.get("skipped") or item.0.get("rc", 0) != 0
     - raw_multi_journal
     - not osd_auto_discovery
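
End to end, a device that is already a partition now flows through cleanly: the "check if a partition named 'ceph' exists" task is skipped for it (its ispartition check matched, rc 0), so `item.0.get("skipped")` is true here and ceph-disk prepare still runs, while whole disks keep the old rc-based behaviour. For the illustrative host_vars sketch above, and assuming the default cluster name ceph, the first loop iteration would run roughly:

  ceph-disk prepare --cluster ceph /dev/sdb1 /dev/sdb2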