---
## SCENARIO 1: JOURNAL AND OSD_DATA ON THE SAME DEVICE

- include: check_devices.yml
# Prepare means:
# - create a GPT partition for a disk, or a loop label for a partition
# - mark the partition with the ceph type uuid
# - create a file system
# - mark the fs as ready for ceph consumption
# - the entire data disk is used (one big partition)
# - a new partition is added to the journal disk (so it can be easily shared)
#
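# For illustration, the steps above are roughly what the following manual
# sequence would do (a sketch only -- ceph-disk performs all of this itself;
# the device name /dev/sdb and the xfs file system are assumptions):
#
#   - name: example only -- manual equivalent of "ceph-disk prepare" (not used)
#     shell: |
#       sgdisk --largest-new=1 /dev/sdb                                     # one big data partition
#       sgdisk --typecode=1:4fbd7e29-9d25-41b8-afd0-062c0ceff05d /dev/sdb   # ceph "osd data" type uuid
#       mkfs -t xfs /dev/sdb1                                               # create a file system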
# NOTE (leseb): the prepare process must be parallelized somehow...
# if you have 64 disks of 4TB each, this will take a while,
# since Ansible processes the loop sequentially.
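# One possible approach (an untested sketch, not part of this playbook):
# fire the prepare commands asynchronously with poll 0, then reap them
# with async_status, e.g.:
#
#   - command: "ceph-disk prepare {{ item }}"
#     with_items: devices
#     async: 3600
#     poll: 0
#     register: prepare_jobs
#
#   - async_status: jid={{ item.ansible_job_id }}
#     with_items: prepare_jobs.results
#     register: job
#     until: job.finished
#     retries: 120
#     delay: 30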
# NOTE (alahouze): if the device is a partition, the parted command below
# will have failed; this is why we also check whether the device is a
# partition.
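# For reference, the registered checks consumed by the "when" condition of
# the next task might look like this in check_devices.yml (a hypothetical
# sketch; the real tasks may differ):
#
#   - shell: echo '{{ item }}' | egrep -q '[0-9]+$'
#     ignore_errors: true
#     with_items: devices
#     register: ispartition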
- name: Prepare OSD disk(s)
  command: "ceph-disk prepare {{ item.2 }}"
  when: (item.0.rc != 0 or item.1.rc != 0) and journal_collocation
  ignore_errors: True
  with_together:
    - parted.results
    - ispartition.results
    - devices
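# with_together zips the three lists element-wise: for a devices list such
# as [/dev/sdb, /dev/sdc] (an example), item.2 is the device while item.0
# and item.1 are the corresponding entries of parted.results and
# ispartition.results, so each device is prepared only when at least one
# of its own checks returned a non-zero rc.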
- include: activate_osds.yml