# ceph-ansible/roles/ceph-osd/tasks/journal_collocation.yml

---
## SCENARIO 1: JOURNAL AND OSD_DATA ON THE SAME DEVICE
- include: zap_devices.yml
- include: check_devices.yml
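
# NOTE: check_devices.yml is expected to register the "parted" and
# "ispartition" results that the "Manually prepare OSD disk(s)" task
# below iterates over.
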
# Prepare means
# - create GPT partition for a disk, or a loop label for a partition
# - mark the partition with the ceph type uuid
# - create a file system
# - mark the fs as ready for ceph consumption
# - entire data disk is used (one big partition)
# - a new partition is added to the journal disk (so it can be easily shared)
#
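# For illustration (an assumption about the typical result, not something
# this role inspects): after "ceph-disk prepare /dev/sdb" with a collocated
# journal, the disk typically ends up with two partitions, e.g.
#   /dev/sdb1  ceph data    (rest of the disk)
#   /dev/sdb2  ceph journal (small journal partition)
#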
# NOTE (leseb): the prepare process must be parallelized somehow...
# if you have 64 disks of 4TB each, this will take a while
# since Ansible processes the loop sequentially
# NOTE (alahouze): if the device is a partition, the parted command would
# have failed, which is why we also check whether the device is a partition.
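#
# A sketch of one way to parallelize the prepare step with Ansible's async
# support (an assumption, not part of this role): fire off every prepare
# without waiting, then poll the jobs until they are done.
#
#- name: Prepare OSD disk(s) asynchronously
#  command: "ceph-disk prepare {{ item.2 }}"
#  async: 3600
#  poll: 0
#  register: prepare_jobs
#  with_together:
#    - parted.results
#    - ispartition.results
#    - devices
#
#- name: Wait for the asynchronous prepare jobs to finish
#  async_status: jid={{ item.ansible_job_id }}
#  register: job_result
#  until: job_result.finished
#  retries: 120
#  delay: 30
#  with_items: prepare_jobs.results
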
- name: Automatically prepare OSD disk(s) without partitions
  command: ceph-disk prepare "/dev/{{ item.key }}"
  when: ansible_devices is defined and item.value.removable == "0" and item.value.partitions|count == 0 and journal_collocation and osd_auto_discovery
  ignore_errors: True
  with_dict: ansible_devices
  register: prepared_osds
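
# For reference (an assumption about the usual shape of the fact, values
# abridged): an eligible entry of ansible_devices looks roughly like
#   "sdb": { "removable": "0", "partitions": {}, ... }
# so the task above runs "ceph-disk prepare /dev/sdb".
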
- name: Manually prepare OSD disk(s)
  command: "ceph-disk prepare {{ item.2 }}"
  when: (item.0.rc != 0 or item.1.rc != 0) and journal_collocation and not osd_auto_discovery
  ignore_errors: True
  with_together:
    - parted.results
    - ispartition.results
    - devices
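
# The "devices" list is expected to be defined in the inventory or
# group_vars, e.g. (illustrative values):
#   devices:
#     - /dev/sdb
#     - /dev/sdc
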
- include: activate_osds.yml