---

## SCENARIO 1: JOURNAL AND OSD_DATA ON THE SAME DEVICE

- include: zap_devices.yml

- include: check_devices.yml

# Prepare means:
# - create a GPT partition for a disk, or a loop label for a partition
# - mark the partition with the ceph type uuid
# - create a file system
# - mark the fs as ready for ceph consumption
# - the entire data disk is used (one big partition)
# - a new partition is added to the journal disk (so it can be easily shared)
#
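# For example (an illustration, not taken from this play): preparing an empty
# /dev/sdb with a collocated journal typically leaves /dev/sdb1 as the OSD
# data partition and /dev/sdb2 as its journal partition.
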
# NOTE (leseb): the prepare process must be parallelized somehow...
# if you have 64 disks with 4TB each, this will take a while
# since Ansible will process the loop sequentially
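
# A possible mitigation (a sketch only, not wired into this scenario) would be
# to launch the prepare commands as async background jobs and reap them
# afterwards with the async_status module, e.g.:
#
# - name: Prepare OSD disk(s) asynchronously
#   command: ceph-disk prepare "/dev/{{ item.key }}"
#   async: 3600
#   poll: 0
#   with_dict: ansible_devices
#   register: async_prepare
#
# - name: Wait for all prepare commands to finish
#   async_status:
#     jid: "{{ item.ansible_job_id }}"
#   with_items: async_prepare.results
#   register: job_result
#   until: job_result.finished
#   retries: 120
#   delay: 30
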
# NOTE (alahouze): if the device is a partition, the parted command below
# would have failed, which is why we also check whether the device is a
# partition.
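#
# This task only runs with osd_auto_discovery (and journal_collocation): it
# prepares every non-removable device from the ansible_devices facts that has
# no partitions yet.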
- name: Automatically prepare OSD disk(s) without partitions
  command: ceph-disk prepare "/dev/{{ item.key }}"
  when: ansible_devices is defined and item.value.removable == "0" and item.value.partitions|count == 0 and journal_collocation and osd_auto_discovery
  ignore_errors: True
  with_dict: ansible_devices
  register: prepared_osds

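# with_together below walks the three lists in lockstep: for each device,
# item.0 is its parted result, item.1 its ispartition result, and item.2 the
# device path itself (the first two are presumably registered by the
# check_devices.yml include above).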
- name: Manually prepare OSD disk(s)
  command: "ceph-disk prepare {{ item.2 }}"
  when: (item.0.rc != 0 or item.1.rc != 0) and journal_collocation and not osd_auto_discovery
  ignore_errors: True
  with_together:
    - parted.results
    - ispartition.results
    - devices

- include: activate_osds.yml