Separate OSD scenarios into multiple files

Currently everything lives in main.yml; the file has become difficult to
read and only gets messier as we keep adding new scenarios.
I think we should separate the scenarios into dedicated files and simply
include them from main.yml.
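
As a rough sketch, keeping the current scenario flag names, main.yml would
boil down to:

- include: journal_collocation.yml
  when: journal_collocation

- include: raw_journal.yml
  when: raw_journal

- include: raw_multi_journal.yml
  when: raw_multi_journal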

Closes: #16

Signed-off-by: Sébastien Han <sebastien.han@enovance.com>
pull/26/head
Sébastien Han 2014-03-10 16:52:31 +01:00
parent 6060a7e56a
commit fc38e21333
4 changed files with 205 additions and 75 deletions


@@ -0,0 +1,66 @@
---
## SCENARIO 1: JOURNAL AND OSD_DATA ON THE SAME DEVICE
#
- name: Install dependencies
apt: pkg=parted state=present
when: ansible_os_family == 'Debian'
- name: Install dependencies
yum: name=parted state=present
when: ansible_os_family == 'RedHat'
- name: Copy OSD bootstrap key
copy: src=fetch/{{ hostvars[groups['mons'][0]]['ansible_hostname'] }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600
when: cephx
# NOTE (leseb): the current behavior of ceph-disk is to fail when the device is mounted: "stderr: ceph-disk: Error: Device is mounted: /dev/sdb1"
# the return code is 1, which makes sense, however ideally if ceph-disk detects a ceph partition
# it should exit with rc=0 and do nothing unless we pass something like --force
# As a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "ignore_errors: True"
# I believe it's safer
#
- name: Check if a partition named 'ceph' exists
shell: parted --script {{ item }} print | egrep -sq '^ 1.*ceph'
ignore_errors: True
with_items: devices
register: parted
changed_when: False
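# For illustration only: on a disk that ceph-disk has already prepared, the parted output
# checked above is expected to contain a numbered partition whose name includes "ceph",
# roughly like (sizes and filesystem are just examples):
#   Number  Start   End     Size    File system  Name       Flags
#    1      1049kB  4001GB  4001GB  xfs          ceph data
# so rc == 0 means "this looks like a ceph disk, skip prepare" and rc != 0 means "prepare it".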
# Prepare means
# - create GPT partition
# - mark the partition with the ceph type uuid
# - create a file system
# - mark the fs as ready for ceph consumption
# - entire data disk is used (one big partition)
# - a new partition is added to the journal disk (so it can be easily shared)
#
# NOTE (leseb): the prepare process must be parallelized somehow...
# if you have 64 disks with 4TB each, this will take a while
# since Ansible processes the loop sequentially (see the commented sketch after this task)
- name: Prepare OSD disk(s)
command: ceph-disk prepare {{ item.1 }}
when: item.0.rc != 0 and journal_collocation
ignore_errors: True
with_together:
- parted.results
- devices
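# NOTE (sketch only, not enabled): one way to parallelize the prepare step, as suggested
# above, would be Ansible's async support: fire every ceph-disk prepare without polling,
# then wait for the registered jobs to finish. The variables below reuse 'parted' and
# 'devices' from this file; 'prepare_jobs' is just an illustrative register name.
#
# - name: Prepare OSD disk(s) in parallel
#   command: ceph-disk prepare {{ item.1 }}
#   when: item.0.rc != 0 and journal_collocation
#   async: 3600
#   poll: 0
#   register: prepare_jobs
#   with_together:
#     - parted.results
#     - devices
#
# - name: Wait for the OSD disk(s) to be prepared
#   async_status: jid={{ item.ansible_job_id }}
#   register: job_result
#   until: job_result.finished
#   retries: 360
#   delay: 10
#   when: item.ansible_job_id is defined
#   with_items: prepare_jobs.results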
# Activate means:
# - mount the volume in a temp location
# - allocate an osd id (if needed)
# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
# - start ceph-osd
#
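# NOTE: '{{ item }}1' assumes the data partition is the first one and that the partition is
# named by appending '1' to the device (/dev/sdb -> /dev/sdb1); devices that use a 'p' infix
# (e.g. /dev/cciss/c0d0 -> /dev/cciss/c0d0p1) would need a different suffix.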
- name: Activate OSD(s)
command: ceph-disk activate {{ item }}1
with_items: devices
ignore_errors: True
changed_when: False
- name: Start the OSD service and add it to the init sequence
service: name=ceph state=started enabled=yes


@@ -2,80 +2,11 @@
## Deploy Ceph Object Storage Daemon(s)
#
- name: Install dependencies
apt: pkg=parted state=present
- include: journal_collocation.yml
when: journal_collocation
- name: Copy OSD bootstrap key
copy: src=fetch/{{ hostvars[groups['mons'][0]]['ansible_hostname'] }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600
when: cephx
- include: raw_journal.yml
when: raw_journal
# NOTE (leseb): the current behavior of ceph-disk is to fail when the device is mounted: "stderr: ceph-disk: Error: Device is mounted: /dev/sdb1"
# the return code is 1, which makes sense, however ideally if ceph-disk detects a ceph partition
# it should exit with rc=0 and do nothing unless we pass something like --force
# As a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "ignore_errors: True"
# I believe it's safer
#
- name: Check if a partition named 'ceph' exists
shell: parted --script {{ item }} print | egrep -sq '^ 1.*ceph'
ignore_errors: True
with_items: devices
register: parted
changed_when: False
# Prepare means
# - create GPT partition
# - mark the partition with the ceph type uuid
# - create a file system
# - mark the fs as ready for ceph consumption
# - entire data disk is used (one big partition)
# - a new partition is added to the journal disk (so it can be easily shared)
#
# NOTE (leseb): the prepare process must be parallelized somehow...
# if you have 64 disks with 4TB each, this will take a while
# since Ansible processes the loop sequentially
# Scenario 1 without dedicated journal
- name: Prepare OSD disk(s)
command: ceph-disk prepare {{ item.1 }}
when: item.0.rc != 0 and journal_collocation
ignore_errors: True
with_together:
- parted.results
- devices
# Scenario 2 with dedicated journal
- name: Prepare OSD disk(s)
command: ceph-disk prepare {{ item.1 }} {{ journal_device }}
when: item.0.rc != 0 and raw_journal
ignore_errors: True
with_together:
- parted.results
- devices
# Scenario 3
- name: Prepare OSD disk(s)
command: ceph-disk prepare {{ item.1 }} {{ item.2 }}
when: item.0.rc != 0 and raw_multi_journal
ignore_errors: True
with_together:
- parted.results
- devices
- raw_journal_devices
# Activate means:
# - mount the volume in a temp location
# - allocate an osd id (if needed)
# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
# - start ceph-osd
#
- name: Activate OSD(s)
command: ceph-disk activate {{ item }}1
ignore_errors: True
with_items: devices
changed_when: False
- name: Start the OSD service and add it to the init sequence
service: name=ceph state=started enabled=yes
- include: raw_multi_journal.yml
when: raw_multi_journal
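# For reference, an illustrative set of variables driving the includes above; the variable
# names come from the tasks in this role, the device paths and values are only examples:
#
# devices: [ '/dev/sdb', '/dev/sdc', '/dev/sdd' ]
# journal_collocation: true          # scenario 1: journal and osd_data on the same device
# raw_journal: false                 # scenario 2: a single journal device shared by all OSDs
# raw_journal_device: '/dev/sde'
# raw_multi_journal: false           # scenario 3: one journal device per OSD
# raw_journal_devices: [ '/dev/sde', '/dev/sdf', '/dev/sdg' ]
# cephx: true                        # copy the OSD bootstrap key from the first monitor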


@@ -0,0 +1,66 @@
---
## SCENARIO 2: SINGLE JOURNAL DEVICE FOR N OSDS
#
- name: Install dependencies
apt: pkg=parted state=present
when: ansible_os_family == 'Debian'
- name: Install dependencies
yum: name=parted state=present
when: ansible_os_family == 'RedHat'
- name: Copy OSD bootstrap key
copy: src=fetch/{{ hostvars[groups['mons'][0]]['ansible_hostname'] }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600
when: cephx
# NOTE (leseb): the current behavior of ceph-disk is to fail when the device is mounted: "stderr: ceph-disk: Error: Device is mounted: /dev/sdb1"
# the return code is 1, which makes sense, however ideally if ceph-disk detects a ceph partition
# it should exit with rc=0 and do nothing unless we pass something like --force
# As a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "ignore_errors: True"
# I believe it's safer
#
- name: Check if a partition named 'ceph' exists
shell: parted --script {{ item }} print | egrep -sq '^ 1.*ceph'
ignore_errors: True
with_items: devices
register: parted
changed_when: False
# Prepare means
# - create GPT partition
# - mark the partition with the ceph type uuid
# - create a file system
# - mark the fs as ready for ceph consumption
# - entire data disk is used (one big partition)
# - a new partition is added to the journal disk (so it can be easily shared)
#
# NOTE (leseb): the prepare process must be parallelized somehow...
# if you have 64 disks with 4TB each, this will take a while
# since Ansible processes the loop sequentially
- name: Prepare OSD disk(s)
command: ceph-disk prepare {{ item.1 }} {{ raw_journal_device }}
when: item.0.rc != 0 and raw_journal
ignore_errors: True
with_together:
- parted.results
- devices
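# For example (illustrative values), with devices: [ '/dev/sdb', '/dev/sdc' ] and
# raw_journal_device: '/dev/sde', the loop above runs:
#   ceph-disk prepare /dev/sdb /dev/sde
#   ceph-disk prepare /dev/sdc /dev/sde
# i.e. every OSD gets its own journal partition carved out of the same journal device.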
# Activate means:
# - mount the volume in a temp location
# - allocate an osd id (if needed)
# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
# - start ceph-osd
#
- name: Activate OSD(s)
command: ceph-disk activate {{ item }}1
with_items: devices
ignore_errors: True
changed_when: False
- name: Start the OSD service and add it to the init sequence
service: name=ceph state=started enabled=yes


@@ -0,0 +1,67 @@
---
## SCENARIO 3: N JOURNAL DEVICES FOR N OSDS
#
- name: Install dependencies
apt: pkg=parted state=present
when: ansible_os_family == 'Debian'
- name: Install dependencies
yum: name=parted state=present
when: ansible_os_family == 'RedHat'
- name: Copy OSD bootstrap key
copy: src=fetch/{{ hostvars[groups['mons'][0]]['ansible_hostname'] }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600
when: cephx
# NOTE (leseb): the current behavior of ceph-disk is to fail when the device is mounted: "stderr: ceph-disk: Error: Device is mounted: /dev/sdb1"
# the return code is 1, which makes sense, however ideally if ceph-disk detects a ceph partition
# it should exit with rc=0 and do nothing unless we pass something like --force
# As a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "ignore_errors: True"
# I believe it's safer
#
- name: Check if a partition named 'ceph' exists
shell: parted --script {{ item }} print | egrep -sq '^ 1.*ceph'
ignore_errors: True
with_items: devices
register: parted
changed_when: False
# Prepare means
# - create GPT partition
# - mark the partition with the ceph type uuid
# - create a file system
# - mark the fs as ready for ceph consumption
# - entire data disk is used (one big partition)
# - a new partition is added to the journal disk (so it can be easily shared)
#
# NOTE (leseb): the prepare process must be parallelized somehow...
# if you have 64 disks with 4TB each, this will take a while
# since Ansible processes the loop sequentially
- name: Prepare OSD disk(s)
command: ceph-disk prepare {{ item.1 }} {{ item.2 }}
when: item.0.rc != 0 and raw_multi_journal
ignore_errors: True
with_together:
- parted.results
- devices
- raw_journal_devices
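# For example (illustrative values), with devices: [ '/dev/sdb', '/dev/sdc' ] and
# raw_journal_devices: [ '/dev/sde', '/dev/sdf' ], with_together pairs the lists index by
# index and the loop runs:
#   ceph-disk prepare /dev/sdb /dev/sde
#   ceph-disk prepare /dev/sdc /dev/sdf
# so both lists must have the same length and matching order.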
# Activate means:
# - mount the volume in a temp location
# - allocate an osd id (if needed)
# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
# - start ceph-osd
#
- name: Activate OSD(s)
command: ceph-disk activate {{ item }}1
with_items: devices
ignore_errors: True
changed_when: False
- name: Start the OSD service and add it to the init sequence
service: name=ceph state=started enabled=yes