Merge pull request #91 from leseb/wip-osd-directory

Ability to use a directory for OSD instead of a disk
Leseb 2014-07-03 15:34:57 +02:00
commit a3c1e1c862
3 changed files with 66 additions and 0 deletions

@@ -49,3 +49,10 @@ raw_journal_device: /dev/sdb
raw_multi_journal: false
raw_journal_devices: [ '/dev/sdb', '/dev/sdb', '/dev/sdc', '/dev/sdc' ]
# IV. Fourth scenario: use a directory instead of a disk for OSDs
# Use 'true' to enable this scenario
osd_directory: false
osd_directories: [ '/var/lib/ceph/osd/mydir1', '/var/lib/ceph/osd/mydir2', '/var/lib/ceph/osd/mydir3', '/var/lib/ceph/osd/mydir4' ]
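# Example (illustrative, not part of this change): enabling the scenario
# could look like:
#   osd_directory: true
#   osd_directories: [ '/var/lib/ceph/osd/mydir1', '/var/lib/ceph/osd/mydir2' ]
# Each directory should sit on its own filesystem (see the note in
# osd_directory.yml below).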

@@ -10,3 +10,6 @@
- include: raw_multi_journal.yml
  when: raw_multi_journal

- include: osd_directory.yml
  when: osd_directory

@@ -0,0 +1,56 @@
---
## SCENARIO 4: USE A DIRECTORY INSTEAD OF A DISK FOR OSD
#
- name: Install dependencies
  apt: pkg=parted state=present
  when: ansible_os_family == 'Debian'

- name: Install dependencies
  yum: name=parted state=present
  when: ansible_os_family == 'RedHat'

- name: Copy OSD bootstrap key
  copy: src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600
  when: cephx

# NOTE (leseb): we do not check the filesystem underneath the directory,
# so it is really up to you to configure this properly.
# Declaring more than one directory on the same filesystem will confuse Ceph.
- name: Create OSD directories
  file: path={{ item }} state=directory owner=root group=root
  with_items: osd_directories
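
# NOTE (editor): a minimal sketch of one way to satisfy the note above by
# giving each OSD directory its own filesystem before it is used. This is
# illustrative only, not part of this pull request; the device names and
# the xfs fstype below are assumptions, adjust them to the actual hosts.
# - name: Mount a dedicated filesystem per OSD directory
#   mount: name={{ item.dir }} src={{ item.dev }} fstype=xfs state=mounted
#   with_items:
#     - { dir: '/var/lib/ceph/osd/mydir1', dev: '/dev/sdb1' }
#     - { dir: '/var/lib/ceph/osd/mydir2', dev: '/dev/sdc1' }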
# Prepare means:
# - create GPT partition
# - mark the partition with the ceph type uuid
# - create a file system
# - mark the fs as ready for ceph consumption
# - entire data disk is used (one big partition)
# - a new partition is added to the journal disk (so it can be easily shared)
#
# NOTE (leseb): the prepare process must be parallelized somehow...
# if you have 64 disks with 4TB each, this will take a while
# since Ansible will process the loop sequentially
- name: Prepare OSD disk(s)
  command: ceph-disk prepare {{ item }}
  when: osd_directory
  with_items: osd_directories
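
# NOTE (editor): an illustrative sketch, not part of this pull request, of
# one way to parallelize the prepare loop above: fire off each prepare as a
# background job with async/poll 0, then poll the job ids until they all
# finish (assumes Ansible 1.4+ for until/retries/delay):
# - name: Prepare OSD disk(s) in parallel
#   command: ceph-disk prepare {{ item }}
#   async: 3600
#   poll: 0
#   register: osd_prepare_jobs
#   with_items: osd_directories
# - name: Wait for all prepare jobs to finish
#   async_status: jid={{ item.ansible_job_id }}
#   register: job_result
#   until: job_result.finished
#   retries: 120
#   delay: 30
#   with_items: osd_prepare_jobs.results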
# Activate means:
# - mount the volume in a temp location
# - allocate an osd id (if needed)
# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
# - start ceph-osd
#
- name: Activate OSD(s)
  command: ceph-disk activate {{ item }}
  with_items: osd_directories
  changed_when: False

- name: Start the OSD service and add it to the init sequence
  service: name=ceph state=started enabled=yes
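
A quick post-run sanity check (an editor's sketch, not part of this pull request) could confirm that the new OSDs registered with the cluster; it assumes the ceph CLI is usable on the target host:

- name: Verify the OSDs registered with the cluster
  command: ceph osd stat
  register: osd_stat
  changed_when: False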