WIP: Implement OSD sections

Still WIP, @mwheckmann free to test
As requested by #162

Known issue: since ceph.conf gets modified during every single run
(at the end, during the merge), this will restart the Ceph daemons.

Signed-off-by: Sébastien Han <sebastien.han@enovance.com>
pull/177/head
Sébastien Han 2015-01-08 12:27:43 -05:00
parent 5047ac7669
commit f68cd46664
5 changed files with 93 additions and 2 deletions

View File

@ -5,8 +5,27 @@
# Dummy variable to avoid error because ansible does not recognize the file as a good configuration file when no variable in it.
dummy:
## Ceph options
####################
# OSD CRUSH LOCATION
####################
# The following options will build a ceph.conf with OSD sections
# Example:
# [osd.X]
# osd crush location = "root=location"
#
# This works with your inventory file
# To match the following 'osd_crush_location' option the inventory must look like:
#
# [osds]
# osd0 ceph_crush_root=foo ceph_crush_rack=bar
# Master switch: when true, osd_fragment.yml is included and per-OSD
# sections are merged into ceph.conf.
crush_location: false
# Outer double quotes are YAML quoting; the inner single quotes are part of
# the string value — presumably so the rendered ceph.conf value is quoted.
# NOTE(review): confirm ceph.conf actually requires the surrounding quotes.
osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}'"
##############
# CEPH OPTIONS
##############
#cephx: true

View File

@ -2,8 +2,27 @@
# You can override default vars defined in defaults/main.yml here,
# but I would advise using host or group vars instead
## Ceph options
####################
# OSD CRUSH LOCATION
####################
# The following options will build a ceph.conf with OSD sections
# Example:
# [osd.X]
# osd crush location = "root=location"
#
# This works with your inventory file
# To match the following 'osd_crush_location' option the inventory must look like:
#
# [osds]
# osd0 ceph_crush_root=foo ceph_crush_rack=bar
# Master switch: when true, osd_fragment.yml is included and per-OSD
# sections are merged into ceph.conf.
crush_location: false
# Outer double quotes are YAML quoting; the inner single quotes are part of
# the string value — presumably so the rendered ceph.conf value is quoted.
# NOTE(review): confirm ceph.conf actually requires the surrounding quotes.
osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}'"
##############
# CEPH OPTIONS
##############
# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
fsid: "{{ cluster_uuid.stdout }}"

View File

@ -31,6 +31,9 @@
ignore_errors: True
changed_when: False
# Build per-OSD ceph.conf sections only when the operator opted in via
# crush_location (defaults to false in the role defaults).
- include: osd_fragment.yml
when: crush_location
- name: Start and add that the OSD service to the init sequence
service: >
name=ceph

View File

@ -0,0 +1,48 @@
---
# Build "[osd.X]" sections and merge them into /etc/ceph/ceph.conf.
# Flow: find each OSD's mount point -> read its id from the "whoami" file
# -> render one fragment per OSD -> assemble fragments, then assemble the
# fragment result with the base ceph.conf.

- name: Get OSD path
  # df lists mounted filesystems; grep on the device, awk prints the
  # mount point (6th column). ignore_errors because a listed device may
  # not be mounted (yet).
  shell: "df | grep {{ item }} | awk '{print $6}'"
  with_items: devices
  register: osd_path
  ignore_errors: true

- name: Get OSD id
  # Each OSD data dir contains a "whoami" file holding its numeric id.
  command: cat {{ item.stdout }}/whoami
  register: osd_id
  with_items: osd_path.results
  ignore_errors: true

- name: Create a Ceph fragment and assemble directory
  # BUGFIX: was mode=0644 — directories need the execute (search) bit,
  # otherwise they cannot be traversed; 0755 is the correct directory mode.
  file: >
    path={{ item }}
    state=directory
    owner=root
    group=root
    mode=0755
  with_items:
    - /etc/ceph/ceph.d/
    - /etc/ceph/ceph.d/osd_fragments

- name: Create the OSD fragment
  # One fragment file per discovered OSD id (osd.<id>.conf).
  template: >
    src=osd.conf.j2
    dest=/etc/ceph/ceph.d/osd_fragments/osd.{{ item.stdout }}.conf
  with_items: osd_id.results

- name: Copy ceph.conf for assembling
  command: cp /etc/ceph/ceph.conf /etc/ceph/ceph.d/

- name: Assemble OSD sections
  # Concatenate all per-OSD fragments into a single osd.conf.
  assemble: >
    src=/etc/ceph/ceph.d/osd_fragments/
    dest=/etc/ceph/ceph.d/osd.conf
    owner=root
    group=root
    mode=0644

- name: Assemble Ceph conf and OSD fragments
  # Merge the copied ceph.conf and osd.conf back into /etc/ceph/ceph.conf.
  # NOTE(review): this rewrites ceph.conf on every run, which triggers the
  # daemon-restart issue mentioned in the commit message.
  assemble: >
    src=/etc/ceph/ceph.d/
    dest=/etc/ceph/ceph.conf
    owner=root
    group=root
    mode=0644

View File

@ -0,0 +1,2 @@
{# Rendered once per OSD by osd_fragment.yml: "item" is the registered
   result of reading the OSD's whoami file, so item.stdout is the numeric
   OSD id. The "-#}" strips the trailing newline so output is unchanged. -#}
[osd.{{ item.stdout }}]
osd crush location = {{ osd_crush_location }}