From 4f5d195788c352b31c6d4469a46a280324dc143f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Thu, 3 Jul 2014 15:30:49 +0200 Subject: [PATCH] Ability to use a directory for OSD instead of a disk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit implements a fourth scenario where we can directly use a directory instead of a block device for the OSDs. The purpose of this scenario is more testing-oriented. Please note that we do not check the filesystem underneath the directory so it is really up to you to configure this properly. Declaring more than one directory on the same filesystem will confuse Ceph. Fixes: #14 Signed-off-by: Sébastien Han --- group_vars/osds | 7 ++++ roles/osd/tasks/main.yml | 3 ++ roles/osd/tasks/osd_directory.yml | 56 +++++++++++++++++++++++++++++++ 3 files changed, 66 insertions(+) create mode 100644 roles/osd/tasks/osd_directory.yml diff --git a/group_vars/osds b/group_vars/osds index 26af41fc1..4468f8341 100644 --- a/group_vars/osds +++ b/group_vars/osds @@ -49,3 +49,10 @@ raw_journal_device: /dev/sdb raw_multi_journal: false raw_journal_devices: [ '/dev/sdb', '/dev/sdb', '/dev/sdc', '/dev/sdc' ] + + +# IV. 
Fourth scenario: use directory instead of disk for OSDs +# Use 'true' to enable this scenario + +osd_directory: false +osd_directories: [ '/var/lib/ceph/osd/mydir1', '/var/lib/ceph/osd/mydir2', '/var/lib/ceph/osd/mydir3', '/var/lib/ceph/osd/mydir4'] diff --git a/roles/osd/tasks/main.yml b/roles/osd/tasks/main.yml index ece7dd72e..1ff2a2db2 100644 --- a/roles/osd/tasks/main.yml +++ b/roles/osd/tasks/main.yml @@ -10,3 +10,6 @@ - include: raw_multi_journal.yml when: raw_multi_journal + +- include: osd_directory.yml + when: osd_directory diff --git a/roles/osd/tasks/osd_directory.yml b/roles/osd/tasks/osd_directory.yml new file mode 100644 index 000000000..8340aa08f --- /dev/null +++ b/roles/osd/tasks/osd_directory.yml @@ -0,0 +1,56 @@ +--- +## SCENARIO 4: USE A DIRECTORY INSTEAD OF A DISK FOR OSD +# + +- name: Install dependencies + apt: pkg=parted state=present + when: ansible_os_family == 'Debian' + +- name: Install dependencies + yum: name=parted state=present + when: ansible_os_family == 'RedHat' + +- name: Copy OSD bootstrap key + copy: src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600 + when: cephx + +# NOTE (leseb): we do not check the filesystem underneath the directory +# so it is really up to you to configure this properly. +# Declaring more than one directory on the same filesystem will confuse Ceph. + +- name: Create OSD directories + file: path={{ item }} state=directory owner=root group=root + with_items: osd_directories + +# Prepare means +# - create GPT partition +# - mark the partition with the ceph type uuid +# - create a file system +# - mark the fs as ready for ceph consumption +# - entire data disk is used (one big partition) +# - a new partition is added to the journal disk (so it can be easily shared) +# + +# NOTE (leseb): the prepare process must be parallelized somehow... 
+# if you have 64 disks with 4TB each, this will take a while +# since Ansible will sequentially process the loop + +- name: Prepare OSD disk(s) + command: ceph-disk prepare {{ item }} + when: osd_directory + with_items: osd_directories + +# Activate means: +# - mount the volume in a temp location +# - allocate an osd id (if needed) +# - remount in the correct location /var/lib/ceph/osd/$cluster-$id +# - start ceph-osd +# + +- name: Activate OSD(s) + command: ceph-disk activate {{ item }} + with_items: osd_directories + changed_when: False + +- name: Start and add the OSD service to the init sequence + service: name=ceph state=started enabled=yes