From fc38e213337eda8979623992161a9cb53d74c078 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Han?=
Date: Mon, 10 Mar 2014 16:52:31 +0100
Subject: [PATCH] Separate OSD scenarios to multiple files
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Currently everything lives in main.yml. The file has become difficult to
read and can easily turn into a real mess since we keep adding new
scenarios. This change separates the scenarios into dedicated files and
simply includes them from main.yml.

Closes: #16
Signed-off-by: Sébastien Han
---
 roles/osd/tasks/journal_collocation.yml | 66 ++++++++++++++++++++
 roles/osd/tasks/main.yml                | 81 ++-----------------------
 roles/osd/tasks/raw_journal.yml         | 66 ++++++++++++++++++++
 roles/osd/tasks/raw_multi_journal.yml   | 67 ++++++++++++++++++++
 4 files changed, 205 insertions(+), 75 deletions(-)
 create mode 100644 roles/osd/tasks/journal_collocation.yml
 create mode 100644 roles/osd/tasks/raw_journal.yml
 create mode 100644 roles/osd/tasks/raw_multi_journal.yml

diff --git a/roles/osd/tasks/journal_collocation.yml b/roles/osd/tasks/journal_collocation.yml
new file mode 100644
index 000000000..4a3d35857
--- /dev/null
+++ b/roles/osd/tasks/journal_collocation.yml
@@ -0,0 +1,66 @@
+---
+## SCENARIO 1: JOURNAL AND OSD_DATA ON THE SAME DEVICE
+#
+
+- name: Install dependencies
+  apt: pkg=parted state=present
+  when: ansible_os_family == 'Debian'
+
+- name: Install dependencies
+  yum: name=parted state=present
+  when: ansible_os_family == 'RedHat'
+
+- name: Copy OSD bootstrap key
+  copy: src=fetch/{{ hostvars[groups['mons'][0]]['ansible_hostname'] }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600
+  when: cephx
+
+# NOTE (leseb): the current behavior of ceph-disk is to fail when the device is mounted: "stderr: ceph-disk: Error: Device is mounted: /dev/sdb1".
+# The return code is 1, which makes sense; however, ideally, if ceph-disk detects a ceph partition
+# it should exit with rc=0 and do nothing unless something like --force is passed.
+# As a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "ignore_errors: True".
+# I believe it is safer.
+#
+
+- name: Check if a partition named 'ceph' exists
+  shell: parted --script {{ item }} print | egrep -sq '^ 1.*ceph'
+  ignore_errors: True
+  with_items: devices
+  register: parted
+  changed_when: False
+
+# Prepare means:
+# - create a GPT partition
+# - mark the partition with the ceph type uuid
+# - create a file system
+# - mark the fs as ready for ceph consumption
+# - the entire data disk is used (one big partition)
+# - a new partition is added to the journal disk (so it can be easily shared)
+#
+
+# NOTE (leseb): the prepare process must be parallelized somehow...
+# if you have 64 disks with 4TB each, this will take a while
+# since Ansible processes the loop sequentially
+
+- name: Prepare OSD disk(s)
+  command: ceph-disk prepare {{ item.1 }}
+  when: item.0.rc != 0 and journal_collocation
+  ignore_errors: True
+  with_together:
+    - parted.results
+    - devices
+
+# Activate means:
+# - mount the volume in a temp location
+# - allocate an osd id (if needed)
+# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
+# - start ceph-osd
+#
+
+- name: Activate OSD(s)
+  command: ceph-disk activate {{ item }}1
+  with_items: devices
+  ignore_errors: True
+  changed_when: False
+
+- name: Start the OSD service and add it to the init sequence
+  service: name=ceph state=started enabled=yes
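
For reference, a minimal variable sketch that would drive this first scenario. The variable names (journal_collocation, devices, cephx) are the ones consumed by the tasks above; the device paths and the group_vars location are purely illustrative:

# group_vars/osds (illustrative values only)
journal_collocation: true   # journal lives on the same disk as the OSD data
cephx: true                 # triggers the bootstrap key copy above
devices:                    # whole disks handed to 'ceph-disk prepare'
  - /dev/sdb
  - /dev/sdc

With journal_collocation enabled, ceph-disk prepare receives only the data disk, so both the data and the journal partitions are carved out of that same device.
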
diff --git a/roles/osd/tasks/main.yml b/roles/osd/tasks/main.yml
index e37cd29c3..ece7dd72e 100644
--- a/roles/osd/tasks/main.yml
+++ b/roles/osd/tasks/main.yml
@@ -2,80 +2,11 @@
 ## Deploy Ceph Object Storage Daemon(s)
 #
 
-- name: Install dependancies
-  apt: pkg=parted state=present
+- include: journal_collocation.yml
+  when: journal_collocation
 
-- name: Copy OSD bootstrap key
-  copy: src=fetch/{{ hostvars[groups['mons'][0]]['ansible_hostname'] }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600
-  when: cephx
+- include: raw_journal.yml
+  when: raw_journal
 
-# NOTE (leseb): current behavior of ceph-disk is to fail when the device is mounted "stderr: ceph-disk: Error: Device is mounted: /dev/sdb1"
-# the return code is 1, which makes sense, however ideally if ceph-disk will detect a ceph partition
-# it should exist we rc=0 and don't do anything unless we do something like --force
-# As as a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "ignore_errors: True"
-# I believe it's safer
-#
-
-- name: If partition named 'ceph' exists
-  shell: parted --script {{ item }} print | egrep -sq '^ 1.*ceph'
-  ignore_errors: True
-  with_items: devices
-  register: parted
-  changed_when: False
-
-# Prepare means
-# - create GPT partition
-# - mark the partition with the ceph type uuid
-# - create a file system
-# - mark the fs as ready for ceph consumption
-# - entire data disk is used (one big partition)
-# - a new partition is added to the journal disk (so it can be easily shared)
-#
-
-# NOTE (leseb): the prepare process must be parallelized somehow...
-# if you have 64 disks with 4TB each, this will take a while
-# since Ansible will sequential process the loop
-
-# Scenario 1 without dedicated journal
-- name: Prepare OSD disk(s)
-  command: ceph-disk prepare {{ item.1 }}
-  when: item.0.rc != 0 and journal_collocation
-  ignore_errors: True
-  with_together:
-    - parted.results
-    - devices
-
-# Scenario 2 with dedicated journal
-- name: Prepare OSD disk(s)
-  command: ceph-disk prepare {{ item.1 }} {{ journal_device }}
-  when: item.0.rc != 0 and raw_journal
-  ignore_errors: True
-  with_together:
-    - parted.results
-    - devices
-
-# Scenario 3
-- name: Prepare OSD disk(s)
-  command: ceph-disk prepare {{ item.1 }} {{ item.2 }}
-  when: item.0.rc != 0 and raw_multi_journal
-  ignore_errors: True
-  with_together:
-    - parted.results
-    - devices
-    - raw_journal_devices
-
-# Activate means:
-# - mount the volume in a temp location
-# - allocate an osd id (if needed)
-# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
-# - start ceph-osd
-#
-
-- name: Activate OSD(s)
-  command: ceph-disk activate {{ item }}1
-  ignore_errors: True
-  with_items: devices
-  changed_when: False
-
-- name: Start and add that the OSD service to the init sequence
-  service: name=ceph state=started enabled=yes
+- include: raw_multi_journal.yml
+  when: raw_multi_journal
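
main.yml is now just a dispatcher: each include is guarded by its scenario boolean, and each scenario file also re-checks its own flag in the prepare task's when clause, so disks are never prepared for the wrong scenario. A sketch of how the flags might be laid out (names taken from the includes above, values illustrative; presumably only one flag should be true on a given host):

# group_vars/osds (illustrative): pick exactly one scenario
journal_collocation: false
raw_journal: false
raw_multi_journal: true
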
diff --git a/roles/osd/tasks/raw_journal.yml b/roles/osd/tasks/raw_journal.yml
new file mode 100644
index 000000000..99180a069
--- /dev/null
+++ b/roles/osd/tasks/raw_journal.yml
@@ -0,0 +1,66 @@
+---
+## SCENARIO 2: SINGLE JOURNAL DEVICE FOR N OSDS
+#
+
+- name: Install dependencies
+  apt: pkg=parted state=present
+  when: ansible_os_family == 'Debian'
+
+- name: Install dependencies
+  yum: name=parted state=present
+  when: ansible_os_family == 'RedHat'
+
+- name: Copy OSD bootstrap key
+  copy: src=fetch/{{ hostvars[groups['mons'][0]]['ansible_hostname'] }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600
+  when: cephx
+
+# NOTE (leseb): the current behavior of ceph-disk is to fail when the device is mounted: "stderr: ceph-disk: Error: Device is mounted: /dev/sdb1".
+# The return code is 1, which makes sense; however, ideally, if ceph-disk detects a ceph partition
+# it should exit with rc=0 and do nothing unless something like --force is passed.
+# As a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "ignore_errors: True".
+# I believe it is safer.
+#
+
+- name: Check if a partition named 'ceph' exists
+  shell: parted --script {{ item }} print | egrep -sq '^ 1.*ceph'
+  ignore_errors: True
+  with_items: devices
+  register: parted
+  changed_when: False
+
+# Prepare means:
+# - create a GPT partition
+# - mark the partition with the ceph type uuid
+# - create a file system
+# - mark the fs as ready for ceph consumption
+# - the entire data disk is used (one big partition)
+# - a new partition is added to the journal disk (so it can be easily shared)
+#
+
+# NOTE (leseb): the prepare process must be parallelized somehow...
+# if you have 64 disks with 4TB each, this will take a while
+# since Ansible processes the loop sequentially
+
+- name: Prepare OSD disk(s)
+  command: ceph-disk prepare {{ item.1 }} {{ raw_journal_device }}
+  when: item.0.rc != 0 and raw_journal
+  ignore_errors: True
+  with_together:
+    - parted.results
+    - devices
+
+# Activate means:
+# - mount the volume in a temp location
+# - allocate an osd id (if needed)
+# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
+# - start ceph-osd
+#
+
+- name: Activate OSD(s)
+  command: ceph-disk activate {{ item }}1
+  with_items: devices
+  ignore_errors: True
+  changed_when: False
+
+- name: Start the OSD service and add it to the init sequence
+  service: name=ceph state=started enabled=yes
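
The single-journal scenario adds one more input, raw_journal_device, shared by every OSD on the host. A hedged sketch with illustrative paths; raw_journal, raw_journal_device and devices are the variables actually read by the tasks above:

# group_vars/osds (illustrative values only)
raw_journal: true
raw_journal_device: /dev/sdf   # one dedicated journal disk shared by all OSDs
devices:
  - /dev/sdb
  - /dev/sdc
  - /dev/sdd

With these values the prepare task would run "ceph-disk prepare /dev/sdb /dev/sdf", "ceph-disk prepare /dev/sdc /dev/sdf", and so on, adding one journal partition on /dev/sdf per data disk.
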
diff --git a/roles/osd/tasks/raw_multi_journal.yml b/roles/osd/tasks/raw_multi_journal.yml
new file mode 100644
index 000000000..414a69f98
--- /dev/null
+++ b/roles/osd/tasks/raw_multi_journal.yml
@@ -0,0 +1,67 @@
+---
+## SCENARIO 3: N JOURNAL DEVICES FOR N OSDS
+#
+
+- name: Install dependencies
+  apt: pkg=parted state=present
+  when: ansible_os_family == 'Debian'
+
+- name: Install dependencies
+  yum: name=parted state=present
+  when: ansible_os_family == 'RedHat'
+
+- name: Copy OSD bootstrap key
+  copy: src=fetch/{{ hostvars[groups['mons'][0]]['ansible_hostname'] }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600
+  when: cephx
+
+# NOTE (leseb): the current behavior of ceph-disk is to fail when the device is mounted: "stderr: ceph-disk: Error: Device is mounted: /dev/sdb1".
+# The return code is 1, which makes sense; however, ideally, if ceph-disk detects a ceph partition
+# it should exit with rc=0 and do nothing unless something like --force is passed.
+# As a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "ignore_errors: True".
+# I believe it is safer.
+#
+
+- name: Check if a partition named 'ceph' exists
+  shell: parted --script {{ item }} print | egrep -sq '^ 1.*ceph'
+  ignore_errors: True
+  with_items: devices
+  register: parted
+  changed_when: False
+
+# Prepare means:
+# - create a GPT partition
+# - mark the partition with the ceph type uuid
+# - create a file system
+# - mark the fs as ready for ceph consumption
+# - the entire data disk is used (one big partition)
+# - a new partition is added to the journal disk (so it can be easily shared)
+#
+
+# NOTE (leseb): the prepare process must be parallelized somehow...
+# if you have 64 disks with 4TB each, this will take a while
+# since Ansible processes the loop sequentially
+
+- name: Prepare OSD disk(s)
+  command: ceph-disk prepare {{ item.1 }} {{ item.2 }}
+  when: item.0.rc != 0 and raw_multi_journal
+  ignore_errors: True
+  with_together:
+    - parted.results
+    - devices
+    - raw_journal_devices
+
+# Activate means:
+# - mount the volume in a temp location
+# - allocate an osd id (if needed)
+# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
+# - start ceph-osd
+#
+
+- name: Activate OSD(s)
+  command: ceph-disk activate {{ item }}1
+  with_items: devices
+  ignore_errors: True
+  changed_when: False
+
+- name: Start the OSD service and add it to the init sequence
+  service: name=ceph state=started enabled=yes
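
In this last scenario, with_together zips parted.results, devices and raw_journal_devices, so item.1 (the data disk) is paired with item.2 (its journal disk) by position; the two lists therefore need to line up one to one. A sketch with illustrative paths, using only the variable names consumed by the tasks above:

# group_vars/osds (illustrative values only)
raw_multi_journal: true
devices:                 # data disks
  - /dev/sdb
  - /dev/sdc
raw_journal_devices:     # journal disks, same order and length as 'devices'
  - /dev/sdf
  - /dev/sdg

With these values the prepare task would run "ceph-disk prepare /dev/sdb /dev/sdf" and "ceph-disk prepare /dev/sdc /dev/sdg".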