Merge pull request #1704 from ceph/filestore

osd: refactor osd scenarios
Guillaume Abrioux 2017-07-24 14:57:54 +02:00 committed by GitHub
commit 37f73cafa4
5 changed files with 83 additions and 137 deletions


@@ -8,7 +8,7 @@
 - include: ./scenarios/journal_collocation.yml
   when:
-    - journal_collocation
+    - (journal_collocation or dmcrypt_journal_collocation)
     - not containerized_deployment
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
@@ -20,13 +20,6 @@
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
-- include: ./scenarios/dmcrypt-journal-collocation.yml
-  when:
-    - dmcrypt_journal_collocation
-    - not containerized_deployment
-  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
-  static: False
-
 - include: ./scenarios/dmcrypt-dedicated-journal.yml
   when:
     - dmcrypt_dedicated_journal
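The net effect in this task file: the dedicated dmcrypt-journal-collocation include disappears, and the plain collocation scenario is entered for either flag. Reassembled from the kept and changed lines above, the surviving include reads:

    - include: ./scenarios/journal_collocation.yml
      when:
        - (journal_collocation or dmcrypt_journal_collocation)
        - not containerized_deployment
      # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
      static: False

The dmcrypt-specific behaviour itself moves into journal_collocation.yml through the ceph_disk_cli_options fact introduced further down.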


@@ -3,14 +3,32 @@
 - include: ../check_devices.yml
 
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"
+  when:
+    - osd_objectstore == 'bluestore'
+    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt"
+  when:
+    - osd_objectstore == 'filestore'
+    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
+  when:
+    - osd_objectstore == 'filestore'
+    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
+
 # NOTE (leseb): the prepare process must be parallelized somehow...
 # if you have 64 disks with 4TB each, this will take a while
 # since Ansible will sequential process the loop
 # NOTE (alahouze): if the device is a partition, the parted command below has
 # failed, this is why we check if the device is a partition too.
-- name: prepare dmcrypt osd disk(s) with a dedicated journal device (filestore)
-  command: "ceph-disk prepare --dmcrypt --cluster {{ cluster }} {{ item.2 }} {{ item.3 }}"
+- name: prepare dmcrypt osd disk(s) with a dedicated journal device on "{{ osd_objectstore }}"
+  command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.2 }} {{ item.3 }}"
   with_together:
     - "{{ parted_results.results }}"
     - "{{ ispartition_results.results }}"
@@ -23,24 +41,5 @@
     - item.0.get("rc", 0) != 0
     - item.1.get("rc", 0) != 0
     - not osd_auto_discovery
-    - osd_objectstore == 'filestore'
-    - dmcrypt_dedicated_journal
-
-- name: prepare dmcrypt osd disk(s) with a dedicated journal device (bluestore)
-  command: "ceph-disk prepare --bluestore --dmcrypt --cluster {{ cluster }} {{ item.2 }} {{ item.3 }}"
-  with_together:
-    - "{{ parted_results.results }}"
-    - "{{ ispartition_results.results }}"
-    - "{{ devices }}"
-    - "{{ raw_journal_devices }}"
-  changed_when: false
-  when:
-    - not item.0.get("skipped")
-    - not item.1.get("skipped")
-    - item.0.get("rc", 0) != 0
-    - item.1.get("rc", 0) != 0
-    - not osd_auto_discovery
-    - osd_objectstore == 'bluestore'
-    - dmcrypt_dedicated_journal
 
 - include: ../activate_osds.yml
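Per the set_fact hunk above, the merged task now issues one ceph-disk call per data/journal pair regardless of objectstore. For a luminous bluestore deployment with dmcrypt, the resolved command looks roughly like this (cluster name and device paths are illustrative, not taken from the diff):

    ceph-disk prepare --cluster ceph --bluestore --dmcrypt /dev/sdb /dev/sdc

Here item.2 is a data disk from devices and item.3 its journal device from raw_journal_devices, as the with_together lists of the deleted bluestore task show.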


@@ -1,66 +0,0 @@
----
-## SCENARIO 5: DMCRYPT
-
-- include: ../check_devices.yml
-
-# NOTE (leseb): the prepare process must be parallelized somehow...
-# if you have 64 disks with 4TB each, this will take a while
-# since Ansible will sequential process the loop
-# NOTE (alahouze): if the device is a partition, the parted command below has
-# failed, this is why we check if the device is a partition too.
-- name: automatic prepare dmcrypt osd disk(s) without partitions with collocated osd data and journal (filestore)
-  command: ceph-disk prepare --dmcrypt --cluster "{{ cluster }}" "/dev/{{ item.key }}"
-  with_dict: "{{ ansible_devices }}"
-  when:
-    - ansible_devices is defined
-    - item.value.removable == "0"
-    - item.value.partitions|count == 0
-    - item.value.holders|count == 0
-    - dmcrypt_journal_collocation
-    - osd_objectstore == 'filestore'
-    - osd_auto_discovery
-
-- name: automatic prepare dmcrypt osd disk(s) without partitions with collocated osd data and journal (bluestore)
-  command: ceph-disk prepare --bluestore --dmcrypt --cluster "{{ cluster }}" "/dev/{{ item.key }}"
-  with_dict: "{{ ansible_devices }}"
-  when:
-    - ansible_devices is defined
-    - item.value.removable == "0"
-    - item.value.partitions|count == 0
-    - item.value.holders|count == 0
-    - dmcrypt_journal_collocation
-    - osd_objectstore == 'bluestore'
-    - osd_auto_discovery
-
-- name: manually prepare dmcrypt osd disk(s) with collocated osd data and journal (filestore)
-  command: ceph-disk prepare --dmcrypt --cluster "{{ cluster }}" "{{ item.2 }}"
-  with_together:
-    - "{{ parted_results.results }}"
-    - "{{ ispartition_results.results }}"
-    - "{{ devices }}"
-  when:
-    - not item.0.get("skipped")
-    - not item.1.get("skipped")
-    - item.0.get("rc", 0) != 0
-    - item.1.get("rc", 0) != 0
-    - dmcrypt_journal_collocation
-    - osd_objectstore == 'filestore'
-    - not osd_auto_discovery
-
-- name: manually prepare dmcrypt osd disk(s) with collocated osd data and journal (bluestore)
-  command: ceph-disk prepare --bluestore --dmcrypt --cluster "{{ cluster }}" "{{ item.2 }}"
-  with_together:
-    - "{{ parted_results.results }}"
-    - "{{ ispartition_results.results }}"
-    - "{{ devices }}"
-  when:
-    - not item.0.get("skipped")
-    - not item.1.get("skipped")
-    - item.0.get("rc", 0) != 0
-    - item.1.get("rc", 0) != 0
-    - dmcrypt_journal_collocation
-    - osd_objectstore == 'bluestore'
-    - not osd_auto_discovery
-
-- include: ../activate_osds.yml
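Nothing from this deleted scenario file survives as-is, but nothing is lost either: its four tasks differ from their journal_collocation counterparts only by the --dmcrypt flag and the dmcrypt_journal_collocation guard, which is exactly what the new ceph_disk_cli_options branches in the next file encode. For instance, the flags of the deleted bluestore tasks reappear as:

    # deleted here:        ceph-disk prepare --bluestore --dmcrypt --cluster "{{ cluster }}" ...
    # reintroduced below:  ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"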


@@ -7,10 +7,52 @@
 # if you have 64 disks with 4TB each, this will take a while
 # since Ansible will sequential process the loop
 
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore"
+  when:
+    - osd_objectstore == 'bluestore'
+    - journal_collocation
+    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore"
+  when:
+    - osd_objectstore == 'filestore'
+    - journal_collocation
+    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }}"
+  when:
+    - osd_objectstore == 'filestore'
+    - journal_collocation
+    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"
+  when:
+    - osd_objectstore == 'bluestore'
+    - dmcrypt_journal_collocation
+    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt"
+  when:
+    - osd_objectstore == 'filestore'
+    - dmcrypt_journal_collocation
+    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
+  when:
+    - osd_objectstore == 'filestore'
+    - dmcrypt_journal_collocation
+    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
+
 # NOTE (alahouze): if the device is a partition, the parted command below has
 # failed, this is why we check if the device is a partition too.
-- name: automatic prepare filestore osd disk(s) without partitions with collocated osd data and journal
-  command: ceph-disk prepare --cluster "{{ cluster }}" "/dev/{{ item.key }}"
+- name: automatic prepare "{{ osd_objectstore }}" osd disk(s) without partitions with collocated osd data and journal
+  command: "ceph-disk prepare {{ ceph_disk_cli_options }} /dev/{{ item.key }}"
   register: prepared_osds
   with_dict: "{{ ansible_devices }}"
   when:
@@ -18,12 +60,10 @@
     - item.value.removable == "0"
     - item.value.partitions|count == 0
     - item.value.holders|count == 0
-    - journal_collocation
-    - osd_objectstore == 'filestore'
     - osd_auto_discovery
 
-- name: manually prepare filestore osd disk(s) with collocated osd data and journal
-  command: "ceph-disk prepare --cluster {{ cluster }} {{ item.2 }}"
+- name: manually prepare "{{ osd_objectstore }}" osd disk(s) with collocated osd data and journal
+  command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.2 }}"
   with_together:
     - "{{ parted_results.results }}"
     - "{{ ispartition_results.results }}"
@@ -33,36 +73,6 @@
     - not item.1.get("skipped")
     - item.0.get("rc", 0) != 0
     - item.1.get("rc", 0) != 0
-    - journal_collocation
-    - osd_objectstore == 'filestore'
     - not osd_auto_discovery
-
-- name: automatic prepare bluestore osd disk(s) without partitions
-  command: ceph-disk prepare --bluestore --cluster "{{ cluster }}" "/dev/{{ item.key }}"
-  register: prepared_osds
-  with_dict: "{{ ansible_devices }}"
-  when:
-    - ansible_devices is defined
-    - item.value.removable == "0"
-    - item.value.partitions|count == 0
-    - item.value.holders|count == 0
-    - journal_collocation
-    - osd_objectstore == 'bluestore'
-    - osd_auto_discovery
-
-- name: manually prepare bluestore osd disk(s)
-  command: ceph-disk prepare --bluestore --cluster "{{ cluster }}" "{{ item.2 }}"
-  with_together:
-    - "{{ parted_results.results }}"
-    - "{{ ispartition_results.results }}"
-    - "{{ devices }}"
-  when:
-    - not item.0.get("skipped")
-    - not item.1.get("skipped")
-    - item.0.get("rc", 0) != 0
-    - item.1.get("rc", 0) != 0
-    - journal_collocation
-    - osd_objectstore == 'bluestore'
-    - not osd_auto_discovery
 
 - include: ../activate_osds.yml
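Taken together, the six set_fact branches above form a small decision matrix; sketched as comments, with values copied from the hunks (releases compared through ceph_release_num):

    # objectstore  guard                         release       ceph_disk_cli_options
    # bluestore    journal_collocation           >= luminous   --cluster {{ cluster }} --bluestore
    # filestore    journal_collocation           >= luminous   --cluster {{ cluster }} --filestore
    # filestore    journal_collocation           <  luminous   --cluster {{ cluster }}
    # bluestore    dmcrypt_journal_collocation   >= luminous   --cluster {{ cluster }} --bluestore --dmcrypt
    # filestore    dmcrypt_journal_collocation   >= luminous   --cluster {{ cluster }} --filestore --dmcrypt
    # filestore    dmcrypt_journal_collocation   <  luminous   --cluster {{ cluster }} --dmcrypt

There is deliberately no pre-luminous bluestore branch: bluestore only became a stable objectstore in luminous, so older releases always fall through to the plain filestore invocations.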


@@ -8,13 +8,25 @@
 # since Ansible will sequential process the loop
 - set_fact:
-    osd_type: "--filestore"
-  when:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore"
+  when:
+    - osd_objectstore == 'bluestore'
+    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore"
+  when:
+    - osd_objectstore == 'filestore'
+    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }}"
+  when:
+    - osd_objectstore == 'filestore'
+    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
 
 - name: prepare filestore osd disk(s) with a dedicated journal device
-  command: "ceph-disk prepare {{ osd_type | default('') }} --cluster {{ cluster }} {{ item.1 }} {{ item.2 }}"
+  command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.1 }} {{ item.2 }}"
   with_together:
     - "{{ parted_results.results }}"
     - "{{ devices }}"
@@ -22,12 +34,11 @@
   changed_when: false
   when:
     - item.0.get("skipped") or item.0.get("rc", 0) != 0
-    - raw_multi_journal
     - osd_objectstore == 'filestore'
     - not osd_auto_discovery
 
 - name: manually prepare bluestore osd disk(s) with a dedicated device for db and wal
-  command: "ceph-disk prepare --bluestore --cluster {{ cluster }} --block.db {{ item.1 }} --block.wal {{ item.2 }} {{ item.3 }}"
+  command: "ceph-disk prepare {{ ceph_disk_cli_options }} --block.db {{ item.1 }} --block.wal {{ item.2 }} {{ item.3 }}"
   with_together:
     - "{{ parted_results.results }}"
     - "{{ raw_journal_devices }}"
@@ -36,7 +47,6 @@
   when:
     - not item.0.get("skipped")
    - item.0.get("rc", 0) != 0
-    - raw_multi_journal
     - osd_objectstore == 'bluestore'
     - not osd_auto_discovery
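With the bluestore fact set on a luminous cluster, the db/wal task above resolves to a command of this shape (device paths are illustrative; item.1 comes from raw_journal_devices per the visible with_together list, while the lists feeding item.2 and item.3 are cut off in the hunk):

    ceph-disk prepare --cluster ceph --bluestore --block.db /dev/nvme0n1 --block.wal /dev/nvme0n1 /dev/sdb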