Merge pull request #1264 from ceph/clarify-osd-logs

osd: clarify osd scenario prepare sequence
Sébastien Han 2017-02-02 14:03:12 +01:00 committed by GitHub
commit 064c57babb
6 changed files with 11 additions and 11 deletions


@@ -9,7 +9,7 @@
 # NOTE (alahouze): if the device is a partition, the parted command below has
 # failed, this is why we check if the device is a partition too.
-- name: automatic prepare osd disk(s) without partitions
+- name: automatic prepare bluestore osd disk(s) without partitions
   command: ceph-disk prepare --bluestore --cluster "{{ cluster }}" "/dev/{{ item.key }}"
   register: prepared_osds
   with_dict: "{{ ansible_devices }}"
@@ -20,7 +20,7 @@
     - bluestore
     - osd_auto_discovery
-- name: manually prepare osd disk(s)
+- name: manually prepare bluestore osd disk(s)
   command: ceph-disk prepare --bluestore --cluster "{{ cluster }}" "{{ item.2 }}"
   with_together:
     - "{{ parted_results.results }}"


@@ -9,7 +9,7 @@
 # NOTE (alahouze): if the device is a partition, the parted command below has
 # failed, this is why we check if the device is a partition too.
-- name: prepare osd disk(s)
+- name: prepare dmcrypt osd disk(s) with a dedicated journal device
   command: "ceph-disk prepare --dmcrypt --cluster {{ cluster }} {{ item.2 }} {{ item.3 }}"
   with_together:
     - "{{ parted_results.results }}"


@@ -9,7 +9,7 @@
 # NOTE (alahouze): if the device is a partition, the parted command below has
 # failed, this is why we check if the device is a partition too.
-- name: automatic prepare osd disk(s) without partitions (dmcrypt)
+- name: automatic prepare dmcrypt osd disk(s) without partitions with collocated osd data and journal
   command: ceph-disk prepare --dmcrypt --cluster "{{ cluster }}" "/dev/{{ item.key }}"
   with_dict: "{{ ansible_devices }}"
   when:
@@ -19,7 +19,7 @@
     - dmcrypt_journal_collocation
     - osd_auto_discovery
-- name: manually prepare osd disk(s) (dmcrypt)
+- name: manually prepare dmcrypt osd disk(s) with collocated osd data and journal
   command: ceph-disk prepare --dmcrypt --cluster "{{ cluster }}" "{{ item.2 }}"
   with_together:
     - "{{ parted_results.results }}"


@@ -9,7 +9,7 @@
 # NOTE (alahouze): if the device is a partition, the parted command below has
 # failed, this is why we check if the device is a partition too.
-- name: automatic prepare osd disk(s) without partitions
+- name: automatic prepare filestore osd disk(s) without partitions with collocated osd data and journal
   command: ceph-disk prepare --cluster "{{ cluster }}" "/dev/{{ item.key }}"
   register: prepared_osds
   with_dict: "{{ ansible_devices }}"
@@ -20,7 +20,7 @@
     - journal_collocation
     - osd_auto_discovery
-- name: manually prepare osd disk(s)
+- name: manually prepare filestore osd disk(s) with collocated osd data and journal
   command: "ceph-disk prepare --cluster {{ cluster }} {{ item.2 }}"
   with_together:
     - "{{ parted_results.results }}"


@@ -15,18 +15,18 @@
 # NOTE (leseb): the prepare process must be parallelized somehow...
 # if you have 64 disks with 4TB each, this will take a while
 # since Ansible will sequential process the loop
-- name: prepare OSD disk(s)
+- name: prepare osd directory disk(s)
   command: "ceph-disk prepare --cluster {{ cluster }} {{ item }}"
   with_items: "{{ osd_directories }}"
   changed_when: false
   when: osd_directory
-- name: activate OSD(s)
+- name: activate osd(s)
   command: "ceph-disk activate {{ item }}"
   with_items: "{{ osd_directories }}"
   changed_when: false
-- name: start and add the OSD target to the systemd sequence
+- name: start and add osd target(s) to the systemd sequence
   service:
     name: ceph.target
     state: started
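
For the directory scenario, each entry of osd_directories is first prepared and then activated; with a hypothetical entry the two commands render to roughly:

    # sketch only: assumed osd_directories entry
    ceph-disk prepare --cluster ceph /var/lib/ceph/osd/osd-dir-0
    ceph-disk activate /var/lib/ceph/osd/osd-dir-0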


@@ -7,7 +7,7 @@
 # if you have 64 disks with 4TB each, this will take a while
 # since Ansible will sequential process the loop
-- name: prepare osd disk(s)
+- name: prepare filestore osd disk(s) with a dedicated journal device
   command: "ceph-disk prepare --cluster {{ cluster }} {{ item.1 }} {{ item.2 }}"
   with_together:
     - "{{ parted_results.results }}"