Merge pull request #1461 from ceph/wip-remove-osd-directory-scenario

remove osd directory scenario
pull/1466/head
Sébastien Han 2017-04-24 10:54:54 +02:00 committed by GitHub
commit 58e7d39bcc
8 changed files with 1 addition and 91 deletions

@@ -142,16 +142,6 @@ dummy:
#raw_journal_devices: []
# III. Use directory instead of disk for OSDs
# Use 'true' to enable this scenario
#osd_directory: false
#osd_directories:
# - /var/lib/ceph/osd/mydir1
# - /var/lib/ceph/osd/mydir2
#osd_directories: []
# IV. This will partition disks for BlueStore
# Use 'true' to enable this scenario
#bluestore: false
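For reference, enabling the scenario removed by this commit meant uncommenting that block, roughly as follows (the directory paths are the sample's own placeholders):

osd_directory: true
osd_directories:
  - /var/lib/ceph/osd/mydir1
  - /var/lib/ceph/osd/mydir2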

@@ -17,13 +17,11 @@ Choose between the following scenario to configure your OSDs, **choose only one**
* `journal_collocation`
* `raw_multi_journal`
* `osd_directory`
Then:
* `devices`
* `raw_journal_devices` (**only if** you activated `raw_multi_journal`)
* `osd_directories` (**only if** you activated `osd_directory`)
# Dependencies
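With the directory scenario gone, an OSD host is configured with one of the remaining scenarios; a minimal sketch using journal_collocation (the device paths are hypothetical):

journal_collocation: true
devices:
  - /dev/sdb
  - /dev/sdc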

@@ -134,16 +134,6 @@ raw_multi_journal: false
raw_journal_devices: []
# III. Use directory instead of disk for OSDs
# Use 'true' to enable this scenario
osd_directory: false
#osd_directories:
# - /var/lib/ceph/osd/mydir1
# - /var/lib/ceph/osd/mydir2
osd_directories: []
# IV. This will partition disks for BlueStore
# Use 'true' to enable this scenario
bluestore: false
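For comparison, the remaining dedicated-journal scenario pairs devices with raw_journal_devices; a sketch with hypothetical devices (the exact device-to-journal mapping is defined by the role, not shown here):

raw_multi_journal: true
devices:
  - /dev/sdb
  - /dev/sdc
raw_journal_devices:
  - /dev/sdd   # the same journal device may be listed for several OSD devices
  - /dev/sdd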

@@ -13,14 +13,12 @@
- include: ./check_devices_static.yml
when:
- not osd_auto_discovery
- not osd_directory
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
- include: ./check_devices_auto.yml
when:
- osd_auto_discovery
- not osd_directory
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
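With osd_directory gone, both includes above are gated only on auto-discovery; for example, the first one reduces to:

- include: ./check_devices_static.yml
  when:
    - not osd_auto_discovery
  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
  static: False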

@@ -28,7 +28,6 @@
- not osd_containerized_deployment
- not journal_collocation
- not raw_multi_journal
- not osd_directory
- not bluestore
- not dmcrypt_journal_collocation
- not dmcrypt_dedicated_journal
@@ -41,18 +40,13 @@
- osd_group_name in group_names
- not osd_containerized_deployment
- (journal_collocation and raw_multi_journal)
or (journal_collocation and osd_directory)
or (journal_collocation and bluestore)
or (raw_multi_journal and osd_directory)
or (raw_multi_journal and bluestore)
or (osd_directory and bluestore)
or (dmcrypt_journal_collocation and journal_collocation)
or (dmcrypt_journal_collocation and raw_multi_journal)
or (dmcrypt_journal_collocation and osd_directory)
or (dmcrypt_journal_collocation and bluestore)
or (dmcrypt_dedicated_journal and journal_collocation)
or (dmcrypt_dedicated_journal and raw_multi_journal)
or (dmcrypt_dedicated_journal and osd_directory)
or (dmcrypt_dedicated_journal and bluestore)
or (dmcrypt_dedicated_journal and dmcrypt_journal_collocation)
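Dropping osd_directory removes five of the fifteen pairwise exclusivity terms (six scenarios give C(6,2) = 15 pairs, the remaining five give C(5,2) = 10), so the condition becomes:

- (journal_collocation and raw_multi_journal)
  or (journal_collocation and bluestore)
  or (raw_multi_journal and bluestore)
  or (dmcrypt_journal_collocation and journal_collocation)
  or (dmcrypt_journal_collocation and raw_multi_journal)
  or (dmcrypt_journal_collocation and bluestore)
  or (dmcrypt_dedicated_journal and journal_collocation)
  or (dmcrypt_dedicated_journal and raw_multi_journal)
  or (dmcrypt_dedicated_journal and bluestore)
  or (dmcrypt_dedicated_journal and dmcrypt_journal_collocation)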
@@ -77,12 +71,3 @@
- raw_journal_devices|length == 0
or devices|length == 0
- name: verify directories have been provided
fail:
msg: "please provide directories to your osd scenario"
when:
- osd_group_name is defined
- osd_group_name in group_names
- not osd_containerized_deployment
- osd_directory
- osd_directories is not defined
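The surviving device-based scenarios keep an equivalent guard following the same pattern; a sketch modelled on the removed task (the actual checks remaining in this file may be worded differently):

- name: verify devices have been provided
  fail:
    msg: "please provide devices to your osd scenario"
  when:
    - osd_group_name is defined
    - osd_group_name in group_names
    - not osd_containerized_deployment
    - journal_collocation
    - devices is not defined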

@@ -20,13 +20,6 @@
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
- include: ./scenarios/osd_directory.yml
when:
- osd_directory
- not osd_containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
- include: ./scenarios/bluestore.yml
when:
- osd_objectstore == 'bluestore'
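The "static: False" comments rely on Ansible 2.x include behaviour: a dynamic include is resolved at run time, so a false when condition skips the whole included file in one step instead of marking every task inside it as skipped. A hypothetical illustration (file and variable names are made up):

- include: ./some_scenario.yml        # hypothetical file
  when:
    - some_scenario_enabled | bool    # hypothetical variable
  static: False                       # dynamic include: skipped entirely when the condition is false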

@@ -6,7 +6,6 @@
failed_when: false
always_run: true
register: osd_path
when: not osd_directory
- name: get osd id
command: cat {{ item.stdout }}/whoami
@@ -15,22 +14,12 @@
failed_when: false
always_run: true
register: osd_id_non_dir_scenario
when: not osd_directory
- name: get osd id for directory scenario
command: cat {{ item.stdout }}/whoami
with_items: "{{ osd_directories }}"
changed_when: false
failed_when: false
always_run: true
register: osd_id_dir_scenario
when: osd_directory
# NOTE (leseb): we must do this because of
# https://github.com/ansible/ansible/issues/4297
- name: combine osd_path results
set_fact:
combined_osd_id: "{{ osd_id_non_dir_scenario if not osd_directory else osd_id_dir_scenario }}"
combined_osd_id: "{{ osd_id_non_dir_scenario }}"
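The NOTE above concerns registered variables on skipped tasks: a task whose when condition is false still defines its register target (as a dict with skipped: true), which is why the old line had to choose between the two registers explicitly; with the directory register gone, the ternary collapses to the single variable. A minimal illustration of that behaviour (hypothetical task, not from this role):

- command: /bin/true
  register: result_a
  when: false          # skipped, yet result_a is still defined afterwards

- debug:
    var: result_a      # shows the skipped-result structure instead of an undefined variable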
- name: create a ceph fragment and assemble directory
file:

@@ -1,33 +0,0 @@
---
## SCENARIO 4: USE A DIRECTORY INSTEAD OF A DISK FOR OSD
# NOTE (leseb): we do not check the filesystem underneath the directory
# so it is really up to you to configure this properly.
# Declaring more than one directory on the same filesystem will confuse Ceph.
- name: create osd directories
file:
path: "{{ item }}"
state: directory
owner: "ceph"
group: "ceph"
with_items: "{{ osd_directories }}"
# NOTE (leseb): the prepare process must be parallelized somehow...
# if you have 64 disks with 4TB each, this will take a while
# since Ansible will sequential process the loop
- name: prepare osd directory disk(s)
command: "ceph-disk prepare --cluster {{ cluster }} {{ item }}"
with_items: "{{ osd_directories }}"
changed_when: false
when: osd_directory
- name: activate osd(s)
command: "ceph-disk activate {{ item }}"
with_items: "{{ osd_directories }}"
changed_when: false
- name: start and add osd target(s) to the systemd sequence
service:
name: ceph.target
state: started
enabled: yes
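The parallelization concern raised in the NOTE of the deleted file is usually addressed with async tasks; a sketch of that technique (not part of this change, values are illustrative):

- name: prepare osd disk(s) in parallel
  command: "ceph-disk prepare --cluster {{ cluster }} {{ item }}"
  with_items: "{{ devices }}"
  async: 3600          # allow up to an hour per device
  poll: 0              # do not wait here; collect the jobs below
  register: prepare_jobs
  changed_when: false

- name: wait for all prepare jobs to finish
  async_status:
    jid: "{{ item.ansible_job_id }}"
  with_items: "{{ prepare_jobs.results }}"
  register: job_result
  until: job_result.finished
  retries: 360
  delay: 10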