Merge pull request #1724 from ceph/container-multi-journal

osd: allow multi dedicated journals for containers
Sébastien Han 2017-08-30 17:41:42 +02:00 committed by GitHub
commit b05271f464
27 changed files with 341 additions and 267 deletions

Vagrantfile

@@ -77,7 +77,6 @@ ansible_provision = proc do |ansible|
       containerized_deployment: 'true',
       monitor_interface: ETH,
       ceph_mon_docker_subnet: "#{PUBLIC_SUBNET}.0/24",
-      ceph_osd_docker_devices: settings['disks'],
       devices: settings['disks'],
       ceph_docker_on_openstack: BOX == 'openstack',
       ceph_rgw_civetweb_port: 8080,


@@ -227,40 +227,22 @@ dummy:
 #ceph_config_keys: [] # DON'T TOUCH ME
 # PREPARE DEVICE
-# Make sure you only pass a single device to dedicated_devices, otherwise this will fail horribly.
-# This is why we use [0] in the example.
 #
 # WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
 #
-# Examples:
-# Journal collocated on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_FILESTORE=1
-# Dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-# Encrypted OSD on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Encrypted OSD with dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-#
-# Bluestore OSD collocated: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1
-# Bluestore OSD with dedicated db: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }}
-# Bluestore OSD with dedicated db and wal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_BLUESTORE_BLOCK_WAL={{ bluestore_wal_devices[0] }}
-# Encrypted OSD: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-#
-#
 #ceph_osd_docker_devices: "{{ devices }}"
-#ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
+#ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}
 # ACTIVATE DEVICE
-# Examples:
-# Journal collocated or Dedicated journal on Filesore: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FILESTORE=1
-# Encrypted OSD or Encrypted OSD with dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Bluestore OSD: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
 #
-#ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }}
+#ceph_osd_docker_extra_env:
 #ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command
 ###########
 # SYSTEMD #
 ###########
 # ceph_osd_systemd_overrides will override the systemd settings
 # for the ceph-osd services.
 # For example,to set "PrivateDevices=false" you can specify:
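The scenario-specific example strings were dropped because containerized OSDs now key off the same variables as bare-metal ones. A minimal sketch of an equivalent group_vars entry under the new scheme (device paths illustrative; the item-for-item pairing of devices and dedicated_devices is an assumption based on the with_together loops later in this diff):

    osd_scenario: non-collocated
    osd_objectstore: filestore
    dmcrypt: false
    devices:
      - /dev/sda
      - /dev/sdb
    dedicated_devices:
      - /dev/sdc
      - /dev/sdc
    ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}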


@@ -240,7 +240,7 @@
       name: "ceph-osd@{{ item | basename }}"
       state: stopped
       enabled: no
-    with_items: "{{ ceph_osd_docker_devices }}"
+    with_items: "{{ devices }}"
     ignore_errors: true

   - name: remove ceph osd prepare container
@@ -248,7 +248,7 @@
       image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
       name: "ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item | regex_replace('/', '') }}"
       state: absent
-    with_items: "{{ ceph_osd_docker_devices }}"
+    with_items: "{{ devices }}"
     ignore_errors: true

   - name: remove ceph osd container
@@ -256,7 +256,7 @@
       image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
       name: "ceph-osd-{{ ansible_hostname }}-dev{{ item | regex_replace('/', '') }}"
       state: absent
-    with_items: "{{ ceph_osd_docker_devices }}"
+    with_items: "{{ devices }}"
     ignore_errors: true

   - name: zap ceph osd disks
@@ -269,7 +269,7 @@
         {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
         zap_device
     with_items:
-      - "{{ ceph_osd_docker_devices }}"
+      - "{{ devices }}"
       - "{{ dedicated_devices|default([]) }}"

   - name: wait until the zap containers die
@@ -287,7 +287,7 @@
       name: "ceph-osd-zap-{{ ansible_hostname }}-dev{{ item | regex_replace('/', '') }}"
       state: absent
     with_items:
-      - "{{ ceph_osd_docker_devices }}"
+      - "{{ devices }}"
       - "{{ dedicated_devices|default([]) }}"

   - name: remove ceph osd service


@@ -267,7 +267,7 @@
     name: ceph-osd@{{ item | basename }}
     state: restarted
     enabled: yes
-  with_items: "{{ ceph_osd_docker_devices }}"
+  with_items: "{{ devices }}"
   when:
     - ansible_service_mgr == 'systemd'
     - containerized_deployment


@@ -178,7 +178,7 @@
   - name: collect osd devices
     shell: |
       blkid | awk '/ceph data/ { sub ("1:", "", $1); print $1 }'
-    register: ceph_osd_docker_devices
+    register: devices
     changed_when: false

   - name: stop non-containerized ceph osd(s)
@@ -223,7 +223,7 @@
       docker ps | grep -sq {{ item | regex_replace('/', '') }}
     changed_when: false
     failed_when: false
-    with_items: "{{ ceph_osd_docker_devices.stdout_lines }}"
+    with_items: "{{ devices.stdout_lines }}"
     register: osd_running

   - name: unmount all the osd directories
@@ -231,12 +231,12 @@
     changed_when: false
     failed_when: false
     with_together:
-      - "{{ ceph_osd_docker_devices.stdout_lines }}"
+      - "{{ devices.stdout_lines }}"
       - "{{ osd_running.results }}"
     when:
       - item.1.get("rc", 0) != 0

-  - set_fact: ceph_osd_docker_devices={{ ceph_osd_docker_devices.stdout_lines }}
+  - set_fact: devices={{ devices.stdout_lines }}

   roles:
     - ceph-defaults
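A side note on the rename above: devices is first a registered command result here, and the set_fact then flattens it to a plain list, so the per-device loops that follow see the same shape as a user-supplied devices variable, e.g. (illustrative values):

    devices:
      - /dev/sda
      - /dev/sdb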


@@ -11,5 +11,5 @@
   changed_when: false
   with_together:
     - "{{ ceph_config_keys }}"
-    - "{{ statconfig.results }}"
+    - "{{ statconfig.results | default([]) }}"
   when: item.1.stat.exists == true


@@ -219,40 +219,22 @@ lvm_volumes: []
 ceph_config_keys: [] # DON'T TOUCH ME
 # PREPARE DEVICE
-# Make sure you only pass a single device to dedicated_devices, otherwise this will fail horribly.
-# This is why we use [0] in the example.
 #
 # WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
 #
-# Examples:
-# Journal collocated on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_FILESTORE=1
-# Dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-# Encrypted OSD on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Encrypted OSD with dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-#
-# Bluestore OSD collocated: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1
-# Bluestore OSD with dedicated db: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }}
-# Bluestore OSD with dedicated db and wal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_BLUESTORE_BLOCK_WAL={{ bluestore_wal_devices[0] }}
-# Encrypted OSD: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-#
-#
 ceph_osd_docker_devices: "{{ devices }}"
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}
 # ACTIVATE DEVICE
-# Examples:
-# Journal collocated or Dedicated journal on Filesore: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FILESTORE=1
-# Encrypted OSD or Encrypted OSD with dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Bluestore OSD: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
 #
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }}
+ceph_osd_docker_extra_env:
 ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command
 ###########
 # SYSTEMD #
 ###########
 # ceph_osd_systemd_overrides will override the systemd settings
 # for the ceph-osd services.
 # For example,to set "PrivateDevices=false" you can specify:


@@ -0,0 +1,81 @@
+---
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore"
+  when:
+    - osd_objectstore == 'bluestore'
+    - not dmcrypt
+    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+    - not containerized_deployment
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore"
+  when:
+    - osd_objectstore == 'filestore'
+    - not dmcrypt
+    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+    - not containerized_deployment
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }}"
+  when:
+    - osd_objectstore == 'filestore'
+    - not dmcrypt
+    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
+    - not containerized_deployment
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"
+  when:
+    - osd_objectstore == 'bluestore'
+    - dmcrypt
+    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+    - not containerized_deployment
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt"
+  when:
+    - osd_objectstore == 'filestore'
+    - dmcrypt
+    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+    - not containerized_deployment
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
+  when:
+    - osd_objectstore == 'filestore'
+    - dmcrypt
+    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
+    - not containerized_deployment
+
+- set_fact:
+    docker_env_args: -e KV_TYPE={{ kv_type }} -e KV_IP={{ kv_endpoint }} -e KV_PORT={{ kv_port }}
+  when:
+    - containerized_deployment_with_kv
+
+- set_fact:
+    docker_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=0
+  when:
+    - containerized_deployment
+    - osd_objectstore == 'filestore'
+    - not dmcrypt
+
+- set_fact:
+    docker_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=1
+  when:
+    - containerized_deployment
+    - osd_objectstore == 'filestore'
+    - dmcrypt
+
+- set_fact:
+    docker_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=0
+  when:
+    - containerized_deployment
+    - osd_objectstore == 'bluestore'
+    - not dmcrypt
+
+- set_fact:
+    docker_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=1
+  when:
+    - containerized_deployment
+    - osd_objectstore == 'bluestore'
+    - dmcrypt
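To make the fact matrix above concrete, here is roughly what it evaluates to for two sample inventories; this is a sketch derived by reading the conditions, assuming the default cluster name "ceph":

    # containerized_deployment: true, osd_objectstore: bluestore, dmcrypt: true
    docker_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=1

    # containerized_deployment: false, osd_objectstore: filestore, dmcrypt: false, release >= luminous
    ceph_disk_cli_options: --cluster ceph --filestore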


@@ -40,10 +40,10 @@
   always_run: true
   register: journal_partition_status
   when:
-    - osd_scenario == 'non-collocated'
     - item.0.rc != 0
+    - osd_scenario == 'non-collocated'

-- name: fix partitions gpt header or labels of the journal devices
+- name: fix partitions gpt header or labels of the journal device(s)
   shell: "sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }} || sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }}"
   with_together:
     - "{{ journal_partition_status.results }}"
@@ -51,5 +51,18 @@
   changed_when: false
   when:
     - not item.0.get("skipped")
-    - osd_scenario == 'non-collocated'
     - item.0.get("rc", 0) != 0
+    - osd_scenario == 'non-collocated'
+    - not containerized_deployment
+
+- name: create gpt disk label of the journal device(s)
+  command: parted --script {{ item.1 }} mklabel gpt
+  with_together:
+    - "{{ osd_partition_status_results.results }}"
+    - "{{ dedicated_devices|unique }}"
+  changed_when: false
+  when:
+    - not item.0.get("skipped")
+    - item.0.get("rc", 0) != 0
+    - osd_scenario == 'non-collocated'
+    - containerized_deployment


@@ -54,10 +54,27 @@
   changed_when: false
   when:
     - ansible_devices is defined
-    - item.0.item.value.removable == 0
-    - item.0.item.value.partitions|count == 0
-    - item.0.item.value.holders|count == 0
-    - item.0.rc != 0
+    - not item.0.get("skipped")
+    - item.0.get("rc", 0) != 0
+    - item.1.value.removable == 0
+    - item.1.value.partitions|count == 0
+    - item.1.value.holders|count == 0
+    - not containerized_deployment
+
+- name: create gpt disk label
+  command: parted --script {{ item.1 }} mklabel gpt
+  with_together:
+    - "{{ osd_partition_status_results.results }}"
+    - "{{ ansible_devices }}"
+  changed_when: false
+  when:
+    - ansible_devices is defined
+    - not item.0.get("skipped")
+    - item.0.get("rc", 0) != 0
+    - item.1.value.removable == 0
+    - item.1.value.partitions|count == 0
+    - item.1.value.holders|count == 0
+    - containerized_deployment

 - name: check if a partition named 'ceph' exists (autodiscover disks)
   shell: "parted --script /dev/{{ item.key }} print | egrep -sq '^ 1.*ceph'"


@@ -32,6 +32,18 @@
   when:
     - not item.0.get("skipped")
     - item.0.get("rc", 0) != 0
+    - not containerized_deployment
+
+- name: create gpt disk label
+  command: parted --script {{ item.1 }} mklabel gpt
+  with_together:
+    - "{{ osd_partition_status_results.results }}"
+    - "{{ devices }}"
+  changed_when: false
+  when:
+    - not item.0.get("skipped")
+    - item.0.get("rc", 0) != 0
+    - containerized_deployment

 - name: check if a partition named 'ceph' exists
   shell: "parted --script {{ item.1 }} print | egrep -sq '^ 1.*ceph'"


@@ -9,62 +9,6 @@
     state: unmounted
   when: ceph_docker_on_openstack

-- name: verify if the disk was already prepared
-  shell: "lsblk -o PARTLABEL {{ item }} | grep -sq 'ceph'"
-  failed_when: false
-  always_run: true
-  with_items: "{{ ceph_osd_docker_devices }}"
-  register: osd_prepared
-
-# use shell rather than docker module
-# to ensure osd disk prepare finishes before
-# starting the next task
-- name: prepare ceph osd disk
-  shell: |
-    docker run --net=host \
-    --pid=host \
-    --privileged=true \
-    --name="ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item.0 | regex_replace('/', '') }}" \
-    -v /etc/ceph:/etc/ceph \
-    -v /var/lib/ceph/:/var/lib/ceph/ \
-    -v /dev:/dev \
-    -v /etc/localtime:/etc/localtime:ro \
-    -e "OSD_DEVICE={{ item.0 }}" \
-    -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
-    {{ ceph_osd_docker_prepare_env }} \
-    "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-  with_together:
-    - "{{ ceph_osd_docker_devices }}"
-    - "{{ osd_prepared.results }}"
-  when:
-    - item.1.get("rc", 0) != 0
-    - ceph_osd_docker_prepare_env is defined
-    - not containerized_deployment_with_kv
-
-- name: prepare ceph osd disk with kv_store
-  shell: |
-    docker run --net=host \
-    --pid=host \
-    --privileged=true \
-    --name="ceph-osd-prepare-{{ ansible_hostname }}-dev-{{ item.0 | regex_replace('/', '') }}" \
-    -v /dev:/dev \
-    -v /etc/localtime:/etc/localtime:ro \
-    -e "OSD_DEVICE={{ item.0 }}" \
-    -e "{{ ceph_osd_docker_prepare_env }}" \
-    -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
-    -e KV_TYPE={{kv_type}} \
-    -e KV_IP={{kv_endpoint}} \
-    -e KV_PORT={{kv_port}} \
-    {{ ceph_osd_docker_prepare_env }} \
-    "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" \
-  with_together:
-    - "{{ ceph_osd_docker_devices }}"
-    - "{{ osd_prepared.results }}"
-  when:
-    - item.1.get("rc", 0) != 0
-    - ceph_osd_docker_prepare_env is defined
-    - containerized_deployment_with_kv
-
 - name: generate ceph osd docker run script
   become: true
   template:
@@ -86,7 +30,7 @@
 - name: enable systemd unit file for osd instance
   shell: systemctl enable ceph-osd@{{ item | basename }}.service
   changed_when: false
-  with_items: "{{ ceph_osd_docker_devices }}"
+  with_items: "{{ devices }}"

 - name: reload systemd unit files
   shell: systemctl daemon-reload
@@ -98,4 +42,4 @@
     state: started
     enabled: yes
   changed_when: false
-  with_items: "{{ ceph_osd_docker_devices }}"
+  with_items: "{{ devices }}"


@@ -6,17 +6,19 @@
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False

+- include: ceph_disk_cli_options_facts.yml
+
+- include: check_devices.yml
+
 - include: ./scenarios/collocated.yml
   when:
     - osd_scenario == 'collocated'
-    - not containerized_deployment
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False

 - include: ./scenarios/non-collocated.yml
   when:
     - osd_scenario == 'non-collocated'
-    - not containerized_deployment
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False


@@ -1,63 +1,59 @@
 ---
-## SCENARIO 1: JOURNAL AND OSD_DATA ON THE SAME DEVICE
-
-- include: ../check_devices.yml
-
-# NOTE (leseb): the prepare process must be parallelized somehow...
-# if you have 64 disks with 4TB each, this will take a while
-# since Ansible will sequential process the loop
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore"
-  when:
-    - osd_objectstore == 'bluestore'
-    - osd_scenario == 'collocated'
-    - not dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore"
-  when:
-    - osd_objectstore == 'filestore'
-    - osd_scenario == 'collocated'
-    - not dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }}"
-  when:
-    - osd_objectstore == 'filestore'
-    - osd_scenario == 'collocated'
-    - not dmcrypt
-    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"
-  when:
-    - osd_objectstore == 'bluestore'
-    - osd_scenario == 'collocated'
-    - dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt"
-  when:
-    - osd_objectstore == 'filestore'
-    - osd_scenario == 'collocated'
-    - dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
-  when:
-    - osd_objectstore == 'filestore'
-    - osd_scenario == 'collocated'
-    - dmcrypt
-    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
+# use shell rather than docker module
+# to ensure osd disk prepare finishes before
+# starting the next task
+- name: prepare ceph containerized osd disk collocated
+  shell: |
+    docker run --net=host \
+    --pid=host \
+    --privileged=true \
+    --name=ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item.1 | regex_replace('/', '') }} \
+    -v /etc/ceph:/etc/ceph \
+    -v /var/lib/ceph/:/var/lib/ceph/ \
+    -v /dev:/dev \
+    -v /etc/localtime:/etc/localtime:ro \
+    -e CLUSTER={{ cluster }} \
+    -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
+    -e OSD_DEVICE={{ item.1 }} \
+    {{ docker_env_args }} \
+    {{ ceph_osd_docker_prepare_env }} \
+    {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+  with_together:
+    - "{{ parted_results.results }}"
+    - "{{ devices }}"
+  when:
+    - not item.0.get("skipped")
+    - not osd_auto_discovery
+    - containerized_deployment
+
+- name: automatic prepare ceph containerized osd disk collocated
+  shell: |
+    docker run --net=host \
+    --pid=host \
+    --privileged=true \
+    --name=ceph-osd-prepare-{{ ansible_hostname }}-devdev{{ item.key }} \
+    -v /etc/ceph:/etc/ceph \
+    -v /var/lib/ceph/:/var/lib/ceph/ \
+    -v /dev:/dev \
+    -v /etc/localtime:/etc/localtime:ro \
+    -e CLUSTER={{ cluster }} \
+    -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
+    -e OSD_DEVICE=/dev/{{ item.key }} \
+    {{ docker_env_args }} \
+    {{ ceph_osd_docker_prepare_env }} \
+    {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+  with_dict: "{{ ansible_devices }}"
+  when:
+    - ansible_devices is defined
+    - item.value.removable == "0"
+    - item.value.partitions|count == 0
+    - item.value.holders|count == 0
+    - osd_auto_discovery
+    - containerized_deployment

 # NOTE (alahouze): if the device is a partition, the parted command below has
 # failed, this is why we check if the device is a partition too.
-- name: automatic prepare "{{ osd_objectstore }}" osd disk(s) without partitions with collocated osd data and journal
+- name: automatic prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) without partitions with collocated osd data and journal
   command: "ceph-disk prepare {{ ceph_disk_cli_options }} /dev/{{ item.key }}"
   register: prepared_osds
   with_dict: "{{ ansible_devices }}"
@@ -67,8 +63,9 @@
     - item.value.partitions|count == 0
     - item.value.holders|count == 0
     - osd_auto_discovery
+    - not containerized_deployment

-- name: manually prepare "{{ osd_objectstore }}" osd disk(s) with collocated osd data and journal
+- name: manually prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) with collocated osd data and journal
   command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.2 }}"
   with_together:
     - "{{ parted_results.results }}"
@@ -80,5 +77,6 @@
     - item.0.get("rc", 0) != 0
     - item.1.get("rc", 0) != 0
     - not osd_auto_discovery
+    - not containerized_deployment

 - include: ../activate_osds.yml
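The containerized collocated scenario is now driven entirely by the standard variables; a group_vars sketch along the lines of the docker-cluster tests further down would exercise the first prepare task above (values illustrative):

    osd_scenario: collocated
    osd_objectstore: bluestore
    dmcrypt: false
    devices:
      - /dev/sda
      - /dev/sdb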


@@ -1,53 +1,64 @@
 ---
-- include: ../check_devices.yml
-
-# NOTE (leseb): the prepare process must be parallelized somehow...
-# if you have 64 disks with 4TB each, this will take a while
-# since Ansible will sequential process the loop
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore"
-  when:
-    - osd_objectstore == 'bluestore'
-    - not dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore"
-  when:
-    - osd_objectstore == 'filestore'
-    - not dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }}"
-  when:
-    - osd_objectstore == 'filestore'
-    - not dmcrypt
-    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"
-  when:
-    - osd_objectstore == 'bluestore'
-    - dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt"
-  when:
-    - osd_objectstore == 'filestore'
-    - dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
-  when:
-    - osd_objectstore == 'filestore'
-    - dmcrypt
-    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
-
-- name: prepare filestore osd disk(s) non-collocated
+# use shell rather than docker module
+# to ensure osd disk prepare finishes before
+# starting the next task
+- name: prepare ceph "{{ osd_objectstore }}" containerized osd disk(s) non-collocated
+  shell: |
+    docker run --net=host \
+    --pid=host \
+    --privileged=true \
+    --name=ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item.1 | regex_replace('/', '') }} \
+    -v /etc/ceph:/etc/ceph \
+    -v /var/lib/ceph/:/var/lib/ceph/ \
+    -v /dev:/dev \
+    -v /etc/localtime:/etc/localtime:ro \
+    -e CLUSTER={{ cluster }} \
+    -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
+    -e OSD_DEVICE={{ item.1 }} \
+    -e OSD_JOURNAL={{ item.2 }} \
+    {{ docker_env_args }} \
+    {{ ceph_osd_docker_prepare_env }} \
+    {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+  with_together:
+    - "{{ parted_results.results }}"
+    - "{{ devices }}"
+    - "{{ dedicated_devices }}"
+  when:
+    - not item.0.get("skipped")
+    - not osd_auto_discovery
+    - containerized_deployment
+    - osd_objectstore == 'filestore'
+
+- name: prepare ceph "{{ osd_objectstore }}" containerized osd disk(s) non-collocated with a dedicated device for db and wal
+  shell: |
+    docker run --net=host \
+    --pid=host \
+    --privileged=true \
+    --name=ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item.1 | regex_replace('/', '') }} \
+    -v /etc/ceph:/etc/ceph \
+    -v /var/lib/ceph/:/var/lib/ceph/ \
+    -v /dev:/dev \
+    -v /etc/localtime:/etc/localtime:ro \
+    -e CLUSTER={{ cluster }} \
+    -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
+    -e OSD_DEVICE={{ item.1 }} \
+    -e OSD_BLUESTORE_BLOCK_DB={{ item.2 }} \
+    -e OSD_BLUESTORE_BLOCK_WAL={{ item.3 }} \
+    {{ docker_env_args }} \
+    {{ ceph_osd_docker_prepare_env }} \
+    {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+  with_together:
+    - "{{ parted_results.results }}"
+    - "{{ devices }}"
+    - "{{ dedicated_devices }}"
+    - "{{ bluestore_wal_devices }}"
+  when:
+    - not item.0.get("skipped")
+    - not osd_auto_discovery
+    - containerized_deployment
+    - osd_objectstore == 'bluestore'
+
+- name: prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) non-collocated
   command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.2 }} {{ item.3 }}"
   with_together:
     - "{{ parted_results.results }}"
@@ -60,9 +71,9 @@
     - not item.1.get("skipped")
     - item.1.get("rc", 0) != 0
     - osd_objectstore == 'filestore'
-    - not osd_auto_discovery
+    - not containerized_deployment

-- name: manually prepare "{{ osd_objectstore }}" osd disk(s) with a dedicated device for db and wal
+- name: manually prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) with a dedicated device for db and wal
   command: "ceph-disk prepare {{ ceph_disk_cli_options }} --block.db {{ item.1 }} --block.wal {{ item.2 }} {{ item.3 }}"
   with_together:
     - "{{ parted_results.results }}"
@@ -73,6 +84,6 @@
     - not item.0.get("skipped")
     - item.0.get("rc", 0) != 0
     - osd_objectstore == 'bluestore'
-    - not osd_auto_discovery
+    - not containerized_deployment

 - include: ../activate_osds.yml
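These with_together loops are the core of the multi-journal support: each entry in devices is zipped with its own entry in dedicated_devices (and in bluestore_wal_devices for the db+wal case) instead of every OSD pointing at dedicated_devices[0]. A hypothetical bluestore layout that would exercise the second task above:

    osd_scenario: non-collocated
    osd_objectstore: bluestore
    devices:
      - /dev/sda
      - /dev/sdb
    dedicated_devices:
      - /dev/sdc
      - /dev/sdc
    bluestore_wal_devices:
      - /dev/sdd
      - /dev/sdd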


@@ -2,22 +2,47 @@
 # {{ ansible_managed }}

+#############
+# VARIABLES #
+#############
+REGEX="[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
+
 #############
 # FUNCTIONS #
 #############
-function create_dev_list {
-  local regex
+function expose_devices {
   local disks
-  regex="[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
   # we use the prepare container to find the partitions to expose
-  disks=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep -Eo /dev/disk/by-partuuid/${regex} | uniq)
+  disks=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
   for disk in $disks; do
-    DEVICES="--device $disk "
+    DEVICES="--device=$disk "
   done
 }

-create_dev_list $1
+function expose_partitions {
+  local partition
+  for partition in Block.wal Block.db Journal; do
+    if docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep -Eo "$partition is GPT partition"; then
+      if [[ "$partition" == "Block.wal" ]]; then
+        part=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep "$partition is GPT partition" | grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
+        DOCKER_ENV="$DOCKER_ENV -e OSD_BLUESTORE_BLOCK_WAL=$part"
+      elif [[ "$partition" == "Block.db" ]]; then
+        part=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep "$partition is GPT partition" | grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
+        DOCKER_ENV="$DOCKER_ENV -e OSD_BLUESTORE_BLOCK_DB=$part"
+      elif [[ "$partition" == "Journal" ]]; then
+        part=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep "$partition is GPT partition" | grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
+        DOCKER_ENV="$DOCKER_ENV -e OSD_JOURNAL=$part"
+      fi
+    fi
+  done
+}
+
+#expose_devices $1
+expose_partitions $1

 ########
@@ -28,29 +53,40 @@ create_dev_list $1
   --rm \
   --net=host \
   --privileged=true \
+  --pid=host \
+  -v /dev:/dev \
+  -v /etc/localtime:/etc/localtime:ro \
+  -v /var/lib/ceph:/var/lib/ceph \
+  -v /etc/ceph:/etc/ceph \
+  $DOCKER_ENV \
   {% if ansible_distribution == 'Ubuntu' -%}
   --security-opt apparmor:unconfined \
   {% endif -%}
-  --pid=host \
   {% if not containerized_deployment_with_kv -%}
-  -v /var/lib/ceph:/var/lib/ceph \
-  -v /etc/ceph:/etc/ceph \
   {% else -%}
-  -e KV_TYPE={{kv_type}} \
-  -e KV_IP={{kv_endpoint}} \
-  -e KV_PORT={{kv_port}} \
+  -e KV_TYPE={{ kv_type }} \
+  -e KV_IP={{ kv_endpoint }} \
+  -e KV_PORT={{ kv_port }} \
   {% endif -%}
-  -v /dev:/dev \
-  -v /etc/localtime:/etc/localtime:ro \
-  --device=/dev/${1} \
-  --device=/dev/${1}1 \
-  {% if dedicated_devices|length > 0 -%}
-  -e OSD_JOURNAL={{ dedicated_devices[0] }} \
-  {% else -%}
-  --device=/dev/${1}2 \
+  {% if osd_objectstore == 'filestore' and not dmcrypt -%}
+  -e OSD_FILESTORE=1 \
+  -e OSD_DMCRYPT=0 \
   {% endif -%}
+  {% if osd_objectstore == 'filestore' and dmcrypt -%}
+  -e OSD_FILESTORE=1 \
+  -e OSD_DMCRYPT=1 \
+  {% endif -%}
+  {% if osd_objectstore == 'bluestore' and not dmcrypt -%}
+  -e OSD_BLUESTORE=1 \
+  -e OSD_DMCRYPT=0 \
+  {% endif -%}
+  {% if osd_objectstore == 'bluestore' and dmcrypt -%}
+  -e OSD_BLUESTORE=1 \
+  -e OSD_DMCRYPT=1 \
+  {% endif -%}
+  -e CLUSTER={{ cluster }} \
   -e OSD_DEVICE=/dev/${1} \
+  -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
   {{ ceph_osd_docker_extra_env }} \
   --name=ceph-osd-{{ ansible_hostname }}-dev${1} \
-  -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}


@@ -8,7 +8,7 @@ monitor_interface: eth1
 radosgw_interface: eth1
 osd_scenario: collocated
 dmcrypt: true
-osd_objectstore: "bluestore"
+osd_objectstore: bluestore
 devices:
   - '/dev/sda'
   - '/dev/sdb'


@@ -10,7 +10,7 @@ monitor_interface: eth1
 radosgw_interface: eth1
 osd_scenario: collocated
 dmcrypt: true
-osd_objectstore: "bluestore"
+osd_objectstore: bluestore
 devices:
   - '/dev/sda'
   - '/dev/sdb'
@@ -24,5 +24,4 @@ ceph_conf_overrides:
   osd:
     bluestore block db size = 67108864
     bluestore block wal size = 1048576000
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1


@@ -14,11 +14,11 @@ ceph_docker_on_openstack: False
 public_network: "192.168.15.0/24"
 cluster_network: "192.168.16.0/24"
 ceph_rgw_civetweb_port: 8080
-ceph_osd_docker_devices: "{{ devices }}"
+osd_scenario: non-collocated
+osd_objectstore: bluestore
 devices:
   - /dev/sda
 dedicated_devices:
   - /dev/sdb
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_BLUESTORE=1
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
+ceph_osd_docker_prepare_env: -e OSD_FORCE_ZAP=1
 ceph_osd_docker_run_script_path: /var/tmp


@@ -15,9 +15,7 @@ public_network: "192.168.15.0/24"
 cluster_network: "192.168.16.0/24"
 osd_scenario: collocated
 ceph_rgw_civetweb_port: 8080
-ceph_osd_docker_devices: "{{ devices }}"
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_FORCE_ZAP=1 -e OSD_BLUESTORE=1
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
+ceph_osd_docker_prepare_env: -e OSD_FORCE_ZAP=1
 devices:
   - /dev/sda
   - /dev/sdb


@@ -6,7 +6,7 @@ public_network: "192.168.1.0/24"
 cluster_network: "192.168.2.0/24"
 journal_size: 100
 radosgw_interface: eth1
-osd_objectstore: "filestore"
+osd_objectstore: filestore
 devices:
   - '/dev/sda'
 dedicated_devices:


@@ -7,7 +7,8 @@ journal_size: 100
 monitor_interface: eth1
 radosgw_interface: eth1
 osd_scenario: non-collocated
-osd_objectstore: "filestore"
+dmcrypt: true
+osd_objectstore: filestore
 devices:
   - '/dev/sda'
 dedicated_devices:


@@ -7,7 +7,8 @@ journal_size: 100
 monitor_interface: eth1
 radosgw_interface: eth1
 osd_scenario: collocated
-osd_objectstore: "filestore"
+osd_objectstore: filestore
+dmcrypt: true
 devices:
   - '/dev/sda'
   - '/dev/sdb'


@@ -14,11 +14,10 @@ ceph_docker_on_openstack: False
 public_network: "192.168.15.0/24"
 cluster_network: "192.168.16.0/24"
 osd_scenario: collocated
+osd_objectstore: filestore
 dmcrypt: true
 ceph_rgw_civetweb_port: 8080
-ceph_osd_docker_devices: "{{ devices }}"
 devices:
   - /dev/sda
   - /dev/sdb
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1


@@ -14,11 +14,11 @@ ceph_docker_on_openstack: False
 public_network: "192.168.15.0/24"
 cluster_network: "192.168.16.0/24"
 ceph_rgw_civetweb_port: 8080
-ceph_osd_docker_devices: "{{ devices }}"
+osd_objectstore: filestore
 osd_scenario: non-collocated
 devices:
   - /dev/sda
 dedicated_devices:
   - /dev/sdb
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }}
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
 ceph_osd_docker_run_script_path: /var/tmp


@@ -15,9 +15,8 @@ public_network: "192.168.15.0/24"
 cluster_network: "192.168.16.0/24"
 osd_scenario: collocated
 ceph_rgw_civetweb_port: 8080
-ceph_osd_docker_devices: "{{ devices }}"
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE
+osd_objectstore: filestore
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
 devices:
   - /dev/sda
   - /dev/sdb


@@ -7,7 +7,7 @@ cluster_network: "192.168.4.0/24"
 monitor_interface: eth1
 radosgw_interface: eth1
 journal_size: 100
-osd_objectstore: "filestore"
+osd_objectstore: filestore
 devices:
   - '/dev/sda'
   - '/dev/sdb'