osds: move openstack pools creation in ceph-osd

When deploying a large number of OSD nodes, pool creation can fail because the
protection check [1] won't pass: the pools are created before all the OSDs are
active.

The idea here is to move the openstack pools creation to the end of the `ceph-osd` role.

[1] e59258943b/src/mon/OSDMonitor.cc (L5673)

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1578086

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/2645/head
Guillaume Abrioux 2018-05-22 16:41:40 +02:00 committed by Sébastien Han
parent f8260119cd
commit 564a662baf
12 changed files with 257 additions and 189 deletions
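For context, a rough sketch of the arithmetic behind the protection check referenced in [1]: the monitor refuses to create a pool when the projected number of placement groups per OSD would exceed its budget (the `mon_max_pg_per_osd` option in Luminous and later). Early in a large deployment only a handful of OSDs are up and in, so pool definitions that are perfectly reasonable for the full cluster blow past the per-OSD limit. The snippet below is an illustrative model only, not the actual OSDMonitor.cc logic, and the pool sizes and OSD counts are hypothetical.

```python
# Illustrative model of the monitor's "too many PGs per OSD" guard.
# This is NOT the real OSDMonitor.cc code, just the gist of the arithmetic.

def pool_creation_allowed(new_pgs, size, existing_pgs, num_in_osds,
                          mon_max_pg_per_osd=200):
    """Return True if creating new_pgs PGs with replica count `size` stays under budget."""
    if num_in_osds == 0:
        return False
    projected = existing_pgs + new_pgs * size
    return projected / num_in_osds <= mon_max_pg_per_osd

# 5 OpenStack pools with pg_num=128 and size=3, but only 3 OSDs activated so far:
print(pool_creation_allowed(new_pgs=5 * 128, size=3, existing_pgs=0, num_in_osds=3))
# -> False: 1920 projected PGs over 3 OSDs is 640 PGs per OSD

# The same pools once all 60 OSDs of the cluster are up and in:
print(pool_creation_allowed(new_pgs=5 * 128, size=3, existing_pgs=0, num_in_osds=60))
# -> True: 32 PGs per OSD
```

Creating the pools at the end of the `ceph-osd` role means the denominator in that calculation is the full OSD count, so the check passes.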


@@ -532,3 +532,69 @@ dummy:
#docker_pull_retry: 3
#docker_pull_timeout: "300s"
#############
# OPENSTACK #
#############
#openstack_config: false
#openstack_glance_pool:
# name: "images"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
#openstack_cinder_pool:
# name: "volumes"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
#openstack_nova_pool:
# name: "vms"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
#openstack_cinder_backup_pool:
# name: "backups"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
#openstack_gnocchi_pool:
# name: "metrics"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
#openstack_pools:
# - "{{ openstack_glance_pool }}"
# - "{{ openstack_cinder_pool }}"
# - "{{ openstack_nova_pool }}"
# - "{{ openstack_cinder_backup_pool }}"
# - "{{ openstack_gnocchi_pool }}"
# The value for 'key' can be a pre-generated key,
# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
# By default, keys will be auto-generated.
#
#openstack_keys:
# - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
# - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
# - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
# - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
# - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }


@@ -69,72 +69,6 @@ dummy:
# Note that 'host' is mandatory and that you need to submit at least two bucket type (including the host)
#create_crush_tree: false
#############
# OPENSTACK #
#############
#openstack_config: false
#openstack_glance_pool:
# name: "images"
# pg_num: "{{ osd_pool_default_pg_num }}"
# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
#openstack_cinder_pool:
# name: "volumes"
# pg_num: "{{ osd_pool_default_pg_num }}"
# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
#openstack_nova_pool:
# name: "vms"
# pg_num: "{{ osd_pool_default_pg_num }}"
# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
#openstack_cinder_backup_pool:
# name: "backups"
# pg_num: "{{ osd_pool_default_pg_num }}"
# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
#openstack_gnocchi_pool:
# name: "metrics"
# pg_num: "{{ osd_pool_default_pg_num }}"
# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
#openstack_pools:
# - "{{ openstack_glance_pool }}"
# - "{{ openstack_cinder_pool }}"
# - "{{ openstack_nova_pool }}"
# - "{{ openstack_cinder_backup_pool }}"
# - "{{ openstack_gnocchi_pool }}"
# The value for 'key' can be a pre-generated key,
# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
# By default, keys will be auto-generated.
#
#openstack_keys:
# - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
# - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
# - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
# - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
# - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
##########
# DOCKER #
##########


@@ -29,7 +29,7 @@ dummy:
# mimic: 13
# Directory to fetch cluster fsid, keys etc...
fetch_directory: ~/ceph-ansible-keys
#fetch_directory: fetch/
# The 'cluster' variable determines the name of the cluster.
# Changing the default value to something else means that you will
@@ -135,14 +135,14 @@ fetch_directory: ~/ceph-ansible-keys
# - 'distro' means that no separate repo file will be added
# you will get whatever version of Ceph is included in your Linux distro.
# 'local' means that the ceph binaries will be copied over from the local machine
ceph_origin: repository
#ceph_origin: "{{ 'repository' if ceph_rhcs or ceph_stable or ceph_dev or ceph_stable_uca or ceph_custom else 'dummy' }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
#valid_ceph_origins:
# - repository
# - distro
# - local
ceph_repository: rhcs
#ceph_repository: "{{ 'community' if ceph_stable else 'rhcs' if ceph_rhcs else 'dev' if ceph_dev else 'uca' if ceph_stable_uca else 'custom' if ceph_custom else 'dummy' }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
#valid_ceph_repository:
# - community
# - rhcs
@@ -532,3 +532,69 @@ ceph_repository: rhcs
#docker_pull_retry: 3
#docker_pull_timeout: "300s"
#############
# OPENSTACK #
#############
#openstack_config: false
#openstack_glance_pool:
# name: "images"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
#openstack_cinder_pool:
# name: "volumes"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
#openstack_nova_pool:
# name: "vms"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
#openstack_cinder_backup_pool:
# name: "backups"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
#openstack_gnocchi_pool:
# name: "metrics"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
#openstack_pools:
# - "{{ openstack_glance_pool }}"
# - "{{ openstack_cinder_pool }}"
# - "{{ openstack_nova_pool }}"
# - "{{ openstack_cinder_backup_pool }}"
# - "{{ openstack_gnocchi_pool }}"
# The value for 'key' can be a pre-generated key,
# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
# By default, keys will be auto-generated.
#
#openstack_keys:
# - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
# - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
# - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
# - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
# - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }


@@ -523,3 +523,68 @@ rolling_update: false
#####################
docker_pull_retry: 3
docker_pull_timeout: "300s"
#############
# OPENSTACK #
#############
openstack_config: false
openstack_glance_pool:
name: "images"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
rule_name: "replicated_rule"
type: 1
erasure_profile: ""
expected_num_objects: ""
openstack_cinder_pool:
name: "volumes"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
rule_name: "replicated_rule"
type: 1
erasure_profile: ""
expected_num_objects: ""
openstack_nova_pool:
name: "vms"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
rule_name: "replicated_rule"
type: 1
erasure_profile: ""
expected_num_objects: ""
openstack_cinder_backup_pool:
name: "backups"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
rule_name: "replicated_rule"
type: 1
erasure_profile: ""
expected_num_objects: ""
openstack_gnocchi_pool:
name: "metrics"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
rule_name: "replicated_rule"
type: 1
erasure_profile: ""
expected_num_objects: ""
openstack_pools:
- "{{ openstack_glance_pool }}"
- "{{ openstack_cinder_pool }}"
- "{{ openstack_nova_pool }}"
- "{{ openstack_cinder_backup_pool }}"
- "{{ openstack_gnocchi_pool }}"
# The value for 'key' can be a pre-generated key,
# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
# By default, keys will be auto-generated.
#
openstack_keys:
- { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
- { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
- { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
- { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
- { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }


@@ -61,72 +61,6 @@ crush_rules:
# Note that 'host' is mandatory and that you need to submit at least two bucket type (including the host)
create_crush_tree: false
#############
# OPENSTACK #
#############
openstack_config: false
openstack_glance_pool:
name: "images"
pg_num: "{{ osd_pool_default_pg_num }}"
pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "replicated_rule"
type: 1
erasure_profile: ""
expected_num_objects: ""
openstack_cinder_pool:
name: "volumes"
pg_num: "{{ osd_pool_default_pg_num }}"
pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "replicated_rule"
type: 1
erasure_profile: ""
expected_num_objects: ""
openstack_nova_pool:
name: "vms"
pg_num: "{{ osd_pool_default_pg_num }}"
pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "replicated_rule"
type: 1
erasure_profile: ""
expected_num_objects: ""
openstack_cinder_backup_pool:
name: "backups"
pg_num: "{{ osd_pool_default_pg_num }}"
pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "replicated_rule"
type: 1
erasure_profile: ""
expected_num_objects: ""
openstack_gnocchi_pool:
name: "metrics"
pg_num: "{{ osd_pool_default_pg_num }}"
pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "replicated_rule"
type: 1
erasure_profile: ""
expected_num_objects: ""
openstack_pools:
- "{{ openstack_glance_pool }}"
- "{{ openstack_cinder_pool }}"
- "{{ openstack_nova_pool }}"
- "{{ openstack_cinder_backup_pool }}"
- "{{ openstack_gnocchi_pool }}"
# The value for 'key' can be a pre-generated key,
# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
# By default, keys will be auto-generated.
#
openstack_keys:
- { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
- { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
- { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
- { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
- { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
##########
# DOCKER #
##########


@@ -50,13 +50,6 @@
when:
- openstack_keys_tmp is defined
# Create the pools listed in openstack_pools
- name: include openstack_config.yml
include: openstack_config.yml
when:
- openstack_config
- inventory_hostname == groups[mon_group_name] | last
- name: include create_mds_filesystems.yml
include: create_mds_filesystems.yml
when:


@@ -86,3 +86,10 @@
- containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
# Create the pools listed in openstack_pools
- name: include openstack_config.yml
include: openstack_config.yml
when:
- openstack_config
- inventory_hostname == groups[osd_group_name] | last


@@ -1,15 +1,16 @@
---
- name: list existing pool(s)
command: >
{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
osd pool get {{ item.name }} size
with_items: "{{ openstack_pools | unique }}"
register: created_pools
delegate_to: "{{ groups[mon_group_name][0] }}"
failed_when: false
- name: create openstack pool(s)
command: >
{{ docker_exec_cmd }} ceph --cluster {{ cluster }}
{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
osd pool create {{ item.0.name }}
{{ item.0.pg_num }}
{{ item.0.pgp_num | default(item.0.pg_num) }}
@@ -23,13 +24,15 @@
- "{{ openstack_pools | unique }}"
- "{{ created_pools.results }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- item.1.get('rc', 0) != 0
- name: assign application to pool(s)
command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
with_items: "{{ openstack_pools | unique }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- ceph_release_num[ceph_release] >= ceph_release_num['luminous']
- item.application is defined
@@ -40,10 +43,11 @@
name: "{{ item.name }}"
caps: "{{ item.caps }}"
secret: "{{ item.key | default('') }}"
containerized: "{{ docker_exec_cmd | default(False) }}"
containerized: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }}"
cluster: "{{ cluster }}"
mode: "{{ item.mode|default(omit) }}"
with_items: "{{ openstack_keys }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: cephx
- name: fetch openstack cephx key(s)
@@ -51,6 +55,7 @@
src: "/etc/ceph/{{ cluster }}.{{ item.name }}.keyring"
dest: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.{{ item.name }}.keyring"
flat: yes
delegate_to: "{{ groups[mon_group_name][0] }}"
with_items: "{{ openstack_keys }}"
- name: copy to other mons the openstack cephx key(s)


@@ -20,3 +20,23 @@ devices:
dedicated_devices:
- '/dev/sdc'
- '/dev/sdc'
openstack_config: True
openstack_glance_pool:
name: "images"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
rule_name: "HDD"
type: 1
erasure_profile: ""
expected_num_objects: ""
openstack_cinder_pool:
name: "volumes"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
rule_name: "HDD"
type: 1
erasure_profile: ""
expected_num_objects: ""
openstack_pools:
- "{{ openstack_glance_pool }}"
- "{{ openstack_cinder_pool }}"


@@ -11,25 +11,4 @@ crush_rule_hdd:
type: host
default: true
crush_rules:
- "{{ crush_rule_hdd }}"
openstack_config: True
openstack_glance_pool:
name: "images"
pg_num: "{{ osd_pool_default_pg_num }}"
pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
erasure_profile: ""
expected_num_objects: ""
openstack_cinder_pool:
name: "volumes"
pg_num: "{{ osd_pool_default_pg_num }}"
pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
erasure_profile: ""
expected_num_objects: ""
openstack_pools:
- "{{ openstack_glance_pool }}"
- "{{ openstack_cinder_pool }}"
- "{{ crush_rule_hdd }}"


@@ -21,3 +21,23 @@ ceph_conf_overrides:
devices:
- '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
- /dev/sdb
openstack_config: True
openstack_glance_pool:
name: "images"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
rule_name: "HDD"
type: 1
erasure_profile: ""
expected_num_objects: ""
openstack_cinder_pool:
name: "volumes"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
rule_name: "HDD"
type: 1
erasure_profile: ""
expected_num_objects: ""
openstack_pools:
- "{{ openstack_glance_pool }}"
- "{{ openstack_cinder_pool }}"


@@ -12,24 +12,3 @@ crush_rule_hdd:
default: true
crush_rules:
- "{{ crush_rule_hdd }}"
openstack_config: True
openstack_glance_pool:
name: "images"
pg_num: "{{ osd_pool_default_pg_num }}"
pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
erasure_profile: ""
expected_num_objects: ""
openstack_cinder_pool:
name: "volumes"
pg_num: "{{ osd_pool_default_pg_num }}"
pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
erasure_profile: ""
expected_num_objects: ""
openstack_pools:
- "{{ openstack_glance_pool }}"
- "{{ openstack_cinder_pool }}"