mon: move `osd_pool_default_pg_num` in `ceph-defaults`

The `osd_pool_default_pg_num` parameter is set in `ceph-mon`.
When running ceph-ansible with `--limit` on a specific group of nodes,
the play fails when it tries to access this variable, since it is only
defined on hosts where the `ceph-mon` role has run. Setting it in
`ceph-defaults` instead makes it available on every node.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1518696

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit d4c0960f04)
Branch: pull/3384/head
Author: Guillaume Abrioux, 2018-11-13 15:40:35 +01:00 (committed by mergify[bot])
Parent: e8dd6b8993
Commit: 68b2ad11ee
18 changed files with 89 additions and 125 deletions
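To make the failure concrete, here is a minimal repro sketch (hypothetical playbook and group names `mons`/`osds`, not taken from the repo): a fact set in a play scoped to the monitors is referenced from another group via `hostvars`.

    # repro.yml -- hypothetical illustration of the bug
    - hosts: mons
      gather_facts: false
      tasks:
        - name: set a fact only on monitor hosts (mimics the old ceph-mon behaviour)
          set_fact:
            osd_pool_default_pg_num: 128

    - hosts: osds
      gather_facts: false
      tasks:
        - name: read the fact from the first mon, as the old defaults did
          debug:
            msg: "{{ hostvars[groups['mons'][0]]['osd_pool_default_pg_num'] }}"

`ansible-playbook repro.yml` succeeds, but `ansible-playbook repro.yml --limit osds` skips the first play, so the `hostvars` lookup raises an undefined-variable error. Defining the variable in `ceph-defaults`, which runs on every host, removes the cross-host dependency.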

@@ -354,8 +354,8 @@ dummy:
 #cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
 #cephfs_pools:
-#  - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
-#  - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
+#  - { name: "{{ cephfs_data }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
+#  - { name: "{{ cephfs_metadata }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
 
 ## OSD options
 #
@@ -578,8 +578,8 @@ dummy:
 #openstack_config: false
 #openstack_glance_pool:
 #  name: "images"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -588,8 +588,8 @@ dummy:
 #  size: ""
 #openstack_cinder_pool:
 #  name: "volumes"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -598,8 +598,8 @@ dummy:
 #  size: ""
 #openstack_nova_pool:
 #  name: "vms"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -608,8 +608,8 @@ dummy:
 #  size: ""
 #openstack_cinder_backup_pool:
 #  name: "backups"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -618,8 +618,8 @@ dummy:
 #  size: ""
 #openstack_gnocchi_pool:
 #  name: "metrics"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -628,8 +628,8 @@ dummy:
 #  size: ""
 #openstack_cephfs_data_pool:
 #  name: "manila_data"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -638,8 +638,8 @@ dummy:
 #  size: ""
 #openstack_cephfs_metadata_pool:
 #  name: "manila_metadata"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""

@@ -20,8 +20,8 @@ dummy:
 #user_config: false
 #test:
 #  name: "test"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -29,8 +29,8 @@ dummy:
 #  size: ""
 #test2:
 #  name: "test2"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""

@@ -354,8 +354,8 @@ ceph_rhcs_version: 3
 #cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
 #cephfs_pools:
-#  - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
-#  - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
+#  - { name: "{{ cephfs_data }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
+#  - { name: "{{ cephfs_metadata }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
 
 ## OSD options
 #
@@ -578,8 +578,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #openstack_config: false
 #openstack_glance_pool:
 #  name: "images"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -588,8 +588,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  size: ""
 #openstack_cinder_pool:
 #  name: "volumes"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -598,8 +598,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  size: ""
 #openstack_nova_pool:
 #  name: "vms"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -608,8 +608,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  size: ""
 #openstack_cinder_backup_pool:
 #  name: "backups"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -618,8 +618,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  size: ""
 #openstack_gnocchi_pool:
 #  name: "metrics"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -628,8 +628,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  size: ""
 #openstack_cephfs_data_pool:
 #  name: "manila_data"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -638,8 +638,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  size: ""
 #openstack_cephfs_metadata_pool:
 #  name: "manila_metadata"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""

@@ -12,8 +12,8 @@ copy_admin_key: false
 user_config: false
 test:
   name: "test"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
@@ -21,8 +21,8 @@ test:
   size: ""
 test2:
   name: "test2"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""

@@ -102,9 +102,9 @@
   command: >
     {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
     osd pool create {{ item.0.name }}
-    {{ item.0.pg_num }}
-    {{ item.0.pgp_num }}
-    {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }}
+    {{ item.0.pg_num | default(osd_pool_default_pg_num) }}
+    {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) }}
+    {{ 'replicated_rule' if not item.0.rule_name | default('replicated_rule') else item.0.rule_name | default('replicated_rule') }}
     {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
     {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %}
     {{ item.0.erasure_profile }}
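The chained `default()` filters above resolve left to right; a quick trace with hypothetical pool entries (the same pattern appears again in the pool-creation task further down):

    # item.0: { name: "images" }
    #   pg_num  -> osd_pool_default_pg_num
    #   pgp_num -> osd_pool_default_pg_num   (item.0.pg_num is also undefined)
    # item.0: { name: "images", pg_num: 64 }
    #   pg_num  -> 64
    #   pgp_num -> 64                        (falls back to item.0.pg_num)
    # item.0: { name: "images", pg_num: 64, pgp_num: 32 }
    #   pg_num  -> 64
    #   pgp_num -> 32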

@@ -346,8 +346,8 @@ cephfs_data: cephfs_data # name of the data pool for a given filesystem
 cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
 cephfs_pools:
-  - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
-  - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
+  - { name: "{{ cephfs_data }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
+  - { name: "{{ cephfs_metadata }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
 
 ## OSD options
 #
@@ -570,8 +570,8 @@ docker_pull_timeout: "300s"
 openstack_config: false
 openstack_glance_pool:
   name: "images"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
@@ -580,8 +580,8 @@ openstack_glance_pool:
   size: ""
 openstack_cinder_pool:
   name: "volumes"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
@@ -590,8 +590,8 @@ openstack_cinder_pool:
   size: ""
 openstack_nova_pool:
   name: "vms"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
@@ -600,8 +600,8 @@ openstack_nova_pool:
   size: ""
 openstack_cinder_backup_pool:
   name: "backups"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
@@ -610,8 +610,8 @@ openstack_cinder_backup_pool:
   size: ""
 openstack_gnocchi_pool:
   name: "metrics"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
@@ -620,8 +620,8 @@ openstack_gnocchi_pool:
   size: ""
 openstack_cephfs_data_pool:
   name: "manila_data"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
@@ -630,8 +630,8 @@ openstack_cephfs_data_pool:
   size: ""
 openstack_cephfs_metadata_pool:
   name: "manila_metadata"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""

@@ -240,5 +240,9 @@
     - ceph_current_status['servicemap']['services'] is defined
     - ceph_current_status['servicemap']['services']['rgw'] is defined
 
+- name: set_fact osd_pool_default_pg_num
+  set_fact:
+    osd_pool_default_pg_num: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', ceph_osd_pool_default_pg_num) }}"
+
 - name: populate service facts
   service_facts:
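The new fact is resolved in a single expression; a sketch of the resulting precedence, with made-up values:

    # role default (assumed to live in ceph-defaults), lowest priority:
    ceph_osd_pool_default_pg_num: 8
    # user override via ceph.conf settings, wins when present:
    ceph_conf_overrides:
      global:
        osd_pool_default_pg_num: 256
    # the set_fact above then yields 256; without the override it yields 8.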

@@ -35,14 +35,8 @@
 - name: rbd pool related tasks
   when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
   block:
-    - name: get default value for osd_pool_default_pg_num
-      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} config get osd_pool_default_pg_num"
-      changed_when: false
-      register: osd_pool_default_pg_num
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-
     - name: create a rbd pool if it doesn't exist
-      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ (osd_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}"
+      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ osd_pool_default_pg_num }}"
       changed_when: false
       delegate_to: "{{ groups[mon_group_name][0] }}"
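For context on the removed round-trip: `ceph daemon mon.<host> config get <option>` prints a one-key JSON document, which is why the old task needed `from_json`. A trace with illustrative values:

    # old flow (removed above):
    #   osd_pool_default_pg_num.stdout                -> '{"osd_pool_default_pg_num":"8"}'
    #   (stdout | from_json).osd_pool_default_pg_num  -> "8"
    # new flow: the fact computed in ceph-defaults is used directly, no mon query.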

@@ -2,7 +2,7 @@
 - name: filesystem pools related tasks
   block:
     - name: create filesystem pools
-      command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs }}"
+      command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs | default(osd_pool_default_pg_num) }}"
       changed_when: false
       delegate_to: "{{ groups[mon_group_name][0] }}"
       with_items:

@@ -33,6 +33,3 @@
   include_tasks: crush_rules.yml
   when:
     - crush_rule_config
-
-- name: include set_osd_pool_default_pg_num.yml
-  include_tasks: set_osd_pool_default_pg_num.yml

@@ -1,31 +0,0 @@
-# NOTE(leseb): we add a conditional for backward compatibility
-# so people that had 'pool_default_pg_num' declared will get
-# the same behaviour
-#
-- name: get default value for osd_pool_default_pg_num
-  shell: |
-    {{ docker_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ monitor_name }} config get osd_pool_default_pg_num
-  failed_when: false
-  changed_when: false
-  run_once: true
-  register: default_pool_default_pg_num
-  when:
-    - pool_default_pg_num is not defined
-    - ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) == False
-
-- name: set_fact osd_pool_default_pg_num with pool_default_pg_num (backward compatibility)
-  set_fact:
-    osd_pool_default_pg_num: "{{ pool_default_pg_num }}"
-  when: pool_default_pg_num is defined
-
-- name: set_fact osd_pool_default_pg_num with default_pool_default_pg_num.stdout
-  set_fact:
-    osd_pool_default_pg_num: "{{ (default_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}"
-  when:
-    - default_pool_default_pg_num.get('rc') == 0
-
-- name: set_fact osd_pool_default_pg_num ceph_conf_overrides.global.osd_pool_default_pg_num
-  set_fact:
-    osd_pool_default_pg_num: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}"
-  when:
-    - ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) != False
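For the record, the deleted tasks implemented a three-way fallback; since later `set_fact` calls overwrite earlier ones, the effective precedence was:

    # 1. ceph_conf_overrides.global.osd_pool_default_pg_num  (set last, wins when present)
    # 2. pool_default_pg_num                                 (legacy variable, backward compatibility)
    # 3. live `ceph daemon mon.<name> config get` query      (only when neither of the above is set)

The replacement `set_fact` in `ceph-defaults` keeps case 1 and drops the legacy variable and the live query in favour of the `ceph_osd_pool_default_pg_num` default.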

@@ -24,9 +24,9 @@
   command: >
     {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
    osd pool create {{ item.0.name }}
-    {{ item.0.pg_num }}
-    {{ item.0.pgp_num | default(item.0.pg_num) }}
-    {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }}
+    {{ item.0.pg_num | default(osd_pool_default_pg_num) }}
+    {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) }}
+    {{ 'replicated_rule' if not item.0.rule_name | default('replicated_rule') else item.0.rule_name | default('replicated_rule') }}
     {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
     {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %}
     {{ item.0.erasure_profile }}

@@ -27,7 +27,7 @@
     - rgw_create_pools is defined
   block:
     - name: create rgw pools if rgw_create_pools is defined
-      command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
+      command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }}"
       changed_when: false
       with_dict: "{{ rgw_create_pools }}"
      delegate_to: "{{ groups[mon_group_name][0] }}"
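The task iterates `rgw_create_pools` with `with_dict`, so keys are pool names and values carry per-pool settings. A sketch of the expected shape (pool names and sizes illustrative):

    rgw_create_pools:
      defaults.rgw.buckets.data:
        pg_num: 16
      defaults.rgw.buckets.index:
        pg_num: 32

With the change above, `pg_num` may now be omitted per pool and falls back to `osd_pool_default_pg_num`.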

@@ -14,8 +14,8 @@ nfs_ganesha_flavor: "ceph_master"
 openstack_config: True
 openstack_glance_pool:
   name: "images"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
@@ -23,8 +23,8 @@ openstack_glance_pool:
   size: 1
 openstack_cinder_pool:
   name: "volumes"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""

@@ -3,8 +3,8 @@ copy_admin_key: True
 user_config: True
 test:
   name: "test"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
@@ -12,8 +12,8 @@ test:
   size: "{{ osd_pool_default_size }}"
 test2:
   name: "test2"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""

@@ -2,16 +2,16 @@
 user_config: True
 test:
   name: "test"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
   expected_num_objects: ""
 test2:
   name: "test2"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""

@@ -18,8 +18,8 @@ ceph_conf_overrides:
 openstack_config: True
 openstack_glance_pool:
   name: "images"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
@@ -27,8 +27,8 @@ openstack_glance_pool:
   size: 1
 openstack_cinder_pool:
   name: "volumes"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""

@@ -3,16 +3,16 @@ user_config: True
 copy_admin_key: True
 test:
   name: "test"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
   expected_num_objects: ""
 test2:
   name: "test2"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""