mon: move `osd_pool_default_pg_num` in `ceph-defaults`

The `osd_pool_default_pg_num` parameter is set in `ceph-mon`.
When using ceph-ansible with `--limit` on a specific group of nodes, the
play fails when trying to access this variable, since it is never
defined on the targeted hosts.
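
A minimal illustration of the failure mode (hypothetical invocation;
group names are the defaults):

    # Only client nodes are in the play, so facts for the first mon are
    # never gathered and the hostvars lookup below is undefined:
    ansible-playbook site.yml --limit clients

    # before: resolved through the mon host's hostvars
    pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
    # after: resolved from ceph-defaults on every host
    pg_num: "{{ osd_pool_default_pg_num }}"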

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1518696

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit d4c0960f04)
pull/3384/head
Guillaume Abrioux, 2018-11-13 15:40:35 +01:00 (committed by mergify[bot])
parent e8dd6b8993 · commit 68b2ad11ee
18 changed files with 89 additions and 125 deletions


@@ -354,8 +354,8 @@ dummy:
 #cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
 #cephfs_pools:
-#  - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
-#  - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
+#  - { name: "{{ cephfs_data }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
+#  - { name: "{{ cephfs_metadata }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
 ## OSD options
 #
@@ -578,8 +578,8 @@ dummy:
 #openstack_config: false
 #openstack_glance_pool:
 #  name: "images"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -588,8 +588,8 @@ dummy:
 #  size: ""
 #openstack_cinder_pool:
 #  name: "volumes"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -598,8 +598,8 @@ dummy:
 #  size: ""
 #openstack_nova_pool:
 #  name: "vms"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -608,8 +608,8 @@ dummy:
 #  size: ""
 #openstack_cinder_backup_pool:
 #  name: "backups"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -618,8 +618,8 @@ dummy:
 #  size: ""
 #openstack_gnocchi_pool:
 #  name: "metrics"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -628,8 +628,8 @@ dummy:
 #  size: ""
 #openstack_cephfs_data_pool:
 #  name: "manila_data"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -638,8 +638,8 @@ dummy:
 #  size: ""
 #openstack_cephfs_metadata_pool:
 #  name: "manila_metadata"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""


@@ -20,8 +20,8 @@ dummy:
 #user_config: false
 #test:
 #  name: "test"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -29,8 +29,8 @@ dummy:
 #  size: ""
 #test2:
 #  name: "test2"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""


@@ -354,8 +354,8 @@ ceph_rhcs_version: 3
 #cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
 #cephfs_pools:
-#  - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
-#  - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
+#  - { name: "{{ cephfs_data }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
+#  - { name: "{{ cephfs_metadata }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
 ## OSD options
 #
@@ -578,8 +578,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #openstack_config: false
 #openstack_glance_pool:
 #  name: "images"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -588,8 +588,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  size: ""
 #openstack_cinder_pool:
 #  name: "volumes"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -598,8 +598,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  size: ""
 #openstack_nova_pool:
 #  name: "vms"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -608,8 +608,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  size: ""
 #openstack_cinder_backup_pool:
 #  name: "backups"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -618,8 +618,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  size: ""
 #openstack_gnocchi_pool:
 #  name: "metrics"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -628,8 +628,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  size: ""
 #openstack_cephfs_data_pool:
 #  name: "manila_data"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -638,8 +638,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  size: ""
 #openstack_cephfs_metadata_pool:
 #  name: "manila_metadata"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""


@@ -12,8 +12,8 @@ copy_admin_key: false
 user_config: false
 test:
   name: "test"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
@@ -21,8 +21,8 @@ test:
   size: ""
 test2:
   name: "test2"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""


@@ -102,9 +102,9 @@
   command: >
     {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
     osd pool create {{ item.0.name }}
-    {{ item.0.pg_num }}
-    {{ item.0.pgp_num }}
-    {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }}
+    {{ item.0.pg_num | default(osd_pool_default_pg_num) }}
+    {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) }}
+    {{ 'replicated_rule' if not item.0.rule_name | default('replicated_rule') else item.0.rule_name | default('replicated_rule') }}
     {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
     {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %}
     {{ item.0.erasure_profile }}
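
A note on the chained defaults above (a reading of the Jinja, with a
hypothetical pool definition):

    # given: { name: "foo", pg_num: 32 }    # pgp_num unset
    # item.0.pgp_num | default(item.0.pg_num)   -> 32
    # ... | default(osd_pool_default_pg_num)    -> reached only when
    #                                              pg_num is unset too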


@@ -346,8 +346,8 @@ cephfs_data: cephfs_data # name of the data pool for a given filesystem
 cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
 cephfs_pools:
-  - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
-  - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
+  - { name: "{{ cephfs_data }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
+  - { name: "{{ cephfs_metadata }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
 ## OSD options
 #
@@ -570,8 +570,8 @@ docker_pull_timeout: "300s"
 openstack_config: false
 openstack_glance_pool:
   name: "images"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
@@ -580,8 +580,8 @@ openstack_glance_pool:
   size: ""
 openstack_cinder_pool:
   name: "volumes"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
@@ -590,8 +590,8 @@ openstack_cinder_pool:
   size: ""
 openstack_nova_pool:
   name: "vms"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
@@ -600,8 +600,8 @@ openstack_nova_pool:
   size: ""
 openstack_cinder_backup_pool:
   name: "backups"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
@@ -610,8 +610,8 @@ openstack_cinder_backup_pool:
   size: ""
 openstack_gnocchi_pool:
   name: "metrics"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
@@ -620,8 +620,8 @@ openstack_gnocchi_pool:
   size: ""
 openstack_cephfs_data_pool:
   name: "manila_data"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
@@ -630,8 +630,8 @@ openstack_cephfs_data_pool:
   size: ""
 openstack_cephfs_metadata_pool:
   name: "manila_metadata"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""


@@ -240,5 +240,9 @@
     - ceph_current_status['servicemap']['services'] is defined
     - ceph_current_status['servicemap']['services']['rgw'] is defined
 
+- name: set_fact osd_pool_default_pg_num
+  set_fact:
+    osd_pool_default_pg_num: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', ceph_osd_pool_default_pg_num) }}"
+
 - name: populate service facts
   service_facts:
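
The new fact prefers an explicit override and otherwise falls back to
the role default, i.e. `ceph_conf_overrides.global.osd_pool_default_pg_num`
wins over `ceph_osd_pool_default_pg_num`. A sketch of how a deployment
would pin the value (hypothetical group_vars placement):

    # group_vars/all.yml
    ceph_conf_overrides:
      global:
        osd_pool_default_pg_num: 64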


@@ -35,14 +35,8 @@
 - name: rbd pool related tasks
   when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
   block:
-    - name: get default value for osd_pool_default_pg_num
-      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} config get osd_pool_default_pg_num"
-      changed_when: false
-      register: osd_pool_default_pg_num
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-
     - name: create a rbd pool if it doesn't exist
-      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ (osd_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}"
+      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ osd_pool_default_pg_num }}"
       changed_when: false
       delegate_to: "{{ groups[mon_group_name][0] }}"


@@ -2,7 +2,7 @@
 - name: filesystem pools related tasks
   block:
     - name: create filesystem pools
-      command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs }}"
+      command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs | default(osd_pool_default_pg_num) }}"
       changed_when: false
       delegate_to: "{{ groups[mon_group_name][0] }}"
       with_items:


@@ -33,6 +33,3 @@
   include_tasks: crush_rules.yml
   when:
     - crush_rule_config
-
-- name: include set_osd_pool_default_pg_num.yml
-  include_tasks: set_osd_pool_default_pg_num.yml


@@ -1,31 +0,0 @@
-# NOTE(leseb): we add a conditional for backward compatibility
-# so people that had 'pool_default_pg_num' declared will get
-# the same behaviour
-#
-- name: get default value for osd_pool_default_pg_num
-  shell: |
-    {{ docker_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ monitor_name }} config get osd_pool_default_pg_num
-  failed_when: false
-  changed_when: false
-  run_once: true
-  register: default_pool_default_pg_num
-  when:
-    - pool_default_pg_num is not defined
-    - ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) == False
-
-- name: set_fact osd_pool_default_pg_num with pool_default_pg_num (backward compatibility)
-  set_fact:
-    osd_pool_default_pg_num: "{{ pool_default_pg_num }}"
-  when: pool_default_pg_num is defined
-
-- name: set_fact osd_pool_default_pg_num with default_pool_default_pg_num.stdout
-  set_fact:
-    osd_pool_default_pg_num: "{{ (default_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}"
-  when:
-    - default_pool_default_pg_num.get('rc') == 0
-
-- name: set_fact osd_pool_default_pg_num ceph_conf_overrides.global.osd_pool_default_pg_num
-  set_fact:
-    osd_pool_default_pg_num: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}"
-  when:
-    - ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) != False


@@ -24,9 +24,9 @@
   command: >
     {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
     osd pool create {{ item.0.name }}
-    {{ item.0.pg_num }}
-    {{ item.0.pgp_num | default(item.0.pg_num) }}
-    {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }}
+    {{ item.0.pg_num | default(osd_pool_default_pg_num) }}
+    {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) }}
+    {{ 'replicated_rule' if not item.0.rule_name | default('replicated_rule') else item.0.rule_name | default('replicated_rule') }}
     {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
     {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %}
     {{ item.0.erasure_profile }}


@@ -27,7 +27,7 @@
     - rgw_create_pools is defined
   block:
     - name: create rgw pools if rgw_create_pools is defined
-      command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
+      command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }}"
      changed_when: false
      with_dict: "{{ rgw_create_pools }}"
      delegate_to: "{{ groups[mon_group_name][0] }}"
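
With the `default()` filter in place, `pg_num` becomes optional per rgw
pool. A hypothetical `rgw_create_pools` definition showing both cases:

    rgw_create_pools:
      defaults.rgw.buckets.data:
        pg_num: 16                   # explicit value
      defaults.rgw.buckets.index: {} # falls back to osd_pool_default_pg_num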


@@ -14,8 +14,8 @@ nfs_ganesha_flavor: "ceph_master"
 openstack_config: True
 openstack_glance_pool:
   name: "images"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
@@ -23,8 +23,8 @@ openstack_glance_pool:
   size: 1
 openstack_cinder_pool:
   name: "volumes"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""


@@ -3,8 +3,8 @@ copy_admin_key: True
 user_config: True
 test:
   name: "test"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
@@ -12,8 +12,8 @@ test:
   size: "{{ osd_pool_default_size }}"
 test2:
   name: "test2"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""


@@ -2,16 +2,16 @@
 user_config: True
 test:
   name: "test"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
   expected_num_objects: ""
 test2:
   name: "test2"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""


@@ -18,8 +18,8 @@ ceph_conf_overrides:
 openstack_config: True
 openstack_glance_pool:
   name: "images"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
@@ -27,8 +27,8 @@ openstack_glance_pool:
   size: 1
 openstack_cinder_pool:
   name: "volumes"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""


@@ -3,16 +3,16 @@ user_config: True
 copy_admin_key: True
 test:
   name: "test"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
   expected_num_objects: ""
 test2:
   name: "test2"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""