mon: move `osd_pool_default_pg_num` to `ceph-defaults`

The `osd_pool_default_pg_num` parameter is set in `ceph-mon`.
When running ceph-ansible with `--limit` on a specific group of nodes, the
play fails when it tries to access this variable, since it is never
defined on the targeted hosts.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1518696

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/3353/head
Guillaume Abrioux 2018-11-13 15:40:35 +01:00 committed by mergify[bot]
parent 68dde424f6
commit d4c0960f04
18 changed files with 87 additions and 122 deletions
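The gist of the change, as a minimal sketch (the pool definition below is illustrative): pool defaults used to dereference the fact through the first monitor's hostvars, which only resolves when the mon hosts are part of the play; after the move, ceph-defaults sets the fact on every host.

# Before: pool definitions dereferenced the first monitor's hostvars.
# Under e.g. `ansible-playbook site.yml --limit clients` the mon hosts are
# not part of the play, the fact is never set, and this lookup fails:
test:
  name: "test"
  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"

# After: ceph-defaults sets osd_pool_default_pg_num on every host, so the
# same definition resolves locally:
---
test:
  name: "test"
  pg_num: "{{ osd_pool_default_pg_num }}"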

View File

@@ -352,8 +352,8 @@ dummy:
 #cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
 #cephfs_pools:
-# - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
-# - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
+# - { name: "{{ cephfs_data }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
+# - { name: "{{ cephfs_metadata }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
 ## OSD options
 #

@@ -571,8 +571,8 @@ dummy:
 #openstack_config: false
 #openstack_glance_pool:
 # name: "images"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
 # rule_name: "replicated_rule"
 # type: 1
 # erasure_profile: ""

@@ -581,8 +581,8 @@ dummy:
 # size: ""
 #openstack_cinder_pool:
 # name: "volumes"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
 # rule_name: "replicated_rule"
 # type: 1
 # erasure_profile: ""

@@ -591,8 +591,8 @@ dummy:
 # size: ""
 #openstack_nova_pool:
 # name: "vms"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
 # rule_name: "replicated_rule"
 # type: 1
 # erasure_profile: ""

@@ -601,8 +601,8 @@ dummy:
 # size: ""
 #openstack_cinder_backup_pool:
 # name: "backups"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
 # rule_name: "replicated_rule"
 # type: 1
 # erasure_profile: ""

@@ -611,8 +611,8 @@ dummy:
 # size: ""
 #openstack_gnocchi_pool:
 # name: "metrics"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
 # rule_name: "replicated_rule"
 # type: 1
 # erasure_profile: ""

@@ -621,8 +621,8 @@ dummy:
 # size: ""
 #openstack_cephfs_data_pool:
 # name: "manila_data"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
 # rule_name: "replicated_rule"
 # type: 1
 # erasure_profile: ""

@@ -631,8 +631,8 @@ dummy:
 # size: ""
 #openstack_cephfs_metadata_pool:
 # name: "manila_metadata"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
 # rule_name: "replicated_rule"
 # type: 1
 # erasure_profile: ""

View File

@@ -20,8 +20,8 @@ dummy:
 #user_config: false
 #test:
 # name: "test"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
 # rule_name: "replicated_rule"
 # type: 1
 # erasure_profile: ""

@@ -29,8 +29,8 @@ dummy:
 # size: ""
 #test2:
 # name: "test2"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
 # rule_name: "replicated_rule"
 # type: 1
 # erasure_profile: ""

View File

@@ -352,8 +352,8 @@ ceph_rhcs_version: 3
 #cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
 #cephfs_pools:
-# - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
-# - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
+# - { name: "{{ cephfs_data }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
+# - { name: "{{ cephfs_metadata }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
 ## OSD options
 #

@@ -571,8 +571,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #openstack_config: false
 #openstack_glance_pool:
 # name: "images"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
 # rule_name: "replicated_rule"
 # type: 1
 # erasure_profile: ""

@@ -581,8 +581,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 # size: ""
 #openstack_cinder_pool:
 # name: "volumes"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
 # rule_name: "replicated_rule"
 # type: 1
 # erasure_profile: ""

@@ -591,8 +591,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 # size: ""
 #openstack_nova_pool:
 # name: "vms"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
 # rule_name: "replicated_rule"
 # type: 1
 # erasure_profile: ""

@@ -601,8 +601,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 # size: ""
 #openstack_cinder_backup_pool:
 # name: "backups"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
 # rule_name: "replicated_rule"
 # type: 1
 # erasure_profile: ""

@@ -611,8 +611,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 # size: ""
 #openstack_gnocchi_pool:
 # name: "metrics"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
 # rule_name: "replicated_rule"
 # type: 1
 # erasure_profile: ""

@@ -621,8 +621,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 # size: ""
 #openstack_cephfs_data_pool:
 # name: "manila_data"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
 # rule_name: "replicated_rule"
 # type: 1
 # erasure_profile: ""

@@ -631,8 +631,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 # size: ""
 #openstack_cephfs_metadata_pool:
 # name: "manila_metadata"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
 # rule_name: "replicated_rule"
 # type: 1
 # erasure_profile: ""

View File

@@ -12,8 +12,8 @@ copy_admin_key: false
 user_config: false
 test:
   name: "test"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""

@@ -21,8 +21,8 @@ test:
   size: ""
 test2:
   name: "test2"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""

View File

@@ -102,8 +102,8 @@
       command: >
         {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
         osd pool create {{ item.0.name }}
-        {{ item.0.pg_num }}
-        {{ item.0.pgp_num }}
+        {{ item.0.pg_num | default(osd_pool_default_pg_num) }}
+        {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) }}
         {{ 'replicated_rule' if not item.0.rule_name | default('replicated_rule') else item.0.rule_name | default('replicated_rule') }}
         {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
         {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile %}
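The chained `default()` filters above are what let a pool entry omit `pg_num`/`pgp_num` entirely: an undefined value falls through each filter until a defined one is found, ending at the `osd_pool_default_pg_num` fact set by ceph-defaults. A self-contained sketch of the resolution order (task, pool entries, and values are made up for illustration):

- name: show how pg/pgp counts resolve (illustrative only)
  debug:
    msg: "{{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) }}"
  vars:
    osd_pool_default_pg_num: 8
  loop:
    - { name: "a", pg_num: 32, pgp_num: 64 }  # -> 64 (explicit pgp_num wins)
    - { name: "b", pg_num: 32 }               # -> 32 (falls back to pg_num)
    - { name: "c" }                           # -> 8  (falls back to the fact)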

View File

@@ -344,8 +344,8 @@ cephfs_data: cephfs_data # name of the data pool for a given filesystem
 cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
 cephfs_pools:
-  - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
-  - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
+  - { name: "{{ cephfs_data }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
+  - { name: "{{ cephfs_metadata }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
 ## OSD options
 #

@@ -563,8 +563,8 @@ docker_pull_timeout: "300s"
 openstack_config: false
 openstack_glance_pool:
   name: "images"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""

@@ -573,8 +573,8 @@ openstack_glance_pool:
   size: ""
 openstack_cinder_pool:
   name: "volumes"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""

@@ -583,8 +583,8 @@ openstack_cinder_pool:
   size: ""
 openstack_nova_pool:
   name: "vms"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""

@@ -593,8 +593,8 @@ openstack_nova_pool:
   size: ""
 openstack_cinder_backup_pool:
   name: "backups"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""

@@ -603,8 +603,8 @@ openstack_cinder_backup_pool:
   size: ""
 openstack_gnocchi_pool:
   name: "metrics"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""

@@ -613,8 +613,8 @@ openstack_gnocchi_pool:
   size: ""
 openstack_cephfs_data_pool:
   name: "manila_data"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""

@@ -623,8 +623,8 @@ openstack_cephfs_data_pool:
   size: ""
 openstack_cephfs_metadata_pool:
   name: "manila_metadata"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""

View File

@@ -240,6 +240,10 @@
     - ceph_current_status['servicemap']['services'] is defined
    - ceph_current_status['servicemap']['services']['rgw'] is defined

+- name: set_fact osd_pool_default_pg_num
+  set_fact:
+    osd_pool_default_pg_num: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', ceph_osd_pool_default_pg_num) }}"
+
 - name: import_tasks set_monitor_address.yml
   import_tasks: set_monitor_address.yml
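Resolution order for the new fact: an `osd_pool_default_pg_num` under `ceph_conf_overrides.global` wins; otherwise the role falls back to the `ceph_osd_pool_default_pg_num` default shipped by ceph-defaults. A hypothetical user configuration exercising the override:

# group_vars/all.yml (hypothetical): with this in place, the set_fact above
# resolves osd_pool_default_pg_num to 64 on every host in the play.
ceph_conf_overrides:
  global:
    osd_pool_default_pg_num: 64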

View File

@@ -35,14 +35,8 @@
 - name: rbd pool related tasks
   when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
   block:
-    - name: get default value for osd_pool_default_pg_num
-      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} config get osd_pool_default_pg_num"
-      changed_when: false
-      register: osd_pool_default_pg_num
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-
     - name: create a rbd pool if it doesn't exist
-      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ (osd_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}"
+      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ osd_pool_default_pg_num }}"
       changed_when: false
       delegate_to: "{{ groups[mon_group_name][0] }}"

View File

@@ -2,7 +2,7 @@
 - name: filesystem pools related tasks
   block:
     - name: create filesystem pools
-      command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs }}"
+      command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs | default(osd_pool_default_pg_num) }}"
       changed_when: false
       delegate_to: "{{ groups[mon_group_name][0] }}"
       with_items:

View File

@@ -33,6 +33,3 @@
   include_tasks: crush_rules.yml
   when:
     - crush_rule_config
-
-- name: include set_osd_pool_default_pg_num.yml
-  include_tasks: set_osd_pool_default_pg_num.yml

View File

@@ -1,30 +0,0 @@
-# NOTE(leseb): we add a conditional for backward compatibility
-# so people that had 'pool_default_pg_num' declared will get
-# the same behaviour
-#
-- name: get default value for osd_pool_default_pg_num
-  command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ monitor_name }} config get osd_pool_default_pg_num"
-  failed_when: false
-  changed_when: false
-  run_once: true
-  register: default_pool_default_pg_num
-  when:
-    - pool_default_pg_num is not defined
-    - ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) == False
-
-- name: set_fact osd_pool_default_pg_num with pool_default_pg_num (backward compatibility)
-  set_fact:
-    osd_pool_default_pg_num: "{{ pool_default_pg_num }}"
-  when: pool_default_pg_num is defined
-
-- name: set_fact osd_pool_default_pg_num with default_pool_default_pg_num.stdout
-  set_fact:
-    osd_pool_default_pg_num: "{{ (default_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}"
-  when:
-    - default_pool_default_pg_num.get('rc') == 0
-
-- name: set_fact osd_pool_default_pg_num ceph_conf_overrides.global.osd_pool_default_pg_num
-  set_fact:
-    osd_pool_default_pg_num: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}"
-  when:
-    - ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) != False

View File

@@ -25,8 +25,8 @@
       command: >
         {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
         osd pool create {{ item.0.name }}
-        {{ item.0.pg_num }}
-        {{ item.0.pgp_num | default(item.0.pg_num) }}
+        {{ item.0.pg_num | default(osd_pool_default_pg_num) }}
+        {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) }}
         {{ 'replicated_rule' if not item.0.rule_name | default('replicated_rule') else item.0.rule_name | default('replicated_rule') }}
         {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
         {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile %}

View File

@@ -27,7 +27,7 @@
     - rgw_create_pools is defined
   block:
     - name: create rgw pools if rgw_create_pools is defined
-      command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
+      command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }}"
      changed_when: false
      with_dict: "{{ rgw_create_pools }}"
      delegate_to: "{{ groups[mon_group_name][0] }}"
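Because the task iterates `rgw_create_pools` with `with_dict` (pool name as `item.key`), a pool entry can now omit `pg_num` and inherit the default. A hypothetical definition (pool names and counts invented for illustration):

rgw_create_pools:
  default.rgw.buckets.index:
    pg_num: 16                  # explicit value, used as-is
  default.rgw.buckets.data: {}  # no pg_num -> falls back to osd_pool_default_pg_num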

View File

@@ -14,8 +14,8 @@ nfs_ganesha_flavor: "ceph_master"
 openstack_config: True
 openstack_glance_pool:
   name: "images"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""

@@ -23,8 +23,8 @@ openstack_glance_pool:
   size: ""
 openstack_cinder_pool:
   name: "volumes"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""

View File

@@ -3,8 +3,8 @@ copy_admin_key: True
 user_config: True
 test:
   name: "test"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""

@@ -12,8 +12,8 @@ test:
   size: ""
 test2:
   name: "test2"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""

View File

@@ -2,16 +2,16 @@
 user_config: True
 test:
   name: "test"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
   expected_num_objects: ""
 test2:
   name: "test2"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""

View File

@@ -18,8 +18,8 @@ ceph_conf_overrides:
 openstack_config: True
 openstack_glance_pool:
   name: "images"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""

@@ -27,8 +27,8 @@ openstack_glance_pool:
   size: ""
 openstack_cinder_pool:
   name: "volumes"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""

View File

@@ -3,16 +3,16 @@ user_config: True
 copy_admin_key: True
 test:
   name: "test"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
   expected_num_objects: ""
 test2:
   name: "test2"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""