mirror of https://github.com/ceph/ceph-ansible.git
mon: pool creation and pgs
Since we introduced config_overrides we removed a lot of options from the default template. In some cases, such as the MDS and OpenStack pools, we need to know the number of PGs required. The idea here is to skip the task if ceph_conf_overrides.global.osd_pool_default_pg_num is not defined in your `group_vars/all.yml`.

Closes: #1145
Signed-off-by: Sébastien Han <seb@redhat.com>
Co-Authored-By: Guillaume Abrioux <gabrioux@redhat.com>
branch: pull/1146/head
parent: 396cefbb75
commit: ddac3a1fb5
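For context, the conditional introduced below keys off a user-supplied override; a minimal sketch of what that looks like in `group_vars/all.yml` (the value 128 is illustrative, not mandated by the commit):

    ceph_conf_overrides:
      global:
        osd_pool_default_pg_num: 128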
@@ -15,8 +15,8 @@ dummy:
 
 #user_config: false
 #pools:
-#  - { name: test, pgs: "{{ pool_default_pg_num }}" }
-#  - { name: test2, pgs: "{{ pool_default_pg_num }}" }
+#  - { name: test, pgs: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}" }
+#  - { name: test2, pgs: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}" }
 
 #keys:
 #  - { name: client.test, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=test'" }
@@ -23,7 +23,6 @@ dummy:
 #cephx: true
 
 # CephFS
-#pool_default_pg_num: 128
 #cephfs_data: cephfs_data
 #cephfs_metadata: cephfs_metadata
 #cephfs: cephfs
@@ -50,16 +49,16 @@ dummy:
 #openstack_config: false
 #openstack_glance_pool:
 #  name: images
-#  pg_num: "{{ pool_default_pg_num }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
 #openstack_cinder_pool:
 #  name: volumes
-#  pg_num: "{{ pool_default_pg_num }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
 #openstack_nova_pool:
 #  name: vms
-#  pg_num: "{{ pool_default_pg_num }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
 #openstack_cinder_backup_pool:
 #  name: backups
-#  pg_num: "{{ pool_default_pg_num }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
 
 #openstack_pools:
 #  - "{{ openstack_glance_pool }}"
@@ -7,8 +7,8 @@ fetch_directory: fetch/
 
 user_config: false
 pools:
-  - { name: test, pgs: "{{ pool_default_pg_num }}" }
-  - { name: test2, pgs: "{{ pool_default_pg_num }}" }
+  - { name: test, pgs: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}" }
+  - { name: test2, pgs: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}" }
 
 keys:
   - { name: client.test, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=test'" }
@@ -1,4 +1,6 @@
 ---
 - include: pre_requisite.yml
 - include: create_users_keys.yml
-  when: user_config
+  when:
+    - user_config
+    - ceph_conf_overrides.global.osd_pool_default_pg_num is defined
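The items of an Ansible `when:` list are implicitly ANDed, so the new form above behaves like this single-expression sketch (not part of the diff):

    - include: create_users_keys.yml
      when: user_config and ceph_conf_overrides.global.osd_pool_default_pg_num is defined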
@@ -15,7 +15,6 @@ monitor_secret: "{{ monitor_keyring.stdout }}"
 cephx: true
 
 # CephFS
-pool_default_pg_num: 128
 cephfs_data: cephfs_data
 cephfs_metadata: cephfs_metadata
 cephfs: cephfs
@@ -42,16 +41,16 @@ calamari: false
 openstack_config: false
 openstack_glance_pool:
   name: images
-  pg_num: "{{ pool_default_pg_num }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
 openstack_cinder_pool:
   name: volumes
-  pg_num: "{{ pool_default_pg_num }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
 openstack_nova_pool:
   name: vms
-  pg_num: "{{ pool_default_pg_num }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
 openstack_cinder_backup_pool:
   name: backups
-  pg_num: "{{ pool_default_pg_num }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
 
 openstack_pools:
   - "{{ openstack_glance_pool }}"
@@ -43,6 +43,43 @@
     - cephx
     - groups[restapi_group_name] is defined
 
+# NOTE(leseb): we add a conditional for backward compatibility
+# so people who had 'pool_default_pg_num' declared will get
+# the same behaviour
+#
+- name: check if the global key exists in ceph_conf_overrides
+  set_fact:
+    global_in_ceph_conf_overrides: "{{ 'global' in ceph_conf_overrides }}"
+
+- name: check if ceph_conf_overrides.global.osd_pool_default_pg_num is set
+  set_fact:
+    osd_pool_default_pg_num_in_overrides: "{{ 'osd_pool_default_pg_num' in ceph_conf_overrides.global }}"
+  when: global_in_ceph_conf_overrides
+
+- name: get default value for osd_pool_default_pg_num
+  shell: |
+    ceph --cluster {{ cluster }} daemon mon.{{ monitor_name }} config get osd_pool_default_pg_num | grep -Po '(?<="osd_pool_default_pg_num": ")[^"]*'
+  failed_when: false
+  changed_when: false
+  run_once: true
+  register: default_pool_default_pg_num
+  when: (pool_default_pg_num is not defined or not global_in_ceph_conf_overrides)
+
+- set_fact:
+    osd_pool_default_pg_num: "{{ pool_default_pg_num }}"
+  when: pool_default_pg_num is defined
+
+- set_fact:
+    osd_pool_default_pg_num: "{{ default_pool_default_pg_num.stdout }}"
+  when:
+    - pool_default_pg_num is not defined
+    - default_pool_default_pg_num.rc == 0
+    - ceph_conf_overrides.global.osd_pool_default_pg_num is not defined
+
+- set_fact:
+    osd_pool_default_pg_num: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}"
+  when: ceph_conf_overrides.global.osd_pool_default_pg_num is defined
+
+- include: rbd_pool.yml
+  when: ceph_conf_overrides.global.osd_pool_default_pg_num is defined
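Taken together, the new tasks resolve osd_pool_default_pg_num with this precedence: the ceph_conf_overrides value wins whenever it is defined; otherwise the legacy pool_default_pg_num is honoured if the user still declares it; otherwise the monitor's runtime default is read over the admin socket (the command prints JSON such as {"osd_pool_default_pg_num": "8"}, which the grep -Po lookbehind reduces to the bare number). A hypothetical debug task, not part of the commit, can confirm what was picked:

    - name: show resolved osd_pool_default_pg_num
      debug:
        msg: "osd_pool_default_pg_num resolved to {{ osd_pool_default_pg_num }}"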
@@ -4,7 +4,7 @@
 # the role 'ceph-common' doesn't get inherited so the condition cannot be evaluated
 # since those checks are performed by the ceph-common role
 - name: create filesystem pools
-  command: ceph --cluster {{ cluster }} osd pool create {{ item }} {{ pool_default_pg_num }}
+  command: ceph --cluster {{ cluster }} osd pool create {{ item }} {{ osd_pool_default_pg_num }}
   with_items:
     - cephfs_data
     - cephfs_metadata
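For illustration, assuming a cluster named `ceph` and a resolved pg count of 128 (both values hypothetical), each iteration of the task above expands to a command of the form:

    ceph --cluster ceph osd pool create cephfs_data 128
    ceph --cluster ceph osd pool create cephfs_metadata 128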