Refactor code for set_osd_pool_default_*

This commit refactors the code of all `set_osd_pool_default_*`
related tasks, dropping the needless `set_fact` that was used to
determine whether a key is present in `ceph_conf_overrides`.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/1976/head
Guillaume Abrioux 2017-10-03 13:55:27 +02:00
parent 6aca67bc9c
commit 2c4258a0fd
25 changed files with 36 additions and 79 deletions
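
The pattern removed throughout is sketched below (an illustrative composite, not a verbatim excerpt from any one file): the intermediate fact and the `is defined` test collapse into a single `dict.get()` chain whose `False` default covers both a missing `global` section and a missing key.

    # before: an extra task sets a fact that later conditions consult
    - name: set_fact global_in_ceph_conf_overrides
      set_fact:
        global_in_ceph_conf_overrides: "{{ 'global' in ceph_conf_overrides }}"

    - include: create_users_keys.yml
      when:
        - global_in_ceph_conf_overrides
        - ceph_conf_overrides.global.osd_pool_default_pg_num is defined

    # after: one self-contained condition, no set_fact needed;
    # get() returns False when the section or the key is absent
    - include: create_users_keys.yml
      when:
        - ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) != False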

View File

@@ -6,5 +6,4 @@
include: create_users_keys.yml
when:
- user_config
- global_in_ceph_conf_overrides
- ceph_conf_overrides.global.osd_pool_default_pg_num is defined
- ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) != False

View File

@@ -21,7 +21,3 @@
when:
- cephx
- copy_admin_key
- name: set_fact global_in_ceph_conf_overrides
set_fact:
global_in_ceph_conf_overrides: "{{ 'global' in ceph_conf_overrides }}"

View File

@@ -78,28 +78,11 @@
- ceph_release_num.{{ ceph_release }} > ceph_release_num.jewel
with_items: "{{ groups.get(mgr_group_name, []) }}"
- name: include set_osd_pool_default_pg_num.yml
include: set_osd_pool_default_pg_num.yml
- name: crush_rules.yml
include: crush_rules.yml
when:
- crush_rule_config
# Create the pools listed in openstack_pools
- name: include openstack_config.yml
include: openstack_config.yml
when:
- openstack_config
- inventory_hostname == groups[mon_group_name] | last
# CEPH creates the rbd pool during the ceph cluster initialization in
# releases prior to luminous. If the rbd_pool.yml playbook is called too
# early, the rbd pool does not exist yet.
- name: include rbd_pool.yml
include: rbd_pool.yml
when: ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
- name: find ceph keys
shell: ls -1 /etc/ceph/*.keyring
changed_when: false

View File

@@ -89,16 +89,6 @@
- "{{ inventory_hostname == groups[mon_group_name] | last }}"
- not containerized_deployment_with_kv
- name: include ceph-mon/tasks/set_osd_pool_default_pg_num.yml
include: "{{ lookup('env', 'ANSIBLE_ROLES_PATH') | default (playbook_dir + '/roles', true) }}/ceph-mon/tasks/set_osd_pool_default_pg_num.yml"
# create openstack pools only when all mons are up.
- name: include ceph-mon/tasks/openstack_config.yml
include: "{{ lookup('env', 'ANSIBLE_ROLES_PATH') | default (playbook_dir + '/roles', true) }}/ceph-mon/tasks/openstack_config.yml"
when:
- openstack_config
- "{{ inventory_hostname == groups[mon_group_name] | last }}"
- block:
- name: create ceph mgr keyring(s) when mon is containerized
command: docker exec ceph-mon-{{ ansible_hostname }} ceph --cluster {{ cluster }} auth get-or-create mgr.{{ hostvars[item]['ansible_hostname'] }} mon 'allow profile mgr' osd 'allow *' mds 'allow *' -o /etc/ceph/{{ cluster }}.mgr.{{ hostvars[item]['ansible_hostname'] }}.keyring

View File

@@ -32,6 +32,23 @@
include: docker/main.yml
when: containerized_deployment
- name: include set_osd_pool_default_pg_num.yml
include: set_osd_pool_default_pg_num.yml
# Create the pools listed in openstack_pools
- name: include openstack_config.yml
include: openstack_config.yml
when:
- openstack_config
- inventory_hostname == groups[mon_group_name] | last
# CEPH creates the rbd pool during the ceph cluster initialization in
# releases prior to luminous. If the rbd_pool.yml playbook is called too
# early, the rbd pool does not exist yet.
- name: include rbd_pool.yml
include: rbd_pool.yml
when: ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
- name: include create_mds_filesystems.yml
include: create_mds_filesystems.yml
when:

View File

@@ -1,7 +1,7 @@
---
- name: test if rbd exists
shell: |
ceph --cluster {{ cluster }} osd pool ls | grep -sq rbd
"{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd pool ls | grep -sq rbd"
changed_when: false
failed_when: false
run_once: true
@@ -16,14 +16,10 @@
include: rbd_pool_pgs.yml
when:
- rbd_pool_exist.rc == 0
- global_in_ceph_conf_overrides
- ceph_conf_overrides.global.osd_pool_default_pg_num is defined
- ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) != False
- name: include rbd_pool_size.yml
include: rbd_pool_size.yml
when:
- rbd_pool_exist.rc == 0
- global_in_ceph_conf_overrides
- ceph_conf_overrides.global.osd_pool_default_size is defined
# In luminous release, ceph does not create the rbd pool by default.
- ceph_conf_overrides.get('global', {}).get('osd_pool_default_size', False) != False
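
The `{{ docker_exec_cmd }}` prefix that now fronts these shell tasks is set elsewhere in the role; a minimal sketch of that assumption (the idea being that the prefix stays empty on non-containerized deployments, so the same command line works in both cases):

    # assumed definition, set earlier in the ceph-mon role;
    # on bare-metal deployments the prefix is left empty
    - name: set_fact docker_exec_cmd
      set_fact:
        docker_exec_cmd: "docker exec ceph-mon-{{ ansible_hostname }}"
      when: containerized_deployment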

View File

@@ -6,7 +6,7 @@
- name: check rbd pool usage
shell: |
ceph --connect-timeout 5 --cluster {{ cluster }} df | awk '/rbd/ {print $3}'
"{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} df | awk '/rbd/ {print $3}'"
changed_when: false
failed_when: false
check_mode: true

View File

@@ -6,7 +6,7 @@
- name: check pg num for rbd pool
shell: |
ceph --connect-timeout 5 --cluster {{ cluster }} osd pool get rbd pg_num | awk '{print $2}'
"{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool get rbd pg_num | awk '{print $2}'"
changed_when: false
failed_when: false
check_mode: true
@@ -15,8 +15,8 @@
- name: destroy and recreate rbd pool if osd_pool_default_pg_num is not honoured
shell: |
ceph --connect-timeout 5 --cluster {{ cluster }} osd pool delete rbd rbd --yes-i-really-really-mean-it
ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create rbd {{ ceph_conf_overrides.global.osd_pool_default_pg_num }}
"{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool delete rbd rbd --yes-i-really-really-mean-it"
"{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create rbd {{ ceph_conf_overrides.global.osd_pool_default_pg_num }}"
changed_when: false
failed_when: false
run_once: true

View File

@@ -6,7 +6,7 @@
- name: check size for rbd pool
shell: |
ceph --connect-timeout 5 --cluster {{ cluster }} osd pool get rbd size | awk '{print $2}'
"{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool get rbd size | awk '{print $2}'"
changed_when: false
failed_when: false
check_mode: true
@@ -14,7 +14,7 @@
register: rbd_pool_size
- name: change rbd pool size if osd_pool_default_size is not honoured
command: ceph --connect-timeout 5 --cluster {{ cluster }} osd pool set rbd size {{ ceph_conf_overrides.global.osd_pool_default_size }}
command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool set rbd size {{ ceph_conf_overrides.global.osd_pool_default_size }}"
changed_when: false
failed_when: false
run_once: true

View File

@@ -2,40 +2,30 @@
# so people that had 'pool_default_pg_num' declared will get
# the same behaviour
#
- name: check if global key exists in ceph_conf_overrides
set_fact:
global_in_ceph_conf_overrides: "{{ 'global' in ceph_conf_overrides }}"
- name: check if ceph_conf_overrides.global.osd_pool_default_pg_num is set
set_fact:
osd_pool_default_pg_num_in_overrides: "{{ 'osd_pool_default_pg_num' in ceph_conf_overrides.global }}"
when: global_in_ceph_conf_overrides
- name: get default value for osd_pool_default_pg_num
shell: |
{{ docker_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ monitor_name }} config get osd_pool_default_pg_num | grep -Po '(?<="osd_pool_default_pg_num": ")[^"]*'
{{ docker_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ monitor_name }} config get osd_pool_default_pg_num
failed_when: false
changed_when: false
run_once: true
register: default_pool_default_pg_num
when: pool_default_pg_num is not defined or not global_in_ceph_conf_overrides
when:
- pool_default_pg_num is not defined
- ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) == False
- name: set_fact osd_pool_default_pg_num pool_default_pg_num
- name: set_fact osd_pool_default_pg_num with pool_default_pg_num (backward compatibility)
set_fact:
osd_pool_default_pg_num: "{{ pool_default_pg_num }}"
when: pool_default_pg_num is defined
- name: set_fact osd_pool_default_pg_num default_pool_default_pg_num.stdout
- name: set_fact osd_pool_default_pg_num with default_pool_default_pg_num.stdout
set_fact:
osd_pool_default_pg_num: "{{ default_pool_default_pg_num.stdout }}"
osd_pool_default_pg_num: "{{ (default_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}"
when:
- pool_default_pg_num is not defined
- default_pool_default_pg_num.rc == 0
- (osd_pool_default_pg_num_in_overrides is not defined or not osd_pool_default_pg_num_in_overrides)
- default_pool_default_pg_num.get('rc') == 0
- name: set_fact osd_pool_default_pg_num ceph_conf_overrides.global.osd_pool_default_pg_num
set_fact:
osd_pool_default_pg_num: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}"
when:
- global_in_ceph_conf_overrides
- ceph_conf_overrides.global.osd_pool_default_pg_num is defined
- ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) != False
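
For context on the `from_json` change above: `ceph daemon mon.<id> config get <key>` replies with a one-key JSON object, which is why the old `grep -Po` extraction could be dropped. A standalone illustration of the filter chain, assuming a reply like `{"osd_pool_default_pg_num": "128"}`:

    # hypothetical example: parse an admin-socket reply with from_json
    - name: show parsed default pg num
      debug:
        msg: "{{ ('{\"osd_pool_default_pg_num\": \"128\"}' | from_json).osd_pool_default_pg_num }}"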

View File

@@ -23,7 +23,6 @@ user_config: True
openstack_config: True
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1
osd:
bluestore block db size: 67108864

View File

@@ -21,7 +21,6 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1
osd:
bluestore block db size: 67108864

View File

@@ -18,7 +18,6 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1
osd:
bluestore block db size: 67108864

View File

@@ -18,7 +18,6 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1
osd:
bluestore block db size: 67108864

View File

@@ -18,7 +18,6 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1
osd:
bluestore block db size: 67108864

View File

@@ -22,7 +22,6 @@ user_config: True
openstack_config: True
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1
nfs_ganesha_stable: true
nfs_ganesha_dev: false

View File

@@ -21,5 +21,4 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1

View File

@@ -18,5 +18,4 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1

View File

@@ -24,7 +24,6 @@ rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1
user_config: True
keys:

View File

@@ -17,5 +17,4 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1

View File

@@ -16,5 +16,4 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1

View File

@@ -18,5 +18,4 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1

View File

@@ -25,5 +25,4 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1

View File

@@ -20,7 +20,6 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1
debian_ceph_packages:
- ceph