Set the default crush rule in ceph.conf

Currently the default crush rule value is added to the ceph config
on the mon nodes as an extra configuration applied after the template
generation via the ansible ini module.

This implies two behaviors:

1/ On each ceph-ansible run, the ceph.conf will be regenerated via
ceph-config+template and then ceph-mon+ini_file. This leads to an
unnecessary restart of the daemons.

2/ When other ceph daemons are collocated on the monitor nodes
(like mgr or rgw), the default crush rule value will be erased by
the ceph.conf template (mon -> mgr -> rgw).

This patch adds the osd_pool_default_crush_rule config to the ceph
template and only for the monitor nodes (like crush_rules.yml).
The default crush rule id is read (if it exists) from the current ceph
configuration.
The default configuration is -1 (ceph default).

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1638092

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
pull/3714/head
Dimitri Savineau 2019-03-07 17:14:12 -05:00 committed by mergify[bot]
parent b7f4e3e7c7
commit d8538ad4e1
4 changed files with 22 additions and 6 deletions

View File

@ -32,6 +32,8 @@ mon initial members = {% for host in groups[mon_group_name] %}
{%- endif %}
{%- if not loop.last %},{% endif %}
{% endfor %}
osd pool default crush rule = {{ osd_pool_default_crush_rule | default(ceph_osd_pool_default_crush_rule) }}
{% endif %}
fsid = {{ fsid }}

View File

@ -2,3 +2,4 @@
ceph_osd_pool_default_size: 3
ceph_osd_pool_default_min_size: 0
ceph_osd_pool_default_pg_num: 8
# -1 is a sentinel meaning "no explicit rule": ceph falls back to its
# own built-in default crush rule.
ceph_osd_pool_default_crush_rule: -1

View File

@ -249,6 +249,23 @@
set_fact:
osd_pool_default_min_size: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_min_size', ceph_osd_pool_default_min_size) }}"
# Detect whether a ceph.conf is already present so that a previously
# applied default crush rule can be preserved across ceph-ansible runs
# instead of being reset by template regeneration.
- name: check if the ceph conf exists
stat:
path: '/etc/ceph/{{ cluster }}.conf'
register: ceph_conf
# Read the current default crush rule value out of the existing config.
# failed_when: false keeps the play going when the option is absent
# (grep exits non-zero); the rc is inspected in the next task instead.
# NOTE(review): this only matches the space-separated spelling
# 'osd pool default crush rule'; an underscore variant
# (osd_pool_default_crush_rule) in ceph.conf would be missed — confirm
# the template always renders the spaced form.
- name: get default crush rule value from ceph configuration
command: grep 'osd pool default crush rule' /etc/ceph/{{ cluster }}.conf
register: crush_rule_variable
changed_when: false
failed_when: false
when: ceph_conf.stat.exists
# Keep the value found in ceph.conf when grep succeeded (rc == 0),
# otherwise fall back to the role default (-1, i.e. let ceph decide).
# NOTE(review): splitting on ' = ' assumes the exact 'key = value'
# formatting the template writes; a 'key=value' line would not split —
# verify against the rendered ceph.conf.
- name: set_fact osd_pool_default_crush_rule
set_fact:
osd_pool_default_crush_rule: "{% if crush_rule_variable.rc == 0 %}{{ crush_rule_variable.stdout.split(' = ')[1] }}{% else %}{{ ceph_osd_pool_default_crush_rule }}{% endif %}"
when: ceph_conf.stat.exists
- name: import_tasks set_monitor_address.yml
import_tasks: set_monitor_address.yml

View File

@ -38,12 +38,8 @@
- inventory_hostname == groups.get(mon_group_name) | last
- not item.get('skipped', false)
- name: set_fact osd_pool_default_crush_rule
set_fact:
osd_pool_default_crush_rule: "osd_pool_default_crush_rule"
- name: insert new default crush rule into daemon to prevent restart
command: "{{ hostvars[item]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[item]['monitor_name'] }} config set {{ osd_pool_default_crush_rule }} {{ info_ceph_default_crush_rule_yaml.rule_id }}"
command: "{{ hostvars[item]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[item]['monitor_name'] }} config set osd_pool_default_crush_rule {{ info_ceph_default_crush_rule_yaml.rule_id }}"
changed_when: false
delegate_to: "{{ item }}"
with_items: "{{ groups[mon_group_name] }}"
@ -55,7 +51,7 @@
ini_file:
dest: "/etc/ceph/{{ cluster }}.conf"
section: "global"
option: "{{ osd_pool_default_crush_rule }}"
option: "osd pool default crush rule"
value: "{{ info_ceph_default_crush_rule_yaml.rule_id }}"
delegate_to: "{{ item }}"
with_items: "{{ groups[mon_group_name] }}"