osd: use default crush rule name when needed

When `rule_name` isn't set in `crush_rules`, the OSD pool creation fails.
This commit adds a new fact `ceph_osd_pool_default_crush_rule_name` with
the default crush rule name.
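
For illustration only (a hypothetical pool definition, not part of this
change), an entry such as

    pools:
      - name: test
        pg_num: 16
        application: rbd

carries no `rule_name`, so the create-pool template fell back to the numeric
`osd_pool_default_crush_rule` id where `ceph osd pool create ... replicated`
expects a rule name, hence the failure.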

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1817586

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 1bb9860dfd)
Guillaume Abrioux 2020-03-27 16:21:09 +01:00 committed by Dimitri Savineau
parent 03355aec8c
commit 7acd9686ab
8 changed files with 45 additions and 18 deletions


@@ -26,9 +26,9 @@
   set_fact:
     delegated_node: "{{ groups[mon_group_name][0] if groups.get(mon_group_name, []) | length > 0 else inventory_hostname }}"

-- name: set_fact condition_copy_admin_key
+- name: set_fact admin_key_presence
   set_fact:
-    condition_copy_admin_key: "{{ True if groups.get(mon_group_name, []) | length > 0 else copy_admin_key }}"
+    admin_key_presence: "{{ True if groups.get(mon_group_name, []) | length > 0 else copy_admin_key }}"

 - name: create cephx key(s)
   ceph_key:
@@ -38,7 +38,7 @@
     secret: "{{ item.key | default('') }}"
     cluster: "{{ cluster }}"
     dest: "{{ ceph_conf_key_directory }}"
-    import_key: "{{ condition_copy_admin_key }}"
+    import_key: "{{ admin_key_presence }}"
     mode: "{{ item.mode|default(omit) }}"
     owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
     group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
@@ -66,12 +66,16 @@
 - name: pool related tasks
   when:
-    - condition_copy_admin_key | bool
+    - admin_key_presence | bool
     - inventory_hostname == groups.get('_filtered_clients', []) | first
   block:
+    - import_role:
+        name: ceph-facts
+        tasks_from: get_def_crush_rule_name.yml
+
     - name: list existing pool(s)
       command: >
-        {{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+        {{ ceph_admin_command }} --cluster {{ cluster }}
         osd pool get {{ item.name }} size
       with_items: "{{ pools }}"
       register: created_pools
@@ -80,13 +84,13 @@
     - name: create ceph pool(s)
       command: >
-        {{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+        {{ ceph_admin_command | default('') }} --cluster {{ cluster }}
         osd pool create {{ item.0.name }}
         {{ item.0.pg_num | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else 16 }}
         {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else '' }}
         {%- if item.0.type | default(1) | int == 1 or item.0.type | default('replicated') == 'replicated' %}
         replicated
-        {{ item.0.rule_name | default(osd_pool_default_crush_rule) }}
+        {{ item.0.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}
         {{ item.0.expected_num_objects | default(0) }}
         {%- else %}
         erasure
@@ -102,19 +106,19 @@
         - item.1.rc != 0

     - name: set the target ratio on pool(s)
-      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} target_size_ratio {{ item.target_size_ratio }}"
+      command: "{{ ceph_admin_command | default('') }} --cluster {{ cluster }} osd pool set {{ item.name }} target_size_ratio {{ item.target_size_ratio }}"
       with_items: "{{ pools | unique }}"
       delegate_to: "{{ delegated_node }}"
       when: item.pg_autoscale_mode | default(False) | bool

     - name: set pg_autoscale_mode value on pool(s)
-      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode {{ item.pg_autoscale_mode | default(False) | ternary('on', 'warn') }}"
+      command: "{{ ceph_admin_command | default('') }} --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode {{ item.pg_autoscale_mode | default(False) | ternary('on', 'warn') }}"
       delegate_to: "{{ delegated_node }}"
       with_items: "{{ pools | unique }}"

     - name: customize pool size
       command: >
-        {{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+        {{ ceph_admin_command | default('') }} --cluster {{ cluster }}
         osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}
       with_items: "{{ pools | unique }}"
       delegate_to: "{{ delegated_node }}"
@@ -127,7 +131,7 @@
     - name: customize pool min_size
       command: >
-        {{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+        {{ ceph_admin_command | default('') }} --cluster {{ cluster }}
         osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}
       with_items: "{{ pools | unique }}"
       delegate_to: "{{ delegated_node }}"
@@ -139,7 +143,7 @@
       - item.type | default('replicated') != 'erasure'

     - name: assign application to pool(s)
-      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
+      command: "{{ ceph_admin_command | default('') }} --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
       with_items: "{{ pools | unique }}"
       changed_when: false
       delegate_to: "{{ delegated_node }}"
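
For context, on a bare-metal deployment with the defaults below and pg
autoscaling disabled, the "create ceph pool(s)" task now renders roughly the
following command (hypothetical pool named "test"):

    ceph -n client.admin -k /etc/ceph/ceph.client.admin.keyring --cluster ceph osd pool create test 8 8 replicated replicated_rule 0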


@@ -33,7 +33,7 @@ mon initial members = {% for host in groups[mon_group_name] %}
         {%- if not loop.last %},{% endif %}
 {% endfor %}

-osd pool default crush rule = {{ osd_pool_default_crush_rule | default(ceph_osd_pool_default_crush_rule) }}
+osd pool default crush rule = {{ osd_pool_default_crush_rule }}
 {% endif %}
 fsid = {{ fsid }}
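
For reference, with `osd_pool_default_crush_rule` resolved by ceph-facts (see
below), the rendered line would read, e.g., `osd pool default crush rule = 0`
on a cluster whose default rule id is 0, or `= -1` when falling back to the
shipped default.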


@@ -3,3 +3,4 @@ ceph_osd_pool_default_size: 3
 ceph_osd_pool_default_min_size: 0
 ceph_osd_pool_default_pg_num: 8
 ceph_osd_pool_default_crush_rule: -1
+ceph_osd_pool_default_crush_rule_name: "replicated_rule"
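
A minimal sketch of an inventory override, for clusters whose default rule is
not named replicated_rule (illustrative rule name):

    # group_vars/all.yml
    ceph_osd_pool_default_crush_rule_name: "ssd_replicated"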


@@ -320,8 +320,7 @@
 - name: set_fact osd_pool_default_crush_rule
   set_fact:
-    osd_pool_default_crush_rule: "{% if crush_rule_variable.rc == 0 %}{{ crush_rule_variable.stdout.split(' = ')[1] }}{% else %}{{ ceph_osd_pool_default_crush_rule }}{% endif %}"
-  when: ceph_conf.stat.exists
+    osd_pool_default_crush_rule: "{{ crush_rule_variable.stdout.split(' = ')[1] if crush_rule_variable.get('rc', 1) | int == 0 else ceph_osd_pool_default_crush_rule }}"

 - name: import_tasks set_monitor_address.yml
   import_tasks: set_monitor_address.yml
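
For context: `crush_rule_variable` is registered earlier from reading the
existing configuration; if its stdout were, say, `osd pool default crush rule = 0`
(illustrative), `split(' = ')[1]` yields `0`. When that command did not run or
failed, `get('rc', 1)` now falls back safely to
`ceph_osd_pool_default_crush_rule` instead of gating on `ceph_conf.stat.exists`.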
@@ -358,3 +357,7 @@
 - name: set_fact use_new_ceph_iscsi package or old ceph-iscsi-config/cli
   set_fact:
     use_new_ceph_iscsi: "{{ (gateway_ip_list == '0.0.0.0' and gateway_iqn | length == 0 and client_connections | length == 0 and rbd_devices | length == 0) | bool | ternary(true, false) }}"
+
+- name: set_fact ceph_admin_command
+  set_fact:
+    ceph_admin_command: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /var/log/ceph/:/var/log/ceph/:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else 'ceph' }} -n client.admin -k /etc/ceph/{{ cluster }}.client.admin.keyring"
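
For reference, this fact expands to one of the following; `container_binary`
may be docker or podman, and registry, image and tag are illustrative in the
containerized case:

    # containerized_deployment: true
    docker run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /var/log/ceph/:/var/log/ceph/:z --entrypoint=ceph docker.io/ceph/daemon:latest-nautilus -n client.admin -k /etc/ceph/ceph.client.admin.keyring

    # containerized_deployment: false
    ceph -n client.admin -k /etc/ceph/ceph.client.admin.keyring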


@@ -0,0 +1,14 @@
+---
+- name: get current default crush rule details
+  command: "{{ hostvars[delegated_node | default(groups[mon_group_name][0])]['ceph_admin_command'] }} --cluster {{ cluster }} osd -f json crush rule dump"
+  register: default_crush_rule_details
+  changed_when: false
+  delegate_to: "{{ delegated_node | default(groups[mon_group_name][0]) }}"
+  run_once: true
+
+- name: get current default crush rule name
+  set_fact:
+    ceph_osd_pool_default_crush_rule_name: "{{ item.rule_name }}"
+  with_items: "{{ default_crush_rule_details.stdout | default('{}') | from_json }}"
+  run_once: True
+  when: item.rule_id | int == osd_pool_default_crush_rule | int
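
For reference, `osd crush rule dump` returns a JSON array along these lines
(truncated, illustrative):

    [
      {
        "rule_id": 0,
        "rule_name": "replicated_rule",
        "type": 1
      }
    ]

The `when` clause keeps the entry whose `rule_id` matches
`osd_pool_default_crush_rule`; with the shipped default of -1 no id matches,
so the fact retains the `replicated_rule` default shown above.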


@@ -21,6 +21,10 @@
 - name: deploy filesystem pools
   when: fs_pools_created is not defined
   block:
+    - import_role:
+        name: ceph-facts
+        tasks_from: get_def_crush_rule_name.yml
+
     - name: create filesystem pools
       command: >
         {{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
@@ -29,7 +33,7 @@
         {{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else '' }}
         {%- if item.type | default(1) | int == 1 or item.type | default('replicated') == 'replicated' %}
         replicated
-        {{ item.rule_name | default(osd_pool_default_crush_rule) }}
+        {{ item.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}
         {{ item.expected_num_objects | default(0) }}
         {%- else %}
         erasure


@@ -29,9 +29,10 @@
 # If multiple rules are set as default (should not be) then the last one is taken as actual default.
 # the with_items statement overrides each iteration with the new one.
 # NOTE(leseb): we should actually fail if multiple rules are set as default
-- name: set_fact info_ceph_default_crush_rule_yaml
+- name: set_fact info_ceph_default_crush_rule_yaml, ceph_osd_pool_default_crush_rule_name
   set_fact:
     info_ceph_default_crush_rule_yaml: "{{ item.stdout | from_json() }}"
+    ceph_osd_pool_default_crush_rule_name: "{{ (item.stdout | from_json).rule_name }}"
   with_items: "{{ info_ceph_default_crush_rule.results }}"
   run_once: true
   when: not item.get('skipped', false)
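
For example, if a monitor reported `{"rule_id": 0, "rule_name": "replicated_rule", "type": 1}`
(illustrative), the added fact evaluates to `replicated_rule`, mirroring what
get_def_crush_rule_name.yml derives at pool creation time.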


@@ -19,7 +19,7 @@
         {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else '' }}
         {%- if item.0.type | default(1) | int == 1 or item.0.type | default('replicated') == 'replicated' %}
         replicated
-        {{ item.0.rule_name | default(osd_pool_default_crush_rule) }}
+        {{ item.0.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}
         {{ item.0.expected_num_objects | default(0) }}
         {%- else %}
         erasure