ceph-{mon,osd}: move default crush variables

Since ed36a11 we have moved the crush rule creation code from the
ceph-mon role to the ceph-osd role.
To keep backward compatibility we kept the possibility to set the
crush variables on the mons side, but we didn't move the default
values. As a result, when crush_rule_config is set to true and the
default crush_rules values are expected, the crush rule creation
task fails with:

"msg": "'ansible.vars.hostvars.HostVarsVars object' has no attribute
'crush_rules'"

This patch moves the default crush variables from the ceph-mon role
to the ceph-osd role, and also falls back to those default values
when nothing is defined on the mons side.
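
The fallback relies on Jinja2's default() filter: when the variable
is not set in the first mon's hostvars, the expression now resolves
to the ceph-osd role default of the same name instead of raising an
undefined-attribute error, e.g.:

    hostvars[groups[mon_group_name][0]]['crush_rules'] | default(crush_rules) | unique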

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1798864

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
(cherry picked from commit 1fc6b33714)
Dimitri Savineau, 2020-02-10 13:43:31 -05:00 (committed by Guillaume Abrioux)
commit db8902d444 (parent 306ce82358)
refs: pull/5074/head, v3.2.40
7 changed files with 71 additions and 71 deletions

group_vars/mons.yml.sample

@@ -36,39 +36,6 @@ dummy:
 #  mds: allow *
 #  mgr: allow *
-###############
-# CRUSH RULES #
-###############
-#crush_rule_config: false
-#crush_rule_hdd:
-#  name: HDD
-#  root: default
-#  type: host
-#  class: hdd
-#  default: false
-#crush_rule_ssd:
-#  name: SSD
-#  root: default
-#  type: host
-#  class: ssd
-#  default: false
-#crush_rules:
-#  - "{{ crush_rule_hdd }}"
-#  - "{{ crush_rule_ssd }}"
-# Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }}
-# and will move hosts into them which might lead to significant data movement in the cluster!
-#
-# In order for the playbook to create CRUSH hierarchy, you have to setup your Ansible inventory file like so:
-#
-# [osds]
-# ceph-osd-01 osd_crush_location="{ 'root': 'mon-roottt', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd-01' }"
-#
-# Note that 'host' is mandatory and that you need to submit at least two bucket type (including the host)
-#create_crush_tree: false
 ##########
 # DOCKER #

group_vars/osds.yml.sample

@@ -223,6 +223,39 @@ dummy:
 #crush_device_class: ""
 #osds_per_device: 1
+###############
+# CRUSH RULES #
+###############
+#crush_rule_config: false
+#crush_rule_hdd:
+#  name: HDD
+#  root: default
+#  type: host
+#  class: hdd
+#  default: false
+#crush_rule_ssd:
+#  name: SSD
+#  root: default
+#  type: host
+#  class: ssd
+#  default: false
+#crush_rules:
+#  - "{{ crush_rule_hdd }}"
+#  - "{{ crush_rule_ssd }}"
+# Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }}
+# and will move hosts into them which might lead to significant data movement in the cluster!
+#
+# In order for the playbook to create CRUSH hierarchy, you have to setup your Ansible inventory file like so:
+#
+# [osds]
+# ceph-osd-01 osd_crush_location="{ 'root': 'mon-roottt', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd-01' }"
+#
+# Note that 'host' is mandatory and that you need to submit at least two bucket type (including the host)
+#create_crush_tree: false
 ##########
 # DOCKER #

infrastructure-playbooks/rolling_update.yml

@@ -457,7 +457,6 @@
 - name: run crush rules on osd nodes
   hosts: "{{ osd_group_name|default('osds') }}"
   become: True
-  tasks:
   roles:
     - ceph-defaults
     - ceph-facts
@@ -465,6 +464,7 @@
     - import_role:
         name: ceph-osd
         tasks_from: crush_rules
+      when: hostvars[groups[mon_group_name][0]]['crush_rule_config'] | default(false) | bool
 - name: upgrade ceph mdss cluster, deactivate all rank > 0

roles/ceph-mon/defaults/main.yml

@@ -28,39 +28,6 @@ client_admin_ceph_authtool_cap:
   mds: allow *
   mgr: allow *
-###############
-# CRUSH RULES #
-###############
-crush_rule_config: false
-crush_rule_hdd:
-  name: HDD
-  root: default
-  type: host
-  class: hdd
-  default: false
-crush_rule_ssd:
-  name: SSD
-  root: default
-  type: host
-  class: ssd
-  default: false
-crush_rules:
-  - "{{ crush_rule_hdd }}"
-  - "{{ crush_rule_ssd }}"
-# Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }}
-# and will move hosts into them which might lead to significant data movement in the cluster!
-#
-# In order for the playbook to create CRUSH hierarchy, you have to setup your Ansible inventory file like so:
-#
-# [osds]
-# ceph-osd-01 osd_crush_location="{ 'root': 'mon-roottt', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd-01' }"
-#
-# Note that 'host' is mandatory and that you need to submit at least two bucket type (including the host)
-create_crush_tree: false
 ##########
 # DOCKER #

roles/ceph-osd/defaults/main.yml

@@ -215,6 +215,39 @@ lvm_volumes: []
 crush_device_class: ""
 osds_per_device: 1
+###############
+# CRUSH RULES #
+###############
+crush_rule_config: false
+crush_rule_hdd:
+  name: HDD
+  root: default
+  type: host
+  class: hdd
+  default: false
+crush_rule_ssd:
+  name: SSD
+  root: default
+  type: host
+  class: ssd
+  default: false
+crush_rules:
+  - "{{ crush_rule_hdd }}"
+  - "{{ crush_rule_ssd }}"
+# Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }}
+# and will move hosts into them which might lead to significant data movement in the cluster!
+#
+# In order for the playbook to create CRUSH hierarchy, you have to setup your Ansible inventory file like so:
+#
+# [osds]
+# ceph-osd-01 osd_crush_location="{ 'root': 'mon-roottt', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd-01' }"
+#
+# Note that 'host' is mandatory and that you need to submit at least two bucket type (including the host)
+create_crush_tree: false
 ##########
 # DOCKER #

roles/ceph-osd/tasks/crush_rules.yml

@@ -7,13 +7,13 @@
   register: config_crush_hierarchy
   delegate_to: '{{ groups[mon_group_name][0] }}'
   when:
-    - hostvars[groups[mon_group_name][0]]['create_crush_tree'] | default(false) | bool
+    - hostvars[groups[mon_group_name][0]]['create_crush_tree'] | default(create_crush_tree) | bool
     - osd_crush_location is defined
 - name: create configured crush rules
   command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd crush rule {{ 'create-replicated' if item.class is defined else 'create-simple' }} {{ item.name }} {{ item.root }} {{ item.type }} {{ item.class | default('') }}"
   changed_when: false
-  with_items: "{{ hostvars[groups[mon_group_name][0]]['crush_rules'] | unique }}"
+  with_items: "{{ hostvars[groups[mon_group_name][0]]['crush_rules'] | default(crush_rules) | unique }}"
   delegate_to: '{{ groups[mon_group_name][0] }}'
   run_once: true
@@ -21,7 +21,7 @@
   command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd -f json crush rule dump {{ item.name }}"
   register: info_ceph_default_crush_rule
   changed_when: false
-  with_items: "{{ hostvars[groups[mon_group_name][0]]['crush_rules'] | unique }}"
+  with_items: "{{ hostvars[groups[mon_group_name][0]]['crush_rules'] | default(crush_rules) | unique }}"
   delegate_to: '{{ groups[mon_group_name][0] }}'
   run_once: true
   when: item.default | bool

roles/ceph-osd/tasks/main.yml

@@ -104,7 +104,7 @@
 - name: include crush_rules.yml
   include_tasks: crush_rules.yml
   when:
-    - hostvars[groups[mon_group_name][0]]['crush_rule_config'] | default(false) | bool
+    - hostvars[groups[mon_group_name][0]]['crush_rule_config'] | default(crush_rule_config) | bool
     - not rolling_update | bool
 - name: set_fact openstack_keys_tmp - preserve backward compatibility after the introduction of the ceph_keys module