diff --git a/group_vars/mons.yml.sample b/group_vars/mons.yml.sample index 6f79ae41b..002d62b22 100644 --- a/group_vars/mons.yml.sample +++ b/group_vars/mons.yml.sample @@ -36,39 +36,6 @@ dummy: # mds: allow * # mgr: allow * -############### -# CRUSH RULES # -############### -#crush_rule_config: false - -#crush_rule_hdd: -# name: HDD -# root: default -# type: host -# class: hdd -# default: false - -#crush_rule_ssd: -# name: SSD -# root: default -# type: host -# class: ssd -# default: false - -#crush_rules: -# - "{{ crush_rule_hdd }}" -# - "{{ crush_rule_ssd }}" - -# Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }} -# and will move hosts into them which might lead to significant data movement in the cluster! -# -# In order for the playbook to create CRUSH hierarchy, you have to setup your Ansible inventory file like so: -# -# [osds] -# ceph-osd-01 osd_crush_location="{ 'root': 'mon-roottt', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd-01' }" -# -# Note that 'host' is mandatory and that you need to submit at least two bucket type (including the host) -#create_crush_tree: false ########## # DOCKER # diff --git a/group_vars/osds.yml.sample b/group_vars/osds.yml.sample index b37da6031..780e044ed 100644 --- a/group_vars/osds.yml.sample +++ b/group_vars/osds.yml.sample @@ -125,6 +125,39 @@ dummy: #crush_device_class: "" #osds_per_device: 1 +############### +# CRUSH RULES # +############### +#crush_rule_config: false + +#crush_rule_hdd: +# name: HDD +# root: default +# type: host +# class: hdd +# default: false + +#crush_rule_ssd: +# name: SSD +# root: default +# type: host +# class: ssd +# default: false + +#crush_rules: +# - "{{ crush_rule_hdd }}" +# - "{{ crush_rule_ssd }}" + +# Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }} +# and will move hosts into them which might lead to significant data movement in the cluster! 
+# +# In order for the playbook to create CRUSH hierarchy, you have to setup your Ansible inventory file like so: +# +# [osds] +# ceph-osd-01 osd_crush_location="{ 'root': 'mon-roottt', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd-01' }" +# +# Note that 'host' is mandatory and that you need to submit at least two bucket types (including the host) +#create_crush_tree: false ########## # DOCKER # diff --git a/roles/ceph-mon/defaults/main.yml b/roles/ceph-mon/defaults/main.yml index 46d20d506..ac362b412 100644 --- a/roles/ceph-mon/defaults/main.yml +++ b/roles/ceph-mon/defaults/main.yml @@ -28,39 +28,6 @@ client_admin_ceph_authtool_cap: mds: allow * mgr: allow * -############### -# CRUSH RULES # -############### -crush_rule_config: false - -crush_rule_hdd: - name: HDD - root: default - type: host - class: hdd - default: false - -crush_rule_ssd: - name: SSD - root: default - type: host - class: ssd - default: false - -crush_rules: - - "{{ crush_rule_hdd }}" - - "{{ crush_rule_ssd }}" - -# Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }} -# and will move hosts into them which might lead to significant data movement in the cluster! 
-# -# In order for the playbook to create CRUSH hierarchy, you have to setup your Ansible inventory file like so: -# -# [osds] -# ceph-osd-01 osd_crush_location="{ 'root': 'mon-roottt', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd-01' }" -# -# Note that 'host' is mandatory and that you need to submit at least two bucket type (including the host) -create_crush_tree: false ########## # DOCKER # diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml index 25f7e4295..4cf563f35 100644 --- a/roles/ceph-osd/defaults/main.yml +++ b/roles/ceph-osd/defaults/main.yml @@ -117,6 +117,39 @@ lvm_volumes: [] crush_device_class: "" osds_per_device: 1 +############### +# CRUSH RULES # +############### +crush_rule_config: false + +crush_rule_hdd: + name: HDD + root: default + type: host + class: hdd + default: false + +crush_rule_ssd: + name: SSD + root: default + type: host + class: ssd + default: false + +crush_rules: + - "{{ crush_rule_hdd }}" + - "{{ crush_rule_ssd }}" + +# Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }} +# and will move hosts into them which might lead to significant data movement in the cluster! 
+# +# In order for the playbook to create CRUSH hierarchy, you have to setup your Ansible inventory file like so: +# +# [osds] +# ceph-osd-01 osd_crush_location="{ 'root': 'mon-roottt', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd-01' }" +# +# Note that 'host' is mandatory and that you need to submit at least two bucket types (including the host) +create_crush_tree: false ########## # DOCKER # diff --git a/roles/ceph-osd/tasks/crush_rules.yml b/roles/ceph-osd/tasks/crush_rules.yml index edeedc1ba..9a01063af 100644 --- a/roles/ceph-osd/tasks/crush_rules.yml +++ b/roles/ceph-osd/tasks/crush_rules.yml @@ -7,13 +7,13 @@ register: config_crush_hierarchy delegate_to: '{{ groups[mon_group_name][0] }}' when: - - hostvars[groups[mon_group_name][0]]['create_crush_tree'] | default(false) | bool + - hostvars[groups[mon_group_name][0]]['create_crush_tree'] | default(create_crush_tree) | bool - osd_crush_location is defined - name: create configured crush rules command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd crush rule {{ 'create-replicated' if item.class is defined else 'create-simple' }} {{ item.name }} {{ item.root }} {{ item.type }} {{ item.class | default('') }}" changed_when: false - with_items: "{{ hostvars[groups[mon_group_name][0]]['crush_rules'] | unique }}" + with_items: "{{ hostvars[groups[mon_group_name][0]]['crush_rules'] | default(crush_rules) | unique }}" delegate_to: '{{ groups[mon_group_name][0] }}' run_once: true @@ -21,7 +21,7 @@ command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd -f json crush rule dump {{ item.name }}" register: info_ceph_default_crush_rule changed_when: false - with_items: "{{ hostvars[groups[mon_group_name][0]]['crush_rules'] | unique }}" + with_items: "{{ hostvars[groups[mon_group_name][0]]['crush_rules'] | default(crush_rules) | unique }}" delegate_to: '{{ groups[mon_group_name][0] }}' 
run_once: true when: item.default | bool diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml index c5e1b00e1..7e7a6437d 100644 --- a/roles/ceph-osd/tasks/main.yml +++ b/roles/ceph-osd/tasks/main.yml @@ -87,7 +87,7 @@ - name: include crush_rules.yml include_tasks: crush_rules.yml - when: hostvars[groups[mon_group_name][0]]['crush_rule_config'] | default(false) | bool + when: hostvars[groups[mon_group_name][0]]['crush_rule_config'] | default(crush_rule_config) | bool - name: set_fact openstack_keys_tmp - preserve backward compatibility after the introduction of the ceph_keys module set_fact: