config/osd: various fixes

- set `osd_memory_target` per OSD host.
- refactor the `[osd]` section rendering in `ceph.conf`.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=2056675

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 8a5628b516)
Guillaume Abrioux 2022-07-11 10:23:56 +02:00
parent 68470e91e7
commit f9b7bd327c
5 changed files with 15 additions and 11 deletions

@@ -372,6 +372,7 @@ dummy:
 #is_hci: false
 #hci_safety_factor: 0.2
 #non_hci_safety_factor: 0.7
+#safety_factor: "{{ hci_safety_factor if is_hci | bool else non_hci_safety_factor }}"
 #osd_memory_target: 4294967296
 #journal_size: 5120 # OSD journal size in MB
 #block_db_size: -1 # block db size in bytes for the ceph-volume lvm batch. -1 means use the default of 'as big as possible'.
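The new `safety_factor` default simply picks between the two existing factors. A minimal Python sketch of how the inline Jinja conditional resolves (the 0.2/0.7 values are the defaults shown above):

    # Mirrors: safety_factor: "{{ hci_safety_factor if is_hci | bool else non_hci_safety_factor }}"
    hci_safety_factor = 0.2      # hyperconverged (HCI) deployments
    non_hci_safety_factor = 0.7  # dedicated OSD nodes

    def safety_factor(is_hci: bool) -> float:
        return hci_safety_factor if is_hci else non_hci_safety_factor

    assert safety_factor(True) == 0.2
    assert safety_factor(False) == 0.7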

@@ -372,6 +372,7 @@ ceph_iscsi_config_dev: false
 #is_hci: false
 #hci_safety_factor: 0.2
 #non_hci_safety_factor: 0.7
+#safety_factor: "{{ hci_safety_factor if is_hci | bool else non_hci_safety_factor }}"
 #osd_memory_target: 4294967296
 #journal_size: 5120 # OSD journal size in MB
 #block_db_size: -1 # block db size in bytes for the ceph-volume lvm batch. -1 means use the default of 'as big as possible'.

@@ -98,6 +98,18 @@
   when:
     - devices | default([]) | length > 0

+- name: set_fact _osd_memory_target
+  set_fact:
+    _osd_memory_target: "{{ ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) | int }}"
+  when:
+    - num_osds | default(0) | int > 0
+    - ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) > osd_memory_target
+
+- name: set osd_memory_target
+  command: "{{ ceph_cmd }} config set osd/host:{{ inventory_hostname }} osd_memory_target {{ _osd_memory_target | default(osd_memory_target) }}"
+  changed_when: false
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+
 - name: create ceph conf directory
   file:
     path: "/etc/ceph"

@@ -90,17 +90,6 @@ filestore xattr use omap = true
 {% if osd_objectstore == 'bluestore' %}
 {% set _num_osds = num_osds | default(0) | int %}
 [osd]
-{% if is_hci | bool and _num_osds > 0 %}
-{# hci_safety_factor is the safety factor for HCI deployments #}
-{% if ansible_facts['memtotal_mb'] * 1048576 * hci_safety_factor / _num_osds > osd_memory_target %}
-{% set _osd_memory_target = (ansible_facts['memtotal_mb'] * 1048576 * hci_safety_factor / _num_osds) | int %}
-{% endif %}
-{% elif _num_osds > 0 %}
-{# non_hci_safety_factor is the safety factor for dedicated nodes #}
-{% if ansible_facts['memtotal_mb'] * 1048576 * non_hci_safety_factor / _num_osds > osd_memory_target %}
-{% set _osd_memory_target = (ansible_facts['memtotal_mb'] * 1048576 * non_hci_safety_factor / _num_osds) | int %}
-{% endif %}
-{% endif %}
 osd memory target = {{ _osd_memory_target | default(osd_memory_target) }}
 {% endif %}
 {% endif %}
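After this refactor the template no longer computes anything itself; the remaining line just falls through Jinja's `default()` filter. A self-contained render check using the `jinja2` library directly (the numeric values are illustrative):

    from jinja2 import Template

    line = Template("osd memory target = {{ _osd_memory_target | default(osd_memory_target) }}")

    # No per-host fact set: the role default wins.
    print(line.render(osd_memory_target=4294967296))
    # -> osd memory target = 4294967296

    # Per-host fact set by the new set_fact task: it takes precedence.
    print(line.render(osd_memory_target=4294967296, _osd_memory_target=9620726743))
    # -> osd memory target = 9620726743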

@@ -364,6 +364,7 @@ lvmetad_disabled: false
 is_hci: false
 hci_safety_factor: 0.2
 non_hci_safety_factor: 0.7
+safety_factor: "{{ hci_safety_factor if is_hci | bool else non_hci_safety_factor }}"
 osd_memory_target: 4294967296
 journal_size: 5120 # OSD journal size in MB
 block_db_size: -1 # block db size in bytes for the ceph-volume lvm batch. -1 means use the default of 'as big as possible'.
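With these defaults, whether a host ends up above the 4 GiB default depends on RAM, OSD count, and deployment type. A hypothetical comparison reusing the same arithmetic as the tasks hunk (both hosts are made up):

    GiB = 1024**3
    default_target = 4294967296  # osd_memory_target default

    def candidate(memtotal_mb: int, safety_factor: float, num_osds: int) -> int:
        return int(memtotal_mb * 1048576 * safety_factor / num_osds)

    # Hypothetical HCI host (safety_factor 0.2): 256 GiB RAM, 8 OSDs
    print(candidate(262144, 0.2, 8) / GiB)  # ~6.4 -> raised above the default
    # Hypothetical HCI host (safety_factor 0.2): 64 GiB RAM, 8 OSDs
    print(candidate(65536, 0.2, 8) / GiB)   # ~1.6 -> below 4 GiB, default kept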