config/osd: various fixes

- sets `osd_memory_target` per OSD host via `ceph config set osd/host:{{ inventory_hostname }}`.
- refactors the `[osd]` section of the ceph.conf template: the `osd_memory_target`
  computation moves out of the Jinja template and into an Ansible task.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=2056675

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/7253/head
Guillaume Abrioux 2022-07-11 10:23:56 +02:00
parent 5283fa6e96
commit 8a5628b516
5 changed files with 15 additions and 11 deletions

@@ -358,6 +358,7 @@ dummy:
 #is_hci: false
 #hci_safety_factor: 0.2
 #non_hci_safety_factor: 0.7
+#safety_factor: "{{ hci_safety_factor if is_hci | bool else non_hci_safety_factor }}"
 #osd_memory_target: 4294967296
 #journal_size: 5120 # OSD journal size in MB
 #block_db_size: -1 # block db size in bytes for the ceph-volume lvm batch. -1 means use the default of 'as big as possible'.

@@ -358,6 +358,7 @@ ceph_iscsi_config_dev: false
 #is_hci: false
 #hci_safety_factor: 0.2
 #non_hci_safety_factor: 0.7
+#safety_factor: "{{ hci_safety_factor if is_hci | bool else non_hci_safety_factor }}"
 #osd_memory_target: 4294967296
 #journal_size: 5120 # OSD journal size in MB
 #block_db_size: -1 # block db size in bytes for the ceph-volume lvm batch. -1 means use the default of 'as big as possible'.

@@ -98,6 +98,18 @@
   when:
     - devices | default([]) | length > 0
 
+- name: set_fact _osd_memory_target
+  set_fact:
+    _osd_memory_target: "{{ ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) | int }}"
+  when:
+    - num_osds | default(0) | int > 0
+    - ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) > osd_memory_target
+
+- name: set osd_memory_target
+  command: "{{ ceph_cmd }} config set osd/host:{{ inventory_hostname }} osd_memory_target {{ _osd_memory_target | default(osd_memory_target) }}"
+  changed_when: false
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+
 - name: create ceph conf directory
   file:
     path: "/etc/ceph"
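
For a rough sense of the arithmetic in the new `set_fact` task above, here is a hypothetical worked example; the host size and OSD count are invented for illustration:

```python
# Worked example of the sizing rule in the set_fact task above.
# All concrete values here are hypothetical.
memtotal_mb = 131072            # 128 GiB host, i.e. ansible_facts['memtotal_mb']
safety_factor = 0.7             # the non_hci_safety_factor default (dedicated node)
num_osds = 12
osd_memory_target = 4294967296  # 4 GiB, the shipped default

candidate = int(memtotal_mb * 1048576 * safety_factor / num_osds)
# candidate == 8017272285, roughly 7.5 GiB per OSD.
# candidate > osd_memory_target, so the play would set _osd_memory_target
# to this larger value and push it with `ceph config set osd/host:<host>`.
effective = candidate if candidate > osd_memory_target else osd_memory_target
print(effective)  # 8017272285
```

When the host is small (or dense with OSDs) the `when` condition fails, `_osd_memory_target` stays undefined, and the `default(osd_memory_target)` filter falls back to the 4 GiB default.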

@@ -82,17 +82,6 @@ filestore xattr use omap = true
 {% if osd_objectstore == 'bluestore' %}
 {% set _num_osds = num_osds | default(0) | int %}
 [osd]
-{% if is_hci | bool and _num_osds > 0 %}
-{# hci_safety_factor is the safety factor for HCI deployments #}
-{% if ansible_facts['memtotal_mb'] * 1048576 * hci_safety_factor / _num_osds > osd_memory_target %}
-{% set _osd_memory_target = (ansible_facts['memtotal_mb'] * 1048576 * hci_safety_factor / _num_osds) | int %}
-{% endif %}
-{% elif _num_osds > 0 %}
-{# non_hci_safety_factor is the safety factor for dedicated nodes #}
-{% if ansible_facts['memtotal_mb'] * 1048576 * non_hci_safety_factor / _num_osds > osd_memory_target %}
-{% set _osd_memory_target = (ansible_facts['memtotal_mb'] * 1048576 * non_hci_safety_factor / _num_osds) | int %}
-{% endif %}
-{% endif %}
 osd memory target = {{ _osd_memory_target | default(osd_memory_target) }}
 {% endif %}
 {% endif %}
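
The deleted Jinja branches and the new `set_fact` task encode the same rule, so the computed value does not change, only where it is applied. A minimal Python sketch of that shared rule, just to make the equivalence explicit (the function name is illustrative, not from the code):

```python
def effective_memory_target(memtotal_mb: int, num_osds: int,
                            safety_factor: float,
                            osd_memory_target: int = 4294967296) -> int:
    """Raise the per-OSD memory target above the default only when the
    host's memory share (scaled by the safety factor) exceeds it."""
    if num_osds <= 0:
        return osd_memory_target
    candidate = int(memtotal_mb * 1048576 * safety_factor / num_osds)
    return candidate if candidate > osd_memory_target else osd_memory_target
```

The practical difference after this commit: the result is no longer rendered into each host's ceph.conf but applied centrally through `ceph config set osd/host:...`, delegated to the first monitor.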

@@ -350,6 +350,7 @@ lvmetad_disabled: false
 is_hci: false
 hci_safety_factor: 0.2
 non_hci_safety_factor: 0.7
+safety_factor: "{{ hci_safety_factor if is_hci | bool else non_hci_safety_factor }}"
 osd_memory_target: 4294967296
 journal_size: 5120 # OSD journal size in MB
 block_db_size: -1 # block db size in bytes for the ceph-volume lvm batch. -1 means use the default of 'as big as possible'.
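
The new `safety_factor` default collapses the two deleted template branches into a single variable. With the shipped defaults it resolves as in this small sketch (a Python mirror of the Jinja expression, for illustration only):

```python
def pick_safety_factor(is_hci: bool,
                       hci_safety_factor: float = 0.2,
                       non_hci_safety_factor: float = 0.7) -> float:
    # Mirrors the Jinja default:
    #   "{{ hci_safety_factor if is_hci | bool else non_hci_safety_factor }}"
    return hci_safety_factor if is_hci else non_hci_safety_factor

assert pick_safety_factor(is_hci=True) == 0.2   # hyperconverged node
assert pick_safety_factor(is_hci=False) == 0.7  # dedicated OSD node
```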