#jinja2: trim_blocks: "true", lstrip_blocks: "true"
# {{ ansible_managed }}
[global]
{# When cephx is disabled, explicitly turn off every auth requirement #}
{% if not cephx %}
auth cluster required = none
auth service required = none
auth client required = none
auth supported = none
{% endif %}
{% if ip_version == 'ipv6' %}
ms bind ipv6 = true
{% endif %}
{# Single-host clusters must place replicas across OSDs (crush type 0) instead of hosts #}
{% if common_single_host_mode is defined and common_single_host_mode %}
osd crush chooseleaf type = 0
{% endif %}
{# NOTE (leseb): the blank lines in-between are needed otherwise we won't get any line break #}
{% set nb_mon = groups.get(mon_group_name, []) | length | int %}
{% set nb_client = groups.get(client_group_name, []) | length | int %}
{% set nb_osd = groups.get(osd_group_name, []) | length | int %}
{# Only monitor nodes get "mon initial members": a comma-separated list of mon names, #}
{# using FQDNs when mon_use_fqdn is set and short hostnames otherwise #}
{% if nb_mon > 0 and inventory_hostname in groups.get(mon_group_name, []) %}
mon initial members = {% for host in groups[mon_group_name] %}
{% if hostvars[host]['ansible_fqdn'] is defined and mon_use_fqdn -%}
{{ hostvars[host]['ansible_fqdn'] }}
{%- elif hostvars[host]['ansible_hostname'] is defined -%}
{{ hostvars[host]['ansible_hostname'] }}
{%- endif %}
{%- if not loop.last %},{% endif %}
{% endfor %}
{% endif %}
fsid = {{ fsid }}
{# Containerized daemons: disable file logging (output goes to /dev/null) #}
{% if containerized_deployment %}
log file = /dev/null
mon cluster log file = /dev/null
{% endif %}
{# "mon host" is the comma-separated list of monitor addresses; client-only nodes #}
{# with no local monitors use the externally provided monitor IPs instead #}
mon host = {% if nb_mon > 0 %}
{% for host in _monitor_addresses -%}
{{ host.addr }}
{%- if not loop.last -%},{%- endif %}
{%- endfor %}
{% elif nb_mon == 0 and inventory_hostname in groups.get(client_group_name, []) %}
{{ external_cluster_mon_ips }}
{% endif %}
{# regex_replace strips spaces so comma-separated CIDR lists render cleanly #}
{% if public_network is defined %}
public network = {{ public_network | regex_replace(' ', '') }}
{% endif %}
{% if cluster_network is defined %}
cluster network = {{ cluster_network | regex_replace(' ', '') }}
{% endif %}
{# Optional RGW-wide overrides, emitted only when the variables are set #}
{% if rgw_override_bucket_index_max_shards is defined %}
rgw override bucket index max shards = {{ rgw_override_bucket_index_max_shards }}
{% endif %}
{% if rgw_bucket_default_quota_max_objects is defined %}
rgw bucket default quota max objects = {{ rgw_bucket_default_quota_max_objects }}
{% endif %}
{# Client nodes get a [client.libvirt] section for QEMU/libvirt RBD access; #}
{# the trailing "# must be writable..." notes are Ceph inline comments kept in the rendered file #}
{% if inventory_hostname in groups.get(client_group_name, []) %}
[client.libvirt]
admin socket = {{ rbd_client_admin_socket_path }}/$cluster-$type.$id.$pid.$cctid.asok # must be writable by QEMU and allowed by SELinux or AppArmor
log file = {{ rbd_client_log_file }} # must be writable by QEMU and allowed by SELinux or AppArmor
{% endif %}
{% if inventory_hostname in groups.get(osd_group_name, []) %}
{# Filestore OSDs: mkfs/mount options and journal sizing #}
{% if osd_objectstore == 'filestore' %}
[osd]
osd mkfs type = {{ osd_mkfs_type }}
osd mkfs options xfs = {{ osd_mkfs_options_xfs }}
osd mount options xfs = {{ osd_mount_options_xfs }}
osd journal size = {{ journal_size }}
{# Jinja "!= None" check: an explicit user setting always wins over the ext4 default #}
{% if filestore_xattr_use_omap != None %}
filestore xattr use omap = {{ filestore_xattr_use_omap }}
{% elif osd_mkfs_type == "ext4" %}
filestore xattr use omap = true
{# else, default is false #}
{% endif %}
{% endif %}
{# Bluestore OSDs: compute a per-OSD memory target from total RAM (MiB * 1048576 = bytes) #}
{# scaled by a safety factor, but only if that exceeds the configured osd_memory_target #}
{% if osd_objectstore == 'bluestore' %}
{% set _num_osds = num_osds | default(0) | int %}
[osd]
{% if is_hci and _num_osds > 0 %}
{# hci_safety_factor is the safety factor for HCI deployments #}
{% if ansible_memtotal_mb * 1048576 * hci_safety_factor / _num_osds > osd_memory_target %}
{% set _osd_memory_target = (ansible_memtotal_mb * 1048576 * hci_safety_factor / _num_osds) | int %}
{% endif %}
{% elif _num_osds > 0 %}
{# non_hci_safety_factor is the safety factor for dedicated nodes #}
{% if ansible_memtotal_mb * 1048576 * non_hci_safety_factor / _num_osds > osd_memory_target %}
{% set _osd_memory_target = (ansible_memtotal_mb * 1048576 * non_hci_safety_factor / _num_osds) | int %}
{% endif %}
{% endif %}
{# Fall back to the plain osd_memory_target when no computed value was set #}
osd memory target = {{ _osd_memory_target | default(osd_memory_target) }}
{% endif %}
{% endif %}
{# NOTE: when RGW collocates with other daemons (e.g. NFS), each [client.rgw.*] #}
{# section must be emitted exactly once — duplicated sections produced a broken #}
{# ceph.conf layout in the past (see "config: do not duplicate sections when doing collocation") #}
{# RGW nodes: emit one [client.rgw.<host>.<instance>] section per configured RGW instance. #}
{# Fix: removed stray scrape-artifact timestamp lines that were plain text inside the #}
{# template and would have been rendered verbatim into the generated ceph.conf. #}
{% if inventory_hostname in groups.get(rgw_group_name, []) %}
{% for host in groups[rgw_group_name] %}
{% set _rgw_hostname = hostvars[host]['rgw_hostname'] | default(hostvars[host]['ansible_hostname']) %}
{# {{ hostvars[host]['rgw_hostname'] }} for backward compatibility, fqdn issues. See bz1580408 #}
{% if hostvars[host]['rgw_instances'] is defined %}
{% for instance in hostvars[host]['rgw_instances'] %}
[client.rgw.{{ _rgw_hostname + '.' + instance['instance_name'] }}]
host = {{ _rgw_hostname }}
keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ _rgw_hostname + '.' + instance['instance_name'] }}/keyring
log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_hostname'] + '.' + instance['instance_name'] }}.log
{# civetweb takes "port=", beast takes "endpoint=" for its bind address #}
rgw frontends = {{ radosgw_frontend_type }} {{ 'port' if radosgw_frontend_type == 'civetweb' else 'endpoint' }}={{ instance['radosgw_address'] }}:{{ instance['radosgw_frontend_port'] }} {{ radosgw_frontend_options }}
{% endfor %}
{% endif %}
{% endfor %}
{% endif %}
{# NFS gateway nodes that are NOT also RGW nodes: when the NFS object gateway is #}
{# enabled, each one still needs a [client.rgw.<host>] section of its own #}
{% if inventory_hostname in groups.get(nfs_group_name, []) and inventory_hostname not in groups.get(rgw_group_name, []) %}
{% for host in groups[nfs_group_name] %}
{% set _rgw_hostname = hostvars[host]['rgw_hostname'] | default(hostvars[host]['ansible_hostname']) %}
{% if nfs_obj_gw %}
[client.rgw.{{ _rgw_hostname }}]
host = {{ _rgw_hostname }}
keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ _rgw_hostname }}/keyring
log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_hostname'] }}.log
{% endif %}
{% endfor %}
{% endif %}