mirror of https://github.com/ceph/ceph-ansible.git
migrate from ceph.conf to ceph config
keep the ceph.conf very simple. manage the common options such as `public_network` with the `ceph_config` module.

Signed-off-by: Guillaume Abrioux <gabrioux@ibm.com>

pull/7474/head
parent
d6bc8e3b9a
commit
14b4abf7c0
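
For readers unfamiliar with the pattern this commit introduces: instead of templating common options into `ceph.conf`, they are pushed into the cluster's centralized configuration database through the repository's `ceph_config` module. A minimal sketch of that pattern, reusing the `action`/`who`/`option`/`value` parameters and the `delegate_to`/`run_once` pattern that appear in the hunks below (the task name and the example subnet are illustrative only, not taken from the commit):

```yaml
# Illustrative sketch only: push a "common" option (here public_network)
# into the Ceph config database instead of rendering it into ceph.conf.
# The parameters mirror the ceph_config invocations added by this commit.
- name: set public_network via ceph_config (example)
  ceph_config:
    action: set
    who: "global"                 # apply to all daemons
    option: "public_network"
    value: "192.168.24.0/24"      # example subnet, not from the commit
  delegate_to: "{{ groups[mon_group_name][0] }}"
  run_once: true
```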
@@ -23,15 +23,6 @@ dummy:
 # TUNING #
 ##########
-
-# To support buckets with a very large number of objects it's
-# important to split them into shards. We suggest about 100K
-# objects per shard as a conservative maximum.
-#rgw_override_bucket_index_max_shards: 16
-
-# Consider setting a quota on buckets so that exceeding this
-# limit will require admin intervention.
-#rgw_bucket_default_quota_max_objects: 1638400 # i.e., 100K * 16
 
 # Declaring rgw_create_pools will create pools with the given number of pgs,
 # size, and type. The following are some important notes on this automatic
 # pool creation:
@@ -104,13 +104,6 @@
         - "{{ ceph_conf_overrides.get('osd', {}).get('osd_memory_target', '') }}"
       when: item
 
-    - name: drop osd_memory_target from conf override
-      set_fact:
-        ceph_conf_overrides: "{{ ceph_conf_overrides | combine({'osd': {item: omit}}, recursive=true) }}"
-      loop:
-        - osd memory target
-        - osd_memory_target
-
     - name: set_fact _osd_memory_target
       set_fact:
         _osd_memory_target: "{{ ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) | int }}"
@@ -141,7 +134,6 @@
     owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
     group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
     mode: "0644"
-    config_overrides: "{{ ceph_conf_overrides }}"
     config_type: ini
   notify:
     - restart ceph mons
@@ -2,36 +2,14 @@
 # {{ ansible_managed }}
 
 [global]
-{% if not cephx | bool %}
-auth cluster required = none
-auth service required = none
-auth client required = none
-{% endif %}
+#{% if not cephx | bool %}
+#auth cluster required = none
+#auth service required = none
+#auth client required = none
+#{% endif %}
-{% if ip_version == 'ipv6' %}
-ms bind ipv6 = true
-ms bind ipv4 = false
-{% endif %}
-{% if common_single_host_mode is defined and common_single_host_mode %}
-osd crush chooseleaf type = 0
-{% endif %}
 {# NOTE (leseb): the blank lines in-between are needed otherwise we won't get any line break #}
 
 {% set nb_mon = groups.get(mon_group_name, []) | length | int %}
-{% set nb_client = groups.get(client_group_name, []) | length | int %}
-{% set nb_osd = groups.get(osd_group_name, []) | length | int %}
-{% if inventory_hostname in groups.get(client_group_name, []) and not inventory_hostname == groups.get(client_group_name, []) | first %}
-{% endif %}
-
-{% if nb_mon > 0 and inventory_hostname in groups.get(mon_group_name, []) %}
-mon initial members = {% for host in groups[mon_group_name] %}
-      {% if hostvars[host]['ansible_facts']['hostname'] is defined -%}
-        {{ hostvars[host]['ansible_facts']['hostname'] }}
-      {%- endif %}
-      {%- if not loop.last %},{% endif %}
-    {% endfor %}
-
-osd pool default crush rule = {{ osd_pool_default_crush_rule }}
-{% endif %}
 
 fsid = {{ fsid }}
 mon host = {% if nb_mon > 0 %}
@@ -46,70 +24,8 @@ mon host = {% if nb_mon > 0 %}
 {{ external_cluster_mon_ips }}
 {% endif %}
 
-{% if public_network is defined %}
-public network = {{ public_network | regex_replace(' ', '') }}
-{% endif %}
-{% if cluster_network is defined %}
-cluster network = {{ cluster_network | regex_replace(' ', '') }}
-{% endif %}
-{% if rgw_override_bucket_index_max_shards is defined %}
-rgw override bucket index max shards = {{ rgw_override_bucket_index_max_shards }}
-{% endif %}
-{% if rgw_bucket_default_quota_max_objects is defined %}
-rgw bucket default quota max objects = {{ rgw_bucket_default_quota_max_objects }}
-{% endif %}
-
 {% if inventory_hostname in groups.get(client_group_name, []) %}
 [client.libvirt]
 admin socket = {{ rbd_client_admin_socket_path }}/$cluster-$type.$id.$pid.$cctid.asok # must be writable by QEMU and allowed by SELinux or AppArmor
 log file = {{ rbd_client_log_file }} # must be writable by QEMU and allowed by SELinux or AppArmor
 {% endif %}
-
-{% if inventory_hostname in groups.get(osd_group_name, []) %}
-[osd]
-osd memory target = {{ _osd_memory_target | default(osd_memory_target) }}
-{% endif %}
-
-{% if inventory_hostname in groups.get(rgw_group_name, []) %}
-{% set _rgw_hostname = hostvars[inventory_hostname]['rgw_hostname'] | default(hostvars[inventory_hostname]['ansible_facts']['hostname']) %}
-{# {{ hostvars[host]['rgw_hostname'] }} for backward compatibility, fqdn issues. See bz1580408 #}
-{% if hostvars[inventory_hostname]['rgw_instances'] is defined %}
-{% for instance in hostvars[inventory_hostname]['rgw_instances'] %}
-[client.rgw.{{ _rgw_hostname + '.' + instance['instance_name'] }}]
-host = {{ _rgw_hostname }}
-keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ _rgw_hostname + '.' + instance['instance_name'] }}/keyring
-log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] + '.' + instance['instance_name'] }}.log
-{% set _rgw_binding_socket = instance['radosgw_address'] | default(_radosgw_address) | string + ':' + instance['radosgw_frontend_port'] | default(radosgw_frontend_port) | string %}
-{%- macro frontend_line(frontend_type) -%}
-{%- if frontend_type == 'civetweb' -%}
-{{ radosgw_frontend_type }} port={{ _rgw_binding_socket }}{{ 's ssl_certificate='+radosgw_frontend_ssl_certificate if radosgw_frontend_ssl_certificate else '' }}
-{%- elif frontend_type == 'beast' -%}
-{{ radosgw_frontend_type }} {{ 'ssl_' if radosgw_frontend_ssl_certificate else '' }}endpoint={{ _rgw_binding_socket }}{{ ' ssl_certificate='+radosgw_frontend_ssl_certificate if radosgw_frontend_ssl_certificate else '' }}
-{%- endif -%}
-{%- endmacro -%}
-rgw frontends = {{ frontend_line(radosgw_frontend_type) }} {{ radosgw_frontend_options }}
-{% if 'num_threads' not in radosgw_frontend_options %}
-rgw thread pool size = {{ radosgw_thread_pool_size }}
-{% endif %}
-{% if rgw_multisite | bool %}
-{% if ((instance['rgw_zonemaster'] | default(rgw_zonemaster) | bool) or (deploy_secondary_zones | default(True) | bool)) %}
-rgw_realm = {{ instance['rgw_realm'] }}
-rgw_zonegroup = {{ instance['rgw_zonegroup'] }}
-rgw_zone = {{ instance['rgw_zone'] }}
-{% endif %}
-{% endif %}
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if inventory_hostname in groups.get(nfs_group_name, []) and inventory_hostname not in groups.get(rgw_group_name, []) %}
-{% for host in groups[nfs_group_name] %}
-{% set _rgw_hostname = hostvars[host]['rgw_hostname'] | default(hostvars[host]['ansible_facts']['hostname']) %}
-{% if nfs_obj_gw | bool %}
-[client.rgw.{{ _rgw_hostname }}]
-host = {{ _rgw_hostname }}
-keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ _rgw_hostname }}/keyring
-log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_facts']['hostname'] }}.log
-{% endif %}
-{% endfor %}
-{% endif %}
@@ -15,6 +15,21 @@
   include_tasks: pre_requisite_container.yml
   when: containerized_deployment | bool
 
+- name: set_fact _rgw_hostname
+  set_fact:
+    _rgw_hostname: "{{ hostvars[inventory_hostname]['rgw_hostname'] | default(hostvars[inventory_hostname]['ansible_facts']['hostname']) }}"
+
+- name: set rgw parameter (log file)
+  ceph_config:
+    action: set
+    who: "client.rgw.{{ _rgw_hostname }}"
+    option: "log file"
+    value: "/var/log/ceph/{{ cluster }}-rgw-{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}.log"
+  environment:
+    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+  loop: "{{ groups.get('nfss', []) }}"
+
 - name: include create_rgw_nfs_user.yml
   import_tasks: create_rgw_nfs_user.yml
   when: groups.get(mon_group_name, []) | length > 0
@@ -15,15 +15,6 @@ copy_admin_key: false
 # TUNING #
 ##########
-
-# To support buckets with a very large number of objects it's
-# important to split them into shards. We suggest about 100K
-# objects per shard as a conservative maximum.
-#rgw_override_bucket_index_max_shards: 16
-
-# Consider setting a quota on buckets so that exceeding this
-# limit will require admin intervention.
-#rgw_bucket_default_quota_max_objects: 1638400 # i.e., 100K * 16
 
 # Declaring rgw_create_pools will create pools with the given number of pgs,
 # size, and type. The following are some important notes on this automatic
 # pool creation:
@@ -1,4 +1,19 @@
 ---
+- name: set global config
+  ceph_config:
+    action: set
+    who: "client.rgw.{{ _rgw_hostname + '.' + item.0.instance_name }}"
+    option: "{{ item.1 }}"
+    value: "{{ item.0[item.1] }}"
+  environment:
+    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  run_once: true
+  with_nested:
+    - "{{ rgw_instances }}"
+    - [ 'rgw_realm', 'rgw_zonegroup', 'rgw_zone']
+
 - name: set_fact realms
   set_fact:
     realms: '{{ realms | default([]) | union([item.rgw_realm]) }}'
@@ -1,4 +1,34 @@
 ---
+- name: set_fact _rgw_hostname
+  set_fact:
+    _rgw_hostname: "{{ hostvars[inventory_hostname]['rgw_hostname'] | default(hostvars[inventory_hostname]['ansible_facts']['hostname']) }}"
+
+- name: set rgw parameter (log file)
+  ceph_config:
+    action: set
+    who: "client.rgw.{{ _rgw_hostname + '.' + item.instance_name }}"
+    option: "log file"
+    value: "/var/log/ceph/{{ cluster }}-rgw-{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] + '.' + item.instance_name }}.log"
+  environment:
+    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+  loop: "{{ hostvars[inventory_hostname]['rgw_instances'] }}"
+
+- name: set rgw parameter (rgw_frontends)
+  ceph_config:
+    action: set
+    who: "client.rgw.{{ _rgw_hostname + '.' + item.instance_name }}"
+    option: "rgw_frontends"
+    value: "beast port={{ item.radosgw_frontend_port | string }}"
+  environment:
+    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+  loop: "{{ hostvars[inventory_hostname]['rgw_instances'] }}"
+  notify: restart ceph rgws
+
+# rgw_frontends
+# {{ 'ssl_' if radosgw_frontend_ssl_certificate else '' }}endpoint={{ _rgw_binding_socket }}{{ ' ssl_certificate='+radosgw_frontend_ssl_certificate if radosgw_frontend_ssl_certificate else '' }}
+
 - name: create rados gateway directories
   file:
     path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
@@ -174,6 +174,60 @@
             status: "Complete"
             end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
 
+- hosts: mons[0]
+  become: True
+  gather_facts: false
+  any_errors_fatal: true
+  tasks:
+    - import_role:
+        name: ceph-defaults
+
+    - name: set global config
+      ceph_config:
+        action: set
+        who: "global"
+        option: "{{ item.key }}"
+        value: "{{ item.value }}"
+      environment:
+        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+      with_dict:
+        "{{ {
+             'public_network': public_network | default(False),
+             'cluster_network': cluster_network | default(False),
+             'osd pool default crush rule': osd_pool_default_crush_rule,
+             'ms bind ipv6': 'true' if ip_version == 'ipv6' else 'false',
+             'ms bind ipv4': 'false' if ip_version == 'ipv6' else 'true',
+             'osd crush chooseleaf type': '0' if common_single_host_mode | default(False) | bool else False,
+        } }}"
+      when:
+        - inventory_hostname == ansible_play_hosts_all | last
+        - item.value
+
+    - name: set global config overrides
+      ceph_config:
+        action: set
+        who: "global"
+        option: "{{ item.key }}"
+        value: "{{ item.value }}"
+      environment:
+        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+      when: inventory_hostname == ansible_play_hosts_all | last
+      with_dict: "{{ ceph_conf_overrides['global'] }}"
+
+    - name: set osd_memory_target
+      ceph_config:
+        action: set
+        who: "osd.*/{{ item }}:host"
+        option: "osd_memory_target"
+        value: "{{ _osd_memory_target | default(osd_memory_target) }}"
+      environment:
+        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+      when: inventory_hostname == ansible_play_hosts_all | last
+      loop: "{{ groups[osd_group_name] | default([]) }}"
+
 - hosts: osds
   become: True
   gather_facts: false
@@ -166,6 +166,51 @@
             status: "Complete"
             end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
 
+- hosts: mons[0]
+  become: True
+  gather_facts: false
+  any_errors_fatal: true
+  tasks:
+    - import_role:
+        name: ceph-defaults
+
+    - name: set global config
+      ceph_config:
+        action: set
+        who: "global"
+        option: "{{ item.key }}"
+        value: "{{ item.value }}"
+      with_dict:
+        "{{ {
+             'public_network': public_network | default(False),
+             'cluster_network': cluster_network | default(False),
+             'osd pool default crush rule': osd_pool_default_crush_rule,
+             'ms bind ipv6': 'true' if ip_version == 'ipv6' else 'false',
+             'ms bind ipv4': 'false' if ip_version == 'ipv6' else 'true',
+             'osd crush chooseleaf type': '0' if common_single_host_mode | default(False) | bool else False,
+        } }}"
+      when:
+        - inventory_hostname == ansible_play_hosts_all | last
+        - item.value
+
+    - name: set global config overrides
+      ceph_config:
+        action: set
+        who: "global"
+        option: "{{ item.key }}"
+        value: "{{ item.value }}"
+      when: inventory_hostname == ansible_play_hosts_all | last
+      with_dict: "{{ ceph_conf_overrides['global'] }}"
+
+    - name: set osd_memory_target
+      ceph_config:
+        action: set
+        who: "osd.*/{{ item }}:host"
+        option: "osd_memory_target"
+        value: "{{ _osd_memory_target | default(osd_memory_target) }}"
+      when: inventory_hostname == ansible_play_hosts_all | last
+      loop: "{{ groups[osd_group_name] | default([]) }}"
+
 - hosts: osds
   gather_facts: false
   become: True
@@ -29,14 +29,3 @@ class TestMons(object):
         output = host.check_output(cmd)
         assert output.strip().startswith("cluster")
 
-    def test_ceph_config_has_inital_members_line(self, node, host, setup):
-        assert host.file(setup["conf_path"]).contains("^mon initial members = .*$")
-
-    def test_initial_members_line_has_correct_value(self, node, host, setup):
-        mon_initial_members_line = host.check_output("grep 'mon initial members = ' /etc/ceph/{cluster}.conf".format(cluster=setup['cluster_name']))  # noqa E501
-        result = True
-        for host in node["vars"]["groups"]["mons"]:
-            pattern = re.compile(host)
-            if pattern.search(mon_initial_members_line) == None:  # noqa E501
-                result = False
-        assert result
@@ -1,59 +0,0 @@
-import pytest
-import json
-
-
-class TestRGWs(object):
-
-    @pytest.mark.no_docker
-    def test_rgw_bucket_default_quota_is_set(self, node, host, setup):
-        assert host.file(setup["conf_path"]).contains(
-            "rgw override bucket index max shards")
-        assert host.file(setup["conf_path"]).contains(
-            "rgw bucket default quota max objects")
-
-    @pytest.mark.no_docker
-    def test_rgw_bucket_default_quota_is_applied(self, node, host, setup):
-        radosgw_admin_cmd = "timeout --foreground -s KILL 5 sudo radosgw-admin --cluster={cluster} -n client.rgw.{hostname}.rgw0 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring user info --uid=test --format=json".format(  # noqa E501
-            hostname=node["vars"]["inventory_hostname"],
-            cluster=setup['cluster_name']
-        )
-        radosgw_admin_output = host.run(radosgw_admin_cmd)
-        if radosgw_admin_output.rc == 22:
-            radosgw_admin_cmd = "timeout --foreground -s KILL 5 sudo radosgw-admin --cluster={cluster} -n client.rgw.{hostname}.rgw0 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring user create --uid=test --display-name Test".format(  # noqa E501
-                hostname=node["vars"]["inventory_hostname"],
-                cluster=setup['cluster_name']
-            )
-            radosgw_admin_output = host.run(radosgw_admin_cmd)
-        radosgw_admin_output_json = json.loads(radosgw_admin_output.stdout)
-        assert radosgw_admin_output_json["bucket_quota"]["enabled"] == True  # noqa E501
-        assert radosgw_admin_output_json["bucket_quota"]["max_objects"] == 1638400  # noqa E501
-
-    @pytest.mark.no_docker
-    def test_rgw_tuning_pools_are_set(self, node, host, setup):
-        pools = node["vars"]["rgw_create_pools"]
-        if pools is None:
-            pytest.skip('rgw_create_pools not defined, nothing to test')
-        for pool_name in pools.keys():
-            cmd = host.run("sudo ceph --cluster={cluster} --connect-timeout 5 -n client.rgw.{hostname}.rgw0 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring osd pool get {pool_name} size".format(  # noqa E501
-                hostname=node["vars"]["inventory_hostname"],
-                cluster=setup['cluster_name'],
-                pool_name=pool_name
-            ))
-            assert cmd.rc == 0
-
-    @pytest.mark.docker
-    def test_docker_rgw_tuning_pools_are_set(self, node, host, setup):
-        hostname = node["vars"]["inventory_hostname"]
-        cluster = setup['cluster_name']
-        container_binary = setup["container_binary"]
-        pools = node["vars"].get("rgw_create_pools")
-        if pools is None:
-            pytest.skip('rgw_create_pools not defined, nothing to test')
-        for pool_name in pools.keys():
-            cmd = host.run("sudo {container_binary} exec ceph-rgw-{hostname}-rgw0 ceph --cluster={cluster} -n client.rgw.{hostname}.rgw0 --connect-timeout 5 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring osd pool get {pool_name} size".format(  # noqa E501
-                hostname=hostname,
-                cluster=cluster,
-                pool_name=pool_name,
-                container_binary=container_binary
-            ))
-            assert cmd.rc == 0