mirror of https://github.com/ceph/ceph-ansible.git
rgw: set tuning parameters
Change civetweb_num_thread default to 100. Add capability to override the number of PGs for RGW pools. Add ceph.conf vars to enable a default bucket object quota, at the user's choosing, into the ceph.conf.j2 template. Resolves: rhbz#1437173. Resolves: rhbz#1391500. Signed-off-by: Ali Maredia <amaredia@redhat.com>
pull/1474/head
parent
5b5e0b607a
commit
16108f5d23
|
@ -293,7 +293,7 @@ dummy:
|
|||
#radosgw_resolve_cname: false # enable for radosgw to resolve DNS CNAME based bucket names
|
||||
#radosgw_civetweb_port: 8080
|
||||
#radosgw_civetweb_bind_ip: "{{ ansible_default_ipv4.address }}" # when using ipv6 enclose with brackets: "[{{ ansible_default_ipv6.address }}]"
|
||||
#radosgw_civetweb_num_threads: 50
|
||||
#radosgw_civetweb_num_threads: 100
|
||||
# For additional civetweb configuration options available such as SSL, logging,
|
||||
# keepalive, and timeout settings, please see the civetweb docs at
|
||||
# https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md
|
||||
|
|
|
@ -29,6 +29,32 @@ dummy:
|
|||
#rgw_pull_port: "{{ radosgw_civetweb_port }}"
|
||||
#rgw_pull_proto: "http"
|
||||
|
||||
########
|
||||
#TUNING#
|
||||
########
|
||||
|
||||
# To support buckets with a very large number of objects it's
|
||||
# important to split them into shards. We suggest about 100K
|
||||
# objects per shard as a conservative maximum.
|
||||
#rgw_override_bucket_index_max_shards: 16
|
||||
|
||||
# Consider setting a quota on buckets so that exceeding this
|
||||
# limit will require admin intervention.
|
||||
#rgw_bucket_default_quota_max_objects: 1638400 # i.e., 100K * 16
|
||||
|
||||
# This dictionary will create pools with the given number of pgs.
|
||||
# This is important because they would be created with the default
|
||||
# of 8.
|
||||
# New pools and their corresponding pg_nums can be created
|
||||
# by adding to the create_pools dictionary (see foo).
|
||||
#create_pools:
|
||||
# defaults.rgw.buckets.data:
|
||||
# pg_num: 16
|
||||
# defaults.rgw.buckets.index:
|
||||
# pg_num: 32
|
||||
# foo:
|
||||
# pg_num: 4
|
||||
|
||||
##########
|
||||
# DOCKER #
|
||||
##########
|
||||
|
|
|
@ -285,7 +285,7 @@ mds_max_mds: 3
|
|||
radosgw_resolve_cname: false # enable for radosgw to resolve DNS CNAME based bucket names
|
||||
radosgw_civetweb_port: 8080
|
||||
radosgw_civetweb_bind_ip: "{{ ansible_default_ipv4.address }}" # when using ipv6 enclose with brackets: "[{{ ansible_default_ipv6.address }}]"
|
||||
radosgw_civetweb_num_threads: 50
|
||||
radosgw_civetweb_num_threads: 100
|
||||
# For additional civetweb configuration options available such as SSL, logging,
|
||||
# keepalive, and timeout settings, please see the civetweb docs at
|
||||
# https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md
|
||||
|
|
|
@ -109,6 +109,12 @@ host = {{ hostvars[host]['ansible_hostname'] }}
|
|||
{% if radosgw_dns_name is defined %}
|
||||
rgw dns name = {{ radosgw_dns_name }}
|
||||
{% endif %}
|
||||
{% if rgw_override_bucket_index_max_shards is defined %}
|
||||
rgw override bucket index max shards = {{ rgw_override_bucket_index_max_shards }}
|
||||
{% endif %}
|
||||
{% if rgw_bucket_default_quota_max_objects is defined %}
|
||||
rgw bucket default quota max objects = {{ rgw_bucket_default_quota_max_objects }}
|
||||
{% endif %}
|
||||
host = {{ hostvars[host]['ansible_hostname'] }}
|
||||
keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ hostvars[host]['ansible_hostname'] }}/keyring
|
||||
rgw socket path = /tmp/radosgw-{{ hostvars[host]['ansible_hostname'] }}.sock
|
||||
|
|
|
@ -21,6 +21,32 @@ cephx: true
|
|||
rgw_pull_port: "{{ radosgw_civetweb_port }}"
|
||||
rgw_pull_proto: "http"
|
||||
|
||||
########
|
||||
#TUNING#
|
||||
########
|
||||
|
||||
# To support buckets with a very large number of objects it's
|
||||
# important to split them into shards. We suggest about 100K
|
||||
# objects per shard as a conservative maximum.
|
||||
#rgw_override_bucket_index_max_shards: 16
|
||||
|
||||
# Consider setting a quota on buckets so that exceeding this
|
||||
# limit will require admin intervention.
|
||||
#rgw_bucket_default_quota_max_objects: 1638400 # i.e., 100K * 16
|
||||
|
||||
# This dictionary will create pools with the given number of pgs.
|
||||
# This is important because they would be created with the default
|
||||
# of 8.
|
||||
# New pools and their corresponding pg_nums can be created
|
||||
# by adding to the create_pools dictionary (see foo).
|
||||
#create_pools:
|
||||
# defaults.rgw.buckets.data:
|
||||
# pg_num: 16
|
||||
# defaults.rgw.buckets.index:
|
||||
# pg_num: 32
|
||||
# foo:
|
||||
# pg_num: 4
|
||||
|
||||
##########
|
||||
# DOCKER #
|
||||
##########
|
||||
|
|
|
@ -16,6 +16,10 @@
|
|||
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
|
||||
static: False
|
||||
|
||||
- include: rgw_pool_pgs.yml
|
||||
when: create_pools is defined
|
||||
static: False
|
||||
|
||||
- name: include rgw multisite playbooks
|
||||
include: multisite/main.yml
|
||||
when:
|
||||
|
|
|
@ -0,0 +1,7 @@
|
|||
---
|
||||
- name: create rgw pools if create_pools is defined
|
||||
command: ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
when: create_pools is defined
|
||||
with_dict: "{{ create_pools }}"
|
Loading…
Reference in New Issue