diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample
index fadbf2f9f..cb8b968f9 100644
--- a/group_vars/all.yml.sample
+++ b/group_vars/all.yml.sample
@@ -293,7 +293,7 @@ dummy:
 #radosgw_resolve_cname: false # enable for radosgw to resolve DNS CNAME based bucket names
 #radosgw_civetweb_port: 8080
 #radosgw_civetweb_bind_ip: "{{ ansible_default_ipv4.address }}" # when using ipv6 enclose with brackets: "[{{ ansible_default_ipv6.address }}]"
-#radosgw_civetweb_num_threads: 50
+#radosgw_civetweb_num_threads: 100
 # For additional civetweb configuration options available such as SSL, logging,
 # keepalive, and timeout settings, please see the civetweb docs at
 # https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md
diff --git a/group_vars/rgws.yml.sample b/group_vars/rgws.yml.sample
index 21ea8d9cf..d0c51deed 100644
--- a/group_vars/rgws.yml.sample
+++ b/group_vars/rgws.yml.sample
@@ -29,6 +29,32 @@ dummy:
 #rgw_pull_port: "{{ radosgw_civetweb_port }}"
 #rgw_pull_proto: "http"
 
+########
+#TUNING#
+########
+
+# To support buckets with a very large number of objects, it's
+# important to split the bucket index into shards. We suggest
+# about 100K objects per shard as a conservative maximum.
+#rgw_override_bucket_index_max_shards: 16
+
+# Consider setting a quota on buckets so that exceeding this
+# limit will require admin intervention.
+#rgw_bucket_default_quota_max_objects: 1638400 # i.e., 100K * 16
+
+# This dictionary will create pools with the given pg_num
+# (placement group count). This matters because otherwise
+# the pools would be created with the default pg_num of 8.
+# New pools and their corresponding pg_nums can be created
+# by adding to the create_pools dictionary (see the foo example).
+#create_pools:
+#  defaults.rgw.buckets.data:
+#    pg_num: 16
+#  defaults.rgw.buckets.index:
+#    pg_num: 32
+#  foo:
+#    pg_num: 4
+
 ##########
 # DOCKER #
 ##########
diff --git a/roles/ceph-common/defaults/main.yml b/roles/ceph-common/defaults/main.yml
index ff50c426b..244e612c0 100644
--- a/roles/ceph-common/defaults/main.yml
+++ b/roles/ceph-common/defaults/main.yml
@@ -285,7 +285,7 @@ mds_max_mds: 3
 radosgw_resolve_cname: false # enable for radosgw to resolve DNS CNAME based bucket names
 radosgw_civetweb_port: 8080
 radosgw_civetweb_bind_ip: "{{ ansible_default_ipv4.address }}" # when using ipv6 enclose with brackets: "[{{ ansible_default_ipv6.address }}]"
-radosgw_civetweb_num_threads: 50
+radosgw_civetweb_num_threads: 100
 # For additional civetweb configuration options available such as SSL, logging,
 # keepalive, and timeout settings, please see the civetweb docs at
 # https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md
diff --git a/roles/ceph-common/templates/ceph.conf.j2 b/roles/ceph-common/templates/ceph.conf.j2
index 775003ec1..3ad873da3 100644
--- a/roles/ceph-common/templates/ceph.conf.j2
+++ b/roles/ceph-common/templates/ceph.conf.j2
@@ -109,6 +109,12 @@ host = {{ hostvars[host]['ansible_hostname'] }}
 {% if radosgw_dns_name is defined %}
 rgw dns name = {{ radosgw_dns_name }}
 {% endif %}
+{% if rgw_override_bucket_index_max_shards is defined %}
+rgw override bucket index max shards = {{ rgw_override_bucket_index_max_shards }}
+{% endif %}
+{% if rgw_bucket_default_quota_max_objects is defined %}
+rgw bucket default quota max objects = {{ rgw_bucket_default_quota_max_objects }}
+{% endif %}
 host = {{ hostvars[host]['ansible_hostname'] }}
 keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ hostvars[host]['ansible_hostname'] }}/keyring
 rgw socket path = /tmp/radosgw-{{ hostvars[host]['ansible_hostname'] }}.sock
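As a quick reference for the two variables wired into ceph.conf.j2 above, a minimal sketch of how they might be set in group_vars/rgws.yml follows; the values (8 shards, an 819200-object quota) and their placement in that file are illustrative assumptions, not part of this change. When either variable is defined for an RGW host, the template renders the matching "rgw override bucket index max shards" / "rgw bucket default quota max objects" lines into that host's RGW client section of the generated ceph.conf.

    # group_vars/rgws.yml -- hypothetical example, values are assumptions
    rgw_override_bucket_index_max_shards: 8        # shard each bucket index 8 ways
    rgw_bucket_default_quota_max_objects: 819200   # i.e., 100K * 8, kept in step with the shard count
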
diff --git a/roles/ceph-rgw/defaults/main.yml b/roles/ceph-rgw/defaults/main.yml
index 599870f46..2eb337b75 100644
--- a/roles/ceph-rgw/defaults/main.yml
+++ b/roles/ceph-rgw/defaults/main.yml
@@ -21,6 +21,32 @@ cephx: true
 rgw_pull_port: "{{ radosgw_civetweb_port }}"
 rgw_pull_proto: "http"
 
+########
+#TUNING#
+########
+
+# To support buckets with a very large number of objects, it's
+# important to split the bucket index into shards. We suggest
+# about 100K objects per shard as a conservative maximum.
+#rgw_override_bucket_index_max_shards: 16
+
+# Consider setting a quota on buckets so that exceeding this
+# limit will require admin intervention.
+#rgw_bucket_default_quota_max_objects: 1638400 # i.e., 100K * 16
+
+# This dictionary will create pools with the given pg_num
+# (placement group count). This matters because otherwise
+# the pools would be created with the default pg_num of 8.
+# New pools and their corresponding pg_nums can be created
+# by adding to the create_pools dictionary (see the foo example).
+#create_pools:
+#  defaults.rgw.buckets.data:
+#    pg_num: 16
+#  defaults.rgw.buckets.index:
+#    pg_num: 32
+#  foo:
+#    pg_num: 4
+
 ##########
 # DOCKER #
 ##########
diff --git a/roles/ceph-rgw/tasks/main.yml b/roles/ceph-rgw/tasks/main.yml
index a89f84167..4e0dc5e45 100644
--- a/roles/ceph-rgw/tasks/main.yml
+++ b/roles/ceph-rgw/tasks/main.yml
@@ -16,6 +16,10 @@
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
 
+- include: rgw_pool_pgs.yml
+  when: create_pools is defined
+  static: False
+
 - name: include rgw multisite playbooks
   include: multisite/main.yml
   when:
diff --git a/roles/ceph-rgw/tasks/rgw_pool_pgs.yml b/roles/ceph-rgw/tasks/rgw_pool_pgs.yml
new file mode 100644
index 000000000..1e848b5c0
--- /dev/null
+++ b/roles/ceph-rgw/tasks/rgw_pool_pgs.yml
@@ -0,0 +1,7 @@
+---
+- name: create rgw pools if create_pools is defined
+  command: ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}
+  changed_when: false
+  failed_when: false
+  when: create_pools is defined
+  with_dict: "{{ create_pools }}"
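To show how the new rgw_pool_pgs.yml task consumes create_pools, here is a minimal usage sketch; the pg_num values and the assumption that the cluster is named "ceph" are illustrative only. Each key/value pair in the dictionary becomes one "osd pool create" call through the with_dict loop.

    # group_vars/rgws.yml -- hypothetical example of an uncommented create_pools
    create_pools:
      defaults.rgw.buckets.data:
        pg_num: 64
      defaults.rgw.buckets.index:
        pg_num: 32
    # With the cluster named "ceph", the task above would run, per entry:
    #   ceph --connect-timeout 5 --cluster ceph osd pool create defaults.rgw.buckets.data 64
    #   ceph --connect-timeout 5 --cluster ceph osd pool create defaults.rgw.buckets.index 32

Because the task sets changed_when and failed_when to false, reruns and pre-existing pools will not abort the play, but genuine errors from the ceph command are not surfaced either; checking the result with "ceph osd pool ls" afterwards is a reasonable sanity check.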