From 7d2a21727094dfcd0c25bd0f4596fa068ff0593e Mon Sep 17 00:00:00 2001
From: Ali Maredia
Date: Tue, 10 Sep 2019 22:01:48 +0000
Subject: [PATCH] rgw: extend automatic rgw pool creation capability

Add support for erasure code pools.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1731148
Signed-off-by: Ali Maredia
Co-authored-by: Dimitri Savineau
(cherry picked from commit 1834c1e48de4627ac9b12f7d84691080c7fd8c7a)
---
 README-MULTISITE.md                           | 47 ++++++++++++++-
 group_vars/rgws.yml.sample                    | 53 +++++++++++++----
 roles/ceph-rgw/defaults/main.yml              | 53 +++++++++++++----
 roles/ceph-rgw/tasks/main.yml                 | 36 ++---------
 roles/ceph-rgw/tasks/rgw_create_pools.yml     | 59 +++++++++++++++++++
 roles/ceph-validate/tasks/check_rgw_pools.yml | 27 +++++++++
 roles/ceph-validate/tasks/main.yml            |  6 ++
 tests/functional/add-rgws/group_vars/rgws     |  5 +-
 .../all-in-one/container/group_vars/all       |  7 +++
 tests/functional/all-in-one/group_vars/all    |  9 ++-
 .../all_daemons/container/group_vars/rgws     |  5 +-
 tests/functional/all_daemons/group_vars/rgws  |  5 +-
 .../collocation/container/group_vars/rgws     |  5 +-
 tests/functional/collocation/group_vars/rgws  |  5 +-
 tests/functional/podman/group_vars/rgws       |  5 +-
 .../rgw-multisite/container/group_vars/rgws   |  5 +-
 .../container/secondary/group_vars/rgws       |  7 ++-
 .../functional/rgw-multisite/group_vars/rgws  |  5 +-
 .../rgw-multisite/secondary/group_vars/rgws   |  7 ++-
 .../shrink_rgw/container/group_vars/rgws      |  5 +-
 tests/functional/shrink_rgw/group_vars/rgws   |  5 +-
 21 files changed, 275 insertions(+), 86 deletions(-)
 create mode 100644 roles/ceph-rgw/tasks/rgw_create_pools.yml
 create mode 100644 roles/ceph-validate/tasks/check_rgw_pools.yml

diff --git a/README-MULTISITE.md b/README-MULTISITE.md
index ef4dc4b4d..a491d4559 100644
--- a/README-MULTISITE.md
+++ b/README-MULTISITE.md
@@ -57,9 +57,30 @@ for example: `rgw_multisite_endpoints: http://foo.example.com:8080,http://bar.ex
 
 3. Run the ceph-ansible playbook on your 1st cluster
 
+3. **(Optional)** Edit the rgws.yml in group_vars for rgw related pool creation
+
+```
+rgw_create_pools:
+  "{{ rgw_zone }}.rgw.buckets.data":
+    pg_num: 64
+    size: ""
+    type: ec
+    ec_profile: myecprofile
+    ec_k: 5
+    ec_m: 3
+  "{{ rgw_zone }}.rgw.buckets.index":
+    pg_num: 8
+    size: ""
+    type: replicated
+```
+
+**Note:** A pgcalc tool should be used to determine the optimal pg_num values for the rgw.buckets.data and rgw.buckets.index pools, as well as any other pools declared in this dictionary.
+
+4. Run the ceph-ansible playbook on your 1st cluster
+
 ## Configuring the Secondary Zone in a Separate Cluster
 
-4. Edit the all.yml in group_vars
+5. Edit the all.yml in group_vars
 
 ```
 copy_admin_key: true
@@ -87,12 +108,34 @@ rgw_pullhost: cluster0-rgw0
 
 **Note:** `rgw_zone_user`, `system_access_key`, and `system_secret_key` should match what you used in the Primary Cluster
 
-**Note:** `ansible_fqdn` domain name assigned to `rgw_multisite_endpoint_addr` must be resolvable from the Primary Ceph cluster's mon and rgw node(s)
+**Note:** `ansible_fqdn` domain name assigned to `rgw_multisite_endpoint_addr` must be resolvable from the Primary Ceph cluster's mon and rgw node(s)
 
 **Note:** if there is more than 1 RGW in the Secondary Cluster, `rgw_multisite_endpoints` needs to be set with the RGWs in the Secondary Cluster just like it was set in the Primary Cluster
 
 5. Run the ceph-ansible playbook on your 2nd cluster
 
+6. **(Optional)** Edit the rgws.yml in group_vars for rgw related pool creation
+
+```
+rgw_create_pools:
+  "{{ rgw_zone }}.rgw.buckets.data":
+    pg_num: 64
+    size: ""
+    type: ec
+    ec_profile: myecprofile
+    ec_k: 5
+    ec_m: 3
+  "{{ rgw_zone }}.rgw.buckets.index":
+    pg_num: 8
+    size: ""
+    type: replicated
+```
+**Note:** The pg_num values should match the values for the rgw pools created on the primary cluster. Mismatching pg_num values on different sites can result in very poor performance.
+
+**Note:** An online pgcalc tool (e.g. https://ceph.io/pgcalc) should be used to determine the optimal pg_num values for the rgw.buckets.data and rgw.buckets.index pools, as well as any other pools declared in this dictionary.
+
+7. Run the ceph-ansible playbook on your 2nd cluster
+
 ## Conclusion
 
 You should now have a master zone on cluster0 and a secondary zone on cluster1 in an Active-Active mode.
diff --git a/group_vars/rgws.yml.sample b/group_vars/rgws.yml.sample
index 166f19f34..a3ee9f18d 100644
--- a/group_vars/rgws.yml.sample
+++ b/group_vars/rgws.yml.sample
@@ -32,21 +32,48 @@ dummy:
 # limit will require admin intervention.
 #rgw_bucket_default_quota_max_objects: 1638400 # i.e., 100K * 16
 
-# This dictionary will create pools with the given number of pgs.
-# This is important because they would be created with the default
-# of 8.
-# New pools and their corresponding pg_nums can be created
-# by adding to the rgw_create_pools dictionary (see foo).
+# Declaring rgw_create_pools will create pools with the given number of pgs,
+# size, and type. The following are some important notes on this automatic
+# pool creation:
+# - The pools and associated pg_nums below are merely examples of pools that
+#   could be automatically created when rgws are deployed.
+# - The default pg_num is 8 (from osd_pool_default_pg_num) for pools created
+#   if rgw_create_pools isn't declared and configured.
+# - A pgcalc tool should be used to determine the optimal pg_num values for
+#   the rgw.buckets.data and rgw.buckets.index pools, as well as any other
+#   pools declared in this dictionary.
+#   https://ceph.io/pgcalc is the upstream pgcalc tool.
+#   https://access.redhat.com/labsinfo/cephpgc is a pgcalc tool offered by
+#   Red Hat if you are using RHCS.
+# - The default value of {{ rgw_zone }} is 'default'.
+# - The type must be set as either 'replicated' or 'ec' for
+#   each pool.
+# - If a pool's type is 'ec', k and m values must be set via
+#   the ec_k and ec_m variables.
+
 #rgw_create_pools:
-#  defaults.rgw.buckets.data:
+#  "{{ rgw_zone }}.rgw.buckets.data":
+#    pg_num: 64
+#    type: ec
+#    ec_profile: myecprofile
+#    ec_k: 5
+#    ec_m: 3
+#  "{{ rgw_zone }}.rgw.buckets.index":
 #    pg_num: 16
-#    size: ""
-#  defaults.rgw.buckets.index:
-#    pg_num: 32
-#    size: ""
-#  foo:
-#    pg_num: 4
-#    size: ""
+#    size: 3
+#    type: replicated
+#  "{{ rgw_zone }}.rgw.meta":
+#    pg_num: 8
+#    size: 3
+#    type: replicated
+#  "{{ rgw_zone }}.rgw.log":
+#    pg_num: 8
+#    size: 3
+#    type: replicated
+#  "{{ rgw_zone }}.rgw.control":
+#    pg_num: 8
+#    size: 3
+#    type: replicated
 
 
 ##########
diff --git a/roles/ceph-rgw/defaults/main.yml b/roles/ceph-rgw/defaults/main.yml
index aa6990575..ce60be353 100644
--- a/roles/ceph-rgw/defaults/main.yml
+++ b/roles/ceph-rgw/defaults/main.yml
@@ -24,21 +24,48 @@ copy_admin_key: false
 # limit will require admin intervention.
 #rgw_bucket_default_quota_max_objects: 1638400 # i.e., 100K * 16
 
-# This dictionary will create pools with the given number of pgs.
-# This is important because they would be created with the default
-# of 8.
-# New pools and their corresponding pg_nums can be created
-# by adding to the rgw_create_pools dictionary (see foo).
+# Declaring rgw_create_pools will create pools with the given number of pgs,
+# size, and type. The following are some important notes on this automatic
+# pool creation:
+# - The pools and associated pg_nums below are merely examples of pools that
+#   could be automatically created when rgws are deployed.
+# - The default pg_num is 8 (from osd_pool_default_pg_num) for pools created
+#   if rgw_create_pools isn't declared and configured.
+# - A pgcalc tool should be used to determine the optimal pg_num values for
+#   the rgw.buckets.data and rgw.buckets.index pools, as well as any other
+#   pools declared in this dictionary.
+#   https://ceph.io/pgcalc is the upstream pgcalc tool.
+#   https://access.redhat.com/labsinfo/cephpgc is a pgcalc tool offered by
+#   Red Hat if you are using RHCS.
+# - The default value of {{ rgw_zone }} is 'default'.
+# - The type must be set as either 'replicated' or 'ec' for
+#   each pool.
+# - If a pool's type is 'ec', k and m values must be set via
+#   the ec_k and ec_m variables.
+
 #rgw_create_pools:
-#  defaults.rgw.buckets.data:
+#  "{{ rgw_zone }}.rgw.buckets.data":
+#    pg_num: 64
+#    type: ec
+#    ec_profile: myecprofile
+#    ec_k: 5
+#    ec_m: 3
+#  "{{ rgw_zone }}.rgw.buckets.index":
 #    pg_num: 16
-#    size: ""
-#  defaults.rgw.buckets.index:
-#    pg_num: 32
-#    size: ""
-#  foo:
-#    pg_num: 4
-#    size: ""
+#    size: 3
+#    type: replicated
+#  "{{ rgw_zone }}.rgw.meta":
+#    pg_num: 8
+#    size: 3
+#    type: replicated
+#  "{{ rgw_zone }}.rgw.log":
+#    pg_num: 8
+#    size: 3
+#    type: replicated
+#  "{{ rgw_zone }}.rgw.control":
+#    pg_num: 8
+#    size: 3
+#    type: replicated
 
 
 ##########
diff --git a/roles/ceph-rgw/tasks/main.yml b/roles/ceph-rgw/tasks/main.yml
index 55351058f..654b25e67 100644
--- a/roles/ceph-rgw/tasks/main.yml
+++ b/roles/ceph-rgw/tasks/main.yml
@@ -6,6 +6,11 @@
   include_tasks: pre_requisite.yml
   when: not containerized_deployment | bool
 
+- name: rgw pool creation tasks
+  include_tasks: rgw_create_pools.yml
+  run_once: true
+  when: rgw_create_pools is defined
+
 - name: include_tasks openstack-keystone.yml
   include_tasks: openstack-keystone.yml
   when: radosgw_keystone_ssl | bool
@@ -21,34 +26,3 @@
 - name: include_tasks multisite/main.yml
   include_tasks: multisite/main.yml
   when: rgw_multisite | bool
-
-- name: rgw pool related tasks
-  when: rgw_create_pools is defined
-  block:
-    - name: create rgw pools if rgw_create_pools is defined
-      command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }}"
-      changed_when: false
-      with_dict: "{{ rgw_create_pools }}"
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-      register: result
-      until: result is succeeded
-      run_once: true
-
-    - name: customize pool size
-      command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool set {{ item.key }} size {{ item.value.size | default(osd_pool_default_size) }}"
-      with_dict: "{{ rgw_create_pools }}"
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-      changed_when: false
-      run_once: true
-      register: result
-      until: result is succeeded
-      when: item.value.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
-
-    - name: set the rgw_create_pools pools application to rgw
-      command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool application enable {{ item.key }} rgw"
-      changed_when: false
-      with_dict: "{{ rgw_create_pools }}"
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-      register: result
-      until: result is succeeded
-      run_once: true
diff --git a/roles/ceph-rgw/tasks/rgw_create_pools.yml b/roles/ceph-rgw/tasks/rgw_create_pools.yml
new file mode 100644
index 000000000..33ce5a8b9
--- /dev/null
+++ b/roles/ceph-rgw/tasks/rgw_create_pools.yml
@@ -0,0 +1,59 @@
+---
+- name: remove ec profile
+  command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd erasure-code-profile rm {{ item.value.ec_profile }}"
+  with_dict: "{{ rgw_create_pools }}"
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  changed_when: false
+  when:
+    - item.value.type is defined
+    - item.value.type == 'ec'
+  failed_when: false
+
+- name: set ec profile
+  command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd erasure-code-profile set {{ item.value.ec_profile }} k={{ item.value.ec_k }} m={{ item.value.ec_m }}"
+  with_dict: "{{ rgw_create_pools }}"
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  changed_when: false
+  when:
+    - item.value.type is defined
+    - item.value.type == 'ec'
+
+- name: set crush rule
+  command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd crush rule create-erasure {{ item.key }} {{ item.value.ec_profile }}"
+  with_dict: "{{ rgw_create_pools }}"
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  changed_when: false
+  when:
+    - item.value.type is defined
+    - item.value.type == 'ec'
+
+- name: create ec pools for rgw
+  command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }} erasure {{ item.value.ec_profile }}"
+  with_dict: "{{ rgw_create_pools }}"
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  changed_when: false
+  when:
+    - item.value.type is defined
+    - item.value.type == 'ec'
+
+- name: create replicated pools for rgw
+  command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }} replicated"
+  changed_when: false
+  with_dict: "{{ rgw_create_pools }}"
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  when: item.value.type is not defined or item.value.type == 'replicated'
+
+- name: customize replicated pool size
+  command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool set {{ item.key }} size {{ item.value.size | default(osd_pool_default_size) }}"
+  with_dict: "{{ rgw_create_pools }}"
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  changed_when: false
+  when:
+    - item.value.type is not defined or item.value.type == 'replicated'
+    - item.value.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
+
+- name: set the rgw_create_pools pools application to rgw
+  command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool application enable {{ item.key }} rgw"
+  changed_when: false
+  with_dict: "{{ rgw_create_pools }}"
+  delegate_to: "{{ groups[mon_group_name][0] }}"
diff --git a/roles/ceph-validate/tasks/check_rgw_pools.yml b/roles/ceph-validate/tasks/check_rgw_pools.yml
new file mode 100644
index 000000000..7f59046e3
--- /dev/null
+++ b/roles/ceph-validate/tasks/check_rgw_pools.yml
@@ -0,0 +1,27 @@
+---
+- name: fail if ec_profile is not set for ec pools
+  fail:
+    msg: "ec_profile must be set for ec pools"
+  with_dict: "{{ rgw_create_pools }}"
+  when:
+    - item.value.type is defined
+    - item.value.type == 'ec'
+    - item.value.ec_profile is undefined
+
+- name: fail if ec_k is not set for ec pools
+  fail:
+    msg: "ec_k must be set for ec pools"
+  with_dict: "{{ rgw_create_pools }}"
+  when:
+    - item.value.type is defined
+    - item.value.type == 'ec'
+    - item.value.ec_k is undefined
+
+- name: fail if ec_m is not set for ec pools
+  fail:
+    msg: "ec_m must be set for ec pools"
+  with_dict: "{{ rgw_create_pools }}"
+  when:
+    - item.value.type is defined
+    - item.value.type == 'ec'
+    - item.value.ec_m is undefined
diff --git a/roles/ceph-validate/tasks/main.yml b/roles/ceph-validate/tasks/main.yml
index f99f9d6cf..757cd1b2c 100644
--- a/roles/ceph-validate/tasks/main.yml
+++ b/roles/ceph-validate/tasks/main.yml
@@ -168,6 +168,12 @@
     - radosgw_address == "x.x.x.x"
     - radosgw_address_block == "subnet"
 
+- name: include check_rgw_pools.yml
+  include_tasks: check_rgw_pools.yml
+  when:
+    - inventory_hostname in groups.get(rgw_group_name, [])
+    - rgw_create_pools is defined
+
 - name: include check_rgw_multisite.yml
   include_tasks: check_rgw_multisite.yml
   when:
diff --git a/tests/functional/add-rgws/group_vars/rgws b/tests/functional/add-rgws/group_vars/rgws
index a88254314..d9c09f81f 100644
--- a/tests/functional/add-rgws/group_vars/rgws
+++ b/tests/functional/add-rgws/group_vars/rgws
@@ -1,8 +1,9 @@
 copy_admin_key: true
 rgw_create_pools:
   foo:
-    pg_num: 17
+    pg_num: 16
+    type: replicated
   bar:
-    pg_num: 19
+    pg_num: 16
 rgw_override_bucket_index_max_shards: 16
 rgw_bucket_default_quota_max_objects: 1638400
diff --git a/tests/functional/all-in-one/container/group_vars/all b/tests/functional/all-in-one/container/group_vars/all
index 658d5b18a..8ed94b1ec 100644
--- a/tests/functional/all-in-one/container/group_vars/all
+++ b/tests/functional/all-in-one/container/group_vars/all
@@ -24,8 +24,15 @@ handler_health_osd_check_delay: 10
 rgw_create_pools:
   foo:
     pg_num: 16
+    type: replicated
   bar:
     pg_num: 16
+  ec:
+    pg_num: 16
+    type: ec
+    ec_profile: myecprofile
+    ec_k: 2
+    ec_m: 1
 ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
diff --git a/tests/functional/all-in-one/group_vars/all b/tests/functional/all-in-one/group_vars/all
index ea859e68e..837bb9243 100644
--- a/tests/functional/all-in-one/group_vars/all
+++ b/tests/functional/all-in-one/group_vars/all
@@ -30,5 +30,12 @@ lvm_volumes:
 rgw_create_pools:
   foo:
     pg_num: 16
+    type: replicated
   bar:
-    pg_num: 16
\ No newline at end of file
+    pg_num: 16
+  ec:
+    pg_num: 16
+    type: ec
+    ec_profile: myecprofile
+    ec_k: 2
+    ec_m: 1
\ No newline at end of file
diff --git a/tests/functional/all_daemons/container/group_vars/rgws b/tests/functional/all_daemons/container/group_vars/rgws
index 8f2a9a368..639ade9ce 100644
--- a/tests/functional/all_daemons/container/group_vars/rgws
+++ b/tests/functional/all_daemons/container/group_vars/rgws
@@ -2,6 +2,7 @@
 copy_admin_key: True
 rgw_create_pools:
   foo:
-    pg_num: 17
+    pg_num: 16
+    type: replicated
   bar:
-    pg_num: 19
+    pg_num: 16
diff --git a/tests/functional/all_daemons/group_vars/rgws b/tests/functional/all_daemons/group_vars/rgws
index a88254314..d9c09f81f 100644
--- a/tests/functional/all_daemons/group_vars/rgws
+++ b/tests/functional/all_daemons/group_vars/rgws
@@ -1,8 +1,9 @@
 copy_admin_key: true
 rgw_create_pools:
   foo:
-    pg_num: 17
+    pg_num: 16
+    type: replicated
   bar:
-    pg_num: 19
+    pg_num: 16
 rgw_override_bucket_index_max_shards: 16
 rgw_bucket_default_quota_max_objects: 1638400
diff --git a/tests/functional/collocation/container/group_vars/rgws b/tests/functional/collocation/container/group_vars/rgws
index 265d18208..bcd5cc30f 100644
--- a/tests/functional/collocation/container/group_vars/rgws
+++ b/tests/functional/collocation/container/group_vars/rgws
@@ -1,6 +1,7 @@
 ---
 rgw_create_pools:
   foo:
-    pg_num: 17
+    pg_num: 16
+    type: replicated
   bar:
-    pg_num: 19
\ No newline at end of file
+    pg_num: 16
diff --git a/tests/functional/collocation/group_vars/rgws b/tests/functional/collocation/group_vars/rgws
index 265d18208..bcd5cc30f 100644
--- a/tests/functional/collocation/group_vars/rgws
+++ b/tests/functional/collocation/group_vars/rgws
@@ -1,6 +1,7 @@
 ---
 rgw_create_pools:
   foo:
-    pg_num: 17
+    pg_num: 16
+    type: replicated
   bar:
-    pg_num: 19
\ No newline at end of file
+    pg_num: 16
diff --git a/tests/functional/podman/group_vars/rgws b/tests/functional/podman/group_vars/rgws
index 8f2a9a368..639ade9ce 100644
--- a/tests/functional/podman/group_vars/rgws
+++ b/tests/functional/podman/group_vars/rgws
@@ -2,6 +2,7 @@
 copy_admin_key: True
 rgw_create_pools:
   foo:
-    pg_num: 17
+    pg_num: 16
+    type: replicated
   bar:
-    pg_num: 19
+    pg_num: 16
diff --git a/tests/functional/rgw-multisite/container/group_vars/rgws b/tests/functional/rgw-multisite/container/group_vars/rgws
index 23b6f80d8..69475f4f0 100644
--- a/tests/functional/rgw-multisite/container/group_vars/rgws
+++ b/tests/functional/rgw-multisite/container/group_vars/rgws
@@ -13,8 +13,9 @@ system_access_key: 6kWkikvapSnHyE22P7nO
 system_secret_key: MGecsMrWtKZgngOHZdrd6d3JxGO5CPWgT2lcnpSt
 rgw_create_pools:
   foo:
-    pg_num: 17
+    pg_num: 16
+    type: replicated
   bar:
-    pg_num: 19
+    pg_num: 16
 rgw_override_bucket_index_max_shards: 16
 rgw_bucket_default_quota_max_objects: 1638400
diff --git a/tests/functional/rgw-multisite/container/secondary/group_vars/rgws b/tests/functional/rgw-multisite/container/secondary/group_vars/rgws
index 60860e6a5..2b3f09b5b 100644
--- a/tests/functional/rgw-multisite/container/secondary/group_vars/rgws
+++ b/tests/functional/rgw-multisite/container/secondary/group_vars/rgws
@@ -1,8 +1,9 @@
 ---
 rgw_create_pools:
   foo:
-    pg_num: 17
+    pg_num: 16
+    type: replicated
   bar:
-    pg_num: 19
+    pg_num: 16
 rgw_override_bucket_index_max_shards: 16
-rgw_bucket_default_quota_max_objects: 1638400
\ No newline at end of file
+rgw_bucket_default_quota_max_objects: 1638400
diff --git a/tests/functional/rgw-multisite/group_vars/rgws b/tests/functional/rgw-multisite/group_vars/rgws
index 23b6f80d8..69475f4f0 100644
--- a/tests/functional/rgw-multisite/group_vars/rgws
+++ b/tests/functional/rgw-multisite/group_vars/rgws
@@ -13,8 +13,9 @@ system_access_key: 6kWkikvapSnHyE22P7nO
 system_secret_key: MGecsMrWtKZgngOHZdrd6d3JxGO5CPWgT2lcnpSt
 rgw_create_pools:
   foo:
-    pg_num: 17
+    pg_num: 16
+    type: replicated
   bar:
-    pg_num: 19
+    pg_num: 16
 rgw_override_bucket_index_max_shards: 16
 rgw_bucket_default_quota_max_objects: 1638400
diff --git a/tests/functional/rgw-multisite/secondary/group_vars/rgws b/tests/functional/rgw-multisite/secondary/group_vars/rgws
index 60860e6a5..2b3f09b5b 100644
--- a/tests/functional/rgw-multisite/secondary/group_vars/rgws
+++ b/tests/functional/rgw-multisite/secondary/group_vars/rgws
@@ -1,8 +1,9 @@
 ---
 rgw_create_pools:
   foo:
-    pg_num: 17
+    pg_num: 16
+    type: replicated
   bar:
-    pg_num: 19
+    pg_num: 16
 rgw_override_bucket_index_max_shards: 16
-rgw_bucket_default_quota_max_objects: 1638400
\ No newline at end of file
+rgw_bucket_default_quota_max_objects: 1638400
diff --git a/tests/functional/shrink_rgw/container/group_vars/rgws b/tests/functional/shrink_rgw/container/group_vars/rgws
index 77466c8ae..66e660d23 100644
--- a/tests/functional/shrink_rgw/container/group_vars/rgws
+++ b/tests/functional/shrink_rgw/container/group_vars/rgws
@@ -2,8 +2,9 @@
 copy_admin_key: true
 rgw_create_pools:
   foo:
-    pg_num: 17
+    pg_num: 16
+    type: replicated
   bar:
-    pg_num: 19
+    pg_num: 16
 rgw_override_bucket_index_max_shards: 16
 rgw_bucket_default_quota_max_objects: 1638400
diff --git a/tests/functional/shrink_rgw/group_vars/rgws b/tests/functional/shrink_rgw/group_vars/rgws
index 77466c8ae..66e660d23 100644
--- a/tests/functional/shrink_rgw/group_vars/rgws
+++ b/tests/functional/shrink_rgw/group_vars/rgws
@@ -2,8 +2,9 @@
 copy_admin_key: true
 rgw_create_pools:
   foo:
-    pg_num: 17
+    pg_num: 16
+    type: replicated
   bar:
-    pg_num: 19
+    pg_num: 16
 rgw_override_bucket_index_max_shards: 16
 rgw_bucket_default_quota_max_objects: 1638400
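
For reference, the erasure-coded example above expands to roughly the following ceph commands once the new rgw_create_pools.yml tasks run. This is a hedged sketch only: it assumes rgw_zone is left at its default value of 'default' and the example profile name myecprofile from the samples, and it omits the {{ container_exec_cmd }}, --connect-timeout and --cluster wrappers used by the actual tasks, which are delegated to the first monitor.

```
# "set ec profile" task, with the example ec_k: 5 / ec_m: 3 values substituted
ceph osd erasure-code-profile set myecprofile k=5 m=3

# "set crush rule" and "create ec pools for rgw" tasks for the data pool
ceph osd crush rule create-erasure default.rgw.buckets.data myecprofile
ceph osd pool create default.rgw.buckets.data 64 erasure myecprofile

# "create replicated pools for rgw" task for the index pool
ceph osd pool create default.rgw.buckets.index 8 replicated

# "set the rgw_create_pools pools application to rgw" task
ceph osd pool application enable default.rgw.buckets.data rgw
ceph osd pool application enable default.rgw.buckets.index rgw
```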