rgw: extend automatic rgw pool creation capability

Add support for erasure code pools.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1731148

Signed-off-by: Ali Maredia <amaredia@redhat.com>
Co-authored-by: Dimitri Savineau <dsavinea@redhat.com>
pull/5054/head
Ali Maredia 2019-09-10 22:01:48 +00:00 committed by Guillaume Abrioux
parent 9d081e2453
commit 1834c1e48d
21 changed files with 253 additions and 65 deletions


@@ -57,9 +57,30 @@ for example: `rgw_multisite_endpoints: http://foo.example.com:8080,http://bar.ex
3. **(Optional)** Edit the rgws.yml in group_vars for rgw related pool creation
```
rgw_create_pools:
  "{{ rgw_zone }}.rgw.buckets.data":
    pg_num: 64
    size: ""
    type: ec
    ec_profile: myecprofile
    ec_k: 5
    ec_m: 3
  "{{ rgw_zone }}.rgw.buckets.index":
    pg_num: 8
    size: ""
    type: replicated
```
**Note:** A pgcalc tool should be used to determine the optimal pg_num values for the rgw.buckets.data and rgw.buckets.index pools, as well as any other pools declared in this dictionary; the sketch after step 4 shows the `ceph` commands the example above maps to.
4. Run the ceph-ansible playbook on your 1st cluster
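For reference, the pool-creation tasks added by this change boil down to a handful of `ceph` CLI calls. A minimal sketch of what the example above translates to, assuming the default zone name `default` and a monitor node with an admin keyring (the real tasks also wrap each call in `{{ container_exec_cmd }}` and fall back to `osd_pool_default_pg_num` when `pg_num` is omitted):
```
# erasure-coded data pool: EC profile, matching crush rule, then the pool itself
ceph osd erasure-code-profile set myecprofile k=5 m=3
ceph osd crush rule create-erasure default.rgw.buckets.data myecprofile
ceph osd pool create default.rgw.buckets.data 64 erasure myecprofile

# replicated index pool
ceph osd pool create default.rgw.buckets.index 8 replicated

# tag both pools for use by rgw
ceph osd pool application enable default.rgw.buckets.data rgw
ceph osd pool application enable default.rgw.buckets.index rgw
```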
## Configuring the Secondary Zone in a Separate Cluster
5. Edit the all.yml in group_vars
```
copy_admin_key: true
@@ -87,12 +108,34 @@ rgw_pullhost: cluster0-rgw0
**Note:** `rgw_zone_user`, `system_access_key`, and `system_secret_key` should match what you used in the Primary Cluster
**Note:** `ansible_fqdn` domain name assigned to `rgw_multisite_endpoint_addr` must be resolvable from the Primary Ceph cluster's mon and rgw node(s)
**Note:** If there is more than one RGW in the Secondary Cluster, `rgw_multisite_endpoints` needs to be set with the RGWs in the Secondary Cluster just like it was set in the Primary Cluster
6. **(Optional)** Edit the rgws.yml in group_vars for rgw related pool creation
```
rgw_create_pools:
  "{{ rgw_zone }}.rgw.buckets.data":
    pg_num: 64
    size: ""
    type: ec
    ec_profile: myecprofile
    ec_k: 5
    ec_m: 3
  "{{ rgw_zone }}.rgw.buckets.index":
    pg_num: 8
    size: ""
    type: replicated
```
**Note:** The pg_num values should match the values for the rgw pools created on the primary cluster. Mismatching pg_num values on different sites can result in very poor performance; the check after step 7 shows how to verify that they match.
**Note:** An online pgcalc tool (ex: https://ceph.io/pgcalc) should be used to determine the optimal pg_num values for the rgw.buckets.data and rgw.buckets.index pools, as well as any other pools declared in this dictionary.
7. Run the ceph-ansible playbook on your 2nd cluster
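To verify that the two sites ended up with matching values, a quick manual check (a sketch; substitute each cluster's own `rgw_zone` for the pool name prefix) is to compare `pg_num` on a monitor node of each cluster:
```
ceph osd pool get <rgw_zone>.rgw.buckets.data pg_num
ceph osd pool get <rgw_zone>.rgw.buckets.index pg_num
```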
## Conclusion
You should now have a master zone on cluster0 and a secondary zone on cluster1 in an Active-Active mode.


@@ -32,21 +32,48 @@ dummy:
# limit will require admin intervention.
#rgw_bucket_default_quota_max_objects: 1638400 # i.e., 100K * 16
# Declaring rgw_create_pools will create pools with the given number of pgs,
# size, and type. The following are some important notes on this automatic
# pool creation:
# - The pools and associated pg_num values below are merely examples of pools
#   that could be automatically created when rgws are deployed.
# - The default pg_num is 8 (from osd_pool_default_pg_num) for pools created
#   if rgw_create_pools isn't declared and configured.
# - A pgcalc tool should be used to determine the optimal pg_num values for
#   the rgw.buckets.data and rgw.buckets.index pools, as well as any other
#   pools declared in this dictionary.
#   https://ceph.io/pgcalc is the upstream pgcalc tool.
#   https://access.redhat.com/labsinfo/cephpgc is a pgcalc tool offered by
#   Red Hat if you are using RHCS.
# - The default value of {{ rgw_zone }} is 'default'.
# - The type must be set as either 'replicated' or 'ec' for each pool.
# - If a pool's type is 'ec', k and m values must be set via the
#   ec_k and ec_m variables.
#rgw_create_pools:
#  "{{ rgw_zone }}.rgw.buckets.data":
#    pg_num: 64
#    type: ec
#    ec_profile: myecprofile
#    ec_k: 5
#    ec_m: 3
#  "{{ rgw_zone }}.rgw.buckets.index":
#    pg_num: 16
#    size: 3
#    type: replicated
#  "{{ rgw_zone }}.rgw.meta":
#    pg_num: 8
#    size: 3
#    type: replicated
#  "{{ rgw_zone }}.rgw.log":
#    pg_num: 8
#    size: 3
#    type: replicated
#  "{{ rgw_zone }}.rgw.control":
#    pg_num: 8
#    size: 3
#    type: replicated
##########


@@ -24,21 +24,48 @@ copy_admin_key: false
# limit will require admin intervention.
#rgw_bucket_default_quota_max_objects: 1638400 # i.e., 100K * 16
# Declaring rgw_create_pools will create pools with the given number of pgs,
# size, and type. The following are some important notes on this automatic
# pool creation:
# - The pools and associated pg_num values below are merely examples of pools
#   that could be automatically created when rgws are deployed.
# - The default pg_num is 8 (from osd_pool_default_pg_num) for pools created
#   if rgw_create_pools isn't declared and configured.
# - A pgcalc tool should be used to determine the optimal pg_num values for
#   the rgw.buckets.data and rgw.buckets.index pools, as well as any other
#   pools declared in this dictionary.
#   https://ceph.io/pgcalc is the upstream pgcalc tool.
#   https://access.redhat.com/labsinfo/cephpgc is a pgcalc tool offered by
#   Red Hat if you are using RHCS.
# - The default value of {{ rgw_zone }} is 'default'.
# - The type must be set as either 'replicated' or 'ec' for each pool.
# - If a pool's type is 'ec', k and m values must be set via the
#   ec_k and ec_m variables.
#rgw_create_pools:
#  "{{ rgw_zone }}.rgw.buckets.data":
#    pg_num: 64
#    type: ec
#    ec_profile: myecprofile
#    ec_k: 5
#    ec_m: 3
#  "{{ rgw_zone }}.rgw.buckets.index":
#    pg_num: 16
#    size: 3
#    type: replicated
#  "{{ rgw_zone }}.rgw.meta":
#    pg_num: 8
#    size: 3
#    type: replicated
#  "{{ rgw_zone }}.rgw.log":
#    pg_num: 8
#    size: 3
#    type: replicated
#  "{{ rgw_zone }}.rgw.control":
#    pg_num: 8
#    size: 3
#    type: replicated
##########


@@ -6,6 +6,11 @@
  include_tasks: pre_requisite.yml
  when: not containerized_deployment | bool

- name: rgw pool creation tasks
  include_tasks: rgw_create_pools.yml
  run_once: true
  when: rgw_create_pools is defined

- name: include_tasks openstack-keystone.yml
  include_tasks: openstack-keystone.yml
  when: radosgw_keystone_ssl | bool
@@ -21,34 +26,3 @@
- name: include_tasks multisite/main.yml
  include_tasks: multisite/main.yml
  when: rgw_multisite | bool


@@ -0,0 +1,59 @@
---
- name: remove ec profile
  command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd erasure-code-profile rm {{ item.value.ec_profile }}"
  with_dict: "{{ rgw_create_pools }}"
  delegate_to: "{{ groups[mon_group_name][0] }}"
  changed_when: false
  when:
    - item.value.type is defined
    - item.value.type == 'ec'
  failed_when: false

- name: set ec profile
  command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd erasure-code-profile set {{ item.value.ec_profile }} k={{ item.value.ec_k }} m={{ item.value.ec_m }}"
  with_dict: "{{ rgw_create_pools }}"
  delegate_to: "{{ groups[mon_group_name][0] }}"
  changed_when: false
  when:
    - item.value.type is defined
    - item.value.type == 'ec'

- name: set crush rule
  command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd crush rule create-erasure {{ item.key }} {{ item.value.ec_profile }}"
  with_dict: "{{ rgw_create_pools }}"
  delegate_to: "{{ groups[mon_group_name][0] }}"
  changed_when: false
  when:
    - item.value.type is defined
    - item.value.type == 'ec'

- name: create ec pools for rgw
  command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }} erasure {{ item.value.ec_profile }}"
  with_dict: "{{ rgw_create_pools }}"
  delegate_to: "{{ groups[mon_group_name][0] }}"
  changed_when: false
  when:
    - item.value.type is defined
    - item.value.type == 'ec'

- name: create replicated pools for rgw
  command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }} replicated"
  changed_when: false
  with_dict: "{{ rgw_create_pools }}"
  delegate_to: "{{ groups[mon_group_name][0] }}"
  when: item.value.type is not defined or item.value.type == 'replicated'

- name: customize replicated pool size
  command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool set {{ item.key }} size {{ item.value.size | default(osd_pool_default_size) }}"
  with_dict: "{{ rgw_create_pools }}"
  delegate_to: "{{ groups[mon_group_name][0] }}"
  changed_when: false
  when:
    - item.value.type is not defined or item.value.type == 'replicated'
    - item.value.size | default(osd_pool_default_size) != ceph_osd_pool_default_size

- name: set the rgw_create_pools pools application to rgw
  command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool application enable {{ item.key }} rgw"
  changed_when: false
  with_dict: "{{ rgw_create_pools }}"
  delegate_to: "{{ groups[mon_group_name][0] }}"
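These tasks are thin wrappers around the corresponding `ceph` CLI calls. A minimal sketch of how to verify the result by hand on the delegated monitor node, assuming the example profile name `myecprofile` and a zone named `default` (so the data pool is `default.rgw.buckets.data`):
```
# show the created profile (k, m, plugin, crush-failure-domain, ...)
ceph osd erasure-code-profile get myecprofile

# confirm the pool is erasure-coded and bound to that profile
ceph osd pool get default.rgw.buckets.data erasure_code_profile

# confirm the pool was tagged for rgw
ceph osd pool application get default.rgw.buckets.data
```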


@@ -0,0 +1,27 @@
---
- name: fail if ec_profile is not set for ec pools
  fail:
    msg: "ec_profile must be set for ec pools"
  with_dict: "{{ rgw_create_pools }}"
  when:
    - item.value.type is defined
    - item.value.type == 'ec'
    - item.value.ec_profile is undefined

- name: fail if ec_k is not set for ec pools
  fail:
    msg: "ec_k must be set for ec pools"
  with_dict: "{{ rgw_create_pools }}"
  when:
    - item.value.type is defined
    - item.value.type == 'ec'
    - item.value.ec_k is undefined

- name: fail if ec_m is not set for ec pools
  fail:
    msg: "ec_m must be set for ec pools"
  with_dict: "{{ rgw_create_pools }}"
  when:
    - item.value.type is defined
    - item.value.type == 'ec'
    - item.value.ec_m is undefined
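These checks only confirm that the EC-related variables are present; they do not verify that the cluster can actually satisfy the profile. A rough manual sanity check (not part of the playbook) is that an EC pool needs at least `k + m` OSDs, and with the default host failure domain, that many hosts:
```
# with ec_k: 5 and ec_m: 3 you need at least 8 OSDs (and 8 hosts for the
# default host failure domain); check what the cluster actually has
ceph osd stat
ceph osd tree
```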


@@ -180,6 +180,12 @@
    - radosgw_address == "x.x.x.x"
    - radosgw_address_block == "subnet"

- name: include check_rgw_pools.yml
  include_tasks: check_rgw_pools.yml
  when:
    - inventory_hostname in groups.get(rgw_group_name, [])
    - rgw_create_pools is defined

- name: include check_rgw_multisite.yml
  include_tasks: check_rgw_multisite.yml
  when:


@@ -2,6 +2,7 @@ copy_admin_key: true
rgw_create_pools:
  foo:
    pg_num: 16
    type: replicated
  bar:
    pg_num: 16
rgw_override_bucket_index_max_shards: 16


@@ -25,8 +25,15 @@ handler_health_osd_check_delay: 10
rgw_create_pools:
  foo:
    pg_num: 16
    type: replicated
  bar:
    pg_num: 16
  ec:
    pg_num: 16
    type: ec
    ec_profile: myecprofile
    ec_k: 2
    ec_m: 1
ceph_osd_docker_run_script_path: /var/tmp
osd_objectstore: "bluestore"
lvm_volumes:


@@ -31,5 +31,12 @@ lvm_volumes:
rgw_create_pools:
  foo:
    pg_num: 16
    type: replicated
  bar:
    pg_num: 16
  ec:
    pg_num: 16
    type: ec
    ec_profile: myecprofile
    ec_k: 2
    ec_m: 1


@@ -3,5 +3,6 @@ copy_admin_key: True
rgw_create_pools:
  foo:
    pg_num: 16
    type: replicated
  bar:
    pg_num: 16


@@ -2,6 +2,7 @@ copy_admin_key: true
rgw_create_pools:
  foo:
    pg_num: 16
    type: replicated
  bar:
    pg_num: 16
rgw_override_bucket_index_max_shards: 16


@@ -2,5 +2,6 @@
rgw_create_pools:
  foo:
    pg_num: 16
    type: replicated
  bar:
    pg_num: 16


@@ -2,5 +2,6 @@
rgw_create_pools:
  foo:
    pg_num: 16
    type: replicated
  bar:
    pg_num: 16


@@ -3,5 +3,5 @@ copy_admin_key: True
rgw_create_pools:
  foo:
    pg_num: 16
    type: replicated
  bar:
    pg_num: 16


@@ -14,6 +14,7 @@ system_secret_key: MGecsMrWtKZgngOHZdrd6d3JxGO5CPWgT2lcnpSt
rgw_create_pools:
  foo:
    pg_num: 16
    type: replicated
  bar:
    pg_num: 16
rgw_override_bucket_index_max_shards: 16


@@ -2,6 +2,7 @@
rgw_create_pools:
  foo:
    pg_num: 16
    type: replicated
  bar:
    pg_num: 16
rgw_override_bucket_index_max_shards: 16


@@ -14,6 +14,7 @@ system_secret_key: MGecsMrWtKZgngOHZdrd6d3JxGO5CPWgT2lcnpSt
rgw_create_pools:
  foo:
    pg_num: 16
    type: replicated
  bar:
    pg_num: 16
rgw_override_bucket_index_max_shards: 16


@@ -2,6 +2,7 @@
rgw_create_pools:
  foo:
    pg_num: 16
    type: replicated
  bar:
    pg_num: 16
rgw_override_bucket_index_max_shards: 16


@@ -3,6 +3,7 @@ copy_admin_key: true
rgw_create_pools:
  foo:
    pg_num: 16
    type: replicated
  bar:
    pg_num: 16
rgw_override_bucket_index_max_shards: 16


@@ -3,6 +3,7 @@ copy_admin_key: true
rgw_create_pools:
  foo:
    pg_num: 16
    type: replicated
  bar:
    pg_num: 16
rgw_override_bucket_index_max_shards: 16