---
# You can override vars by using host or group vars
###########
# GENERAL #
###########
# Even though RGW nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on RGW nodes. Setting 'copy_admin_key' to 'true'
# will copy the admin key to the /etc/ceph/ directory
copy_admin_key: false
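# For example (the group_vars path below is illustrative and depends on your
# inventory layout), you could enable this for your rgw host group only:
#
#   # group_vars/rgws.yml
#   copy_admin_key: true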
##########
# TUNING #
##########
# Declaring rgw_create_pools will create pools with the given number of pgs,
# size, and type. The following are some important notes on this automatic
# pool creation:
# - The pools and associated pg_num's below are merely examples of pools that
# could be automatically created when rgws are deployed.
# - The default pg_num is 8 (from osd_pool_default_pg_num) for any pool created
# when rgw_create_pools isn't declared and configured.
# - A pgcalc tool should be used to determine the optimal sizes for
# the rgw.buckets.data and rgw.buckets.index pools, as well as any other
# pools declared in this dictionary (a rough rule of thumb is given after these notes).
# https://ceph.io/pgcalc is the upstream pgcalc tool
# https://access.redhat.com/labsinfo/cephpgc is a pgcalc tool offered by
# Red Hat if you are using RHCS.
# - The default value of {{ rgw_zone }} is 'default'.
# - The type must be set as either 'replicated' or 'ec' for
# each pool.
# - If a pool's type is 'ec', k and m values must be set via
# the ec_k and ec_m variables.
# - The rule_name key can be used to set a specific crush rule (the rule must already exist).
# If the key isn't set, it falls back to the default replicated_rule.
# This only works for the replicated pool type, not erasure.
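# As a rough rule of thumb (an estimate only; the pgcalc tools above should be
# preferred for real sizing), the total PG count for a pool is often
# approximated as (number of OSDs * 100) / replica size, rounded to a power of
# two, then weighted by the pool's expected share of the data.
# For example, with 20 OSDs and size 3: 20 * 100 / 3 ~= 667, rounded to 512 or
# 1024 PGs in total, most of which would go to the rgw.buckets.data pool.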
# rgw_create_pools:
#   "{{ rgw_zone }}.rgw.buckets.data":
#     pg_num: 64
#     type: ec
#     ec_profile: myecprofile
#     ec_k: 5
#     ec_m: 3
#   "{{ rgw_zone }}.rgw.buckets.index":
#     pg_num: 16
#     size: 3
#     type: replicated
#   "{{ rgw_zone }}.rgw.meta":
#     pg_num: 8
#     size: 3
#     type: replicated
#   "{{ rgw_zone }}.rgw.log":
#     pg_num: 8
#     size: 3
#     type: replicated
#   "{{ rgw_zone }}.rgw.control":
#     pg_num: 8
#     size: 3
#     type: replicated
#     rule_name: foo
##########
# DOCKER #
##########
# Resource limitation
# For the full list of limits you can apply, see: docs.docker.com/engine/admin/resource_constraints
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
ceph_rgw_docker_memory_limit: "4096m"
ceph_rgw_docker_cpu_limit: 8
# ceph_rgw_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
# ceph_rgw_docker_cpuset_mems: "0"
ceph_config_keys: [] # DON'T TOUCH ME
rgw_config_keys: "/" # DON'T TOUCH ME
# If you want to add parameters, keep the existing ones and include the new ones,
# since overriding this variable replaces the defaults entirely.
ceph_rgw_container_params:
  volumes:
    - /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}:/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}:z
  args:
    - -f
    - -n=client.rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}
    - -k=/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}/keyring
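# For example (the extra bind mount and debug argument below are purely
# illustrative), an override in host or group vars that keeps the defaults
# above and adds to them might look like:
#
# ceph_rgw_container_params:
#   volumes:
#     - /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}:/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}:z
#     - /etc/pki/tls:/etc/pki/tls:ro
#   args:
#     - -f
#     - -n=client.rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}
#     - -k=/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}/keyring
#     - --debug-rgw=5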
###########
# SYSTEMD #
###########
# ceph_rgw_systemd_overrides will override the systemd settings
# for the ceph-rgw services.
# For example, to set "PrivateDevices=false" you can specify:
# ceph_rgw_systemd_overrides:
#   Service:
#     PrivateDevices: false