---
# Variables here are applicable to all host groups NOT roles
# This sample file generated by generate_group_vars_sample.sh
# Dummy variable to avoid error because ansible does not recognize the
# file as a good configuration file when no variable in it.
dummy:
# You can override vars by using host or group vars
###########
# GENERAL #
###########
# Even though RGW nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on RGW nodes. Setting 'copy_admin_key' to 'true'
# will copy the admin key to the /etc/ceph/ directory
#copy_admin_key: false

##########
# TUNING #
##########

# To support buckets with a very large number of objects it's
# important to split them into shards. We suggest about 100K
# objects per shard as a conservative maximum.
#rgw_override_bucket_index_max_shards: 16

# Consider setting a quota on buckets so that exceeding this
# limit will require admin intervention.
#rgw_bucket_default_quota_max_objects: 1638400 # i.e., 100K * 16

# Declaring rgw_create_pools will create pools with the given number of pgs,
# size, and type. The following are some important notes on this automatic
# pool creation:
# - The pools and associated pg_num's below are merely examples of pools that
# could be automatically created when rgws are deployed.
# - The default pg_num is 8 (from osd_pool_default_pg_num) for pool created
# if rgw_create_pools isn't declared and configured.
# - A pgcalc tool should be used to determine the optimal sizes for
# the rgw.buckets.data, rgw.buckets.index pools as well as any other
# pools declared in this dictionary.
# https://ceph.io/pgcalc is the upstream pgcalc tool
# https://access.redhat.com/labsinfo/cephpgc is a pgcalc tool offered by
# Red Hat if you are using RHCS.
# - The default value of {{ rgw_zone }} is 'default'.
# - The type must be set as either 'replicated' or 'ec' for
# each pool.
# - If a pool's type is 'ec', k and m values must be set via
# the ec_k, and ec_m variables.
# - The rule_name key can be used with a specific crush rule value (must exist).
# If the key doesn't exist it falls back to the default replicated_rule.
# This only works for replicated pool type not erasure.

#rgw_create_pools:
# "{{ rgw_zone }}.rgw.buckets.data":
# pg_num: 64
# type: ec
# ec_profile: myecprofile
# ec_k: 5
# ec_m: 3
# "{{ rgw_zone }}.rgw.buckets.index":
# pg_num: 16
# size: 3
# type: replicated
# "{{ rgw_zone }}.rgw.meta":
# pg_num: 8
# size: 3
# type: replicated
# "{{ rgw_zone }}.rgw.log":
# pg_num: 8
# size: 3
# type: replicated
# "{{ rgw_zone }}.rgw.control":
# pg_num: 8
# size: 3
# type: replicated
# rule_name: foo

##########
# DOCKER #
##########

# Resource limitation
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_rgw_docker_extra_env' variable.

#ceph_rgw_docker_memory_limit: "4096m"
#ceph_rgw_docker_cpu_limit: 8
#ceph_rgw_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
#ceph_rgw_docker_cpuset_mems: "0"

#ceph_rgw_docker_extra_env:

#ceph_config_keys: [] # DON'T TOUCH ME
#rgw_config_keys: "/" # DON'T TOUCH ME

###########
# SYSTEMD #
###########
# ceph_rgw_systemd_overrides will override the systemd settings
# for the ceph-rgw services.
# For example, to set "PrivateDevices=false" you can specify:
#ceph_rgw_systemd_overrides:
# Service:
# PrivateDevices: False