---
# You can override vars by using host or group vars

###########
# GENERAL #
###########

# Even though RGW nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on RGW nodes. Setting 'copy_admin_key' to 'true'
# will copy the admin key to the /etc/ceph/ directory
copy_admin_key: false


##########
# TUNING #
##########

# To support buckets with a very large number of objects it's
# important to split them into shards. We suggest about 100K
# objects per shard as a conservative maximum.
#rgw_override_bucket_index_max_shards: 16

# Consider setting a quota on buckets so that exceeding this
# limit will require admin intervention.
#rgw_bucket_default_quota_max_objects: 1638400 # i.e., 100K * 16

# This dictionary will create pools with the given number of pgs.
# This is important because pools would otherwise be created with
# the default of 8 pgs.
# New pools and their corresponding pg_nums can be created
# by adding to the rgw_create_pools dictionary (see foo).
#rgw_create_pools:
#  defaults.rgw.buckets.data:
#    pg_num: 16
#    size: ""
#  defaults.rgw.buckets.index:
#    pg_num: 32
#    size: ""
#  foo:
#    pg_num: 4
#    size: ""


##########
# DOCKER #
##########

# Resource limitation
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html/red_hat_ceph_storage_hardware_selection_guide/ceph-hardware-min-recommend
# These options can be passed using the 'ceph_rgw_docker_extra_env' variable.
ceph_rgw_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
ceph_rgw_docker_cpu_limit: 8

ceph_rgw_docker_extra_env:
ceph_config_keys: [] # DON'T TOUCH ME
rgw_config_keys: "/" # DON'T TOUCH ME


###########
# SYSTEMD #
###########

# ceph_rgw_systemd_overrides will override the systemd settings
# for the ceph-rgw services.
# For example, to set "PrivateDevices=false" you can specify:
#ceph_rgw_systemd_overrides:
#  Service:
#    PrivateDevices: False
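
# A sketch of how the two tuning values above relate (illustrative numbers,
# not defaults): with 32 shards at the suggested ~100K (102400) objects per
# shard, a matching default quota would be 102400 * 32 = 3276800.
#rgw_override_bucket_index_max_shards: 32
#rgw_bucket_default_quota_max_objects: 3276800 # i.e., 100K * 32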
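
# A minimal sketch of overriding the Docker resource limits above, e.g. from
# host or group vars; the values below are assumptions for illustration only:
#ceph_rgw_docker_memory_limit: "{{ ansible_memtotal_mb // 2 }}m"
#ceph_rgw_docker_cpu_limit: 4
#
# 'ceph_rgw_docker_extra_env' takes extra options for the container
# invocation, so environment variables can be supplied as '-e' flags.
# 'CLUSTER' is an assumed example following the ceph-container convention
# of naming the cluster via an environment variable:
#ceph_rgw_docker_extra_env: -e CLUSTER={{ cluster }}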
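
# A further sketch of 'ceph_rgw_systemd_overrides' using standard systemd
# [Service] keys; the values here are assumptions, not recommendations:
#ceph_rgw_systemd_overrides:
#  Service:
#    Restart: always
#    RestartSec: 10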