---
# You can override vars by using host or group vars

###########
# GENERAL #
###########

fetch_directory: fetch/

# Even though RGW nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on RGW nodes. Setting 'copy_admin_key' to 'true'
# will copy the admin key to the /etc/ceph/ directory.
copy_admin_key: false

## Ceph options
#
cephx: true

# Multi-site remote pull URL variables
rgw_pull_port: "{{ radosgw_civetweb_port }}"
rgw_pull_proto: "http"

##########
# TUNING #
##########

# To support buckets with a very large number of objects it's
# important to split them into shards. We suggest about 100K
# objects per shard as a conservative maximum.
#rgw_override_bucket_index_max_shards: 16

# Consider setting a quota on buckets so that exceeding this
# limit will require admin intervention.
#rgw_bucket_default_quota_max_objects: 1638400 # i.e., 100K * 16

# This dictionary will create pools with the given number of pgs.
# This is important because otherwise they would be created with
# the default of 8.
# New pools and their corresponding pg_nums can be created
# by adding to the create_pools dictionary (see foo).
#create_pools:
#  default.rgw.buckets.data:
#    pg_num: 16
#  default.rgw.buckets.index:
#    pg_num: 32
#  foo:
#    pg_num: 4

##########
# DOCKER #
##########

rgw_containerized_deployment: false
rgw_containerized_deployment_with_kv: false
kv_type: etcd
kv_endpoint: 127.0.0.1
ceph_rgw_civetweb_port: "{{ radosgw_civetweb_port }}"
ceph_docker_image: "ceph/daemon"
ceph_docker_image_tag: latest
ceph_rgw_docker_extra_env: -e CLUSTER={{ cluster }} -e RGW_CIVETWEB_PORT={{ ceph_rgw_civetweb_port }}
ceph_docker_on_openstack: false
ceph_config_keys: [] # DON'T TOUCH ME
rgw_config_keys: "/" # DON'T TOUCH ME
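
# A minimal sketch of overriding the defaults above from inventory vars, as
# the header comment suggests. The path group_vars/rgws.yml and the values
# shown are illustrative assumptions, not part of this role; any variable in
# this file can be overridden the same way.
#
#   ---
#   # group_vars/rgws.yml (hypothetical)
#   copy_admin_key: true
#   rgw_pull_proto: "https"
#   rgw_override_bucket_index_max_shards: 16
#   rgw_bucket_default_quota_max_objects: 1638400
#   rgw_containerized_deployment: true
#   kv_endpoint: 192.0.2.10   # example address (TEST-NET), not a real endpoint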