ceph-ansible/group_vars/rgws.yml.sample

---
# Variables here are applicable to all host groups, NOT roles
# This sample file generated by generate_group_vars_sample.sh
# Dummy variable to avoid an error, because Ansible does not recognize the
# file as a good configuration file when there is no variable in it.
dummy:
# You can override vars by using host or group vars
###########
# GENERAL #
###########
# Even though RGW nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on RGW nodes. Setting 'copy_admin_key' to 'true'
# will copy the admin key to the /etc/ceph/ directory
#copy_admin_key: false
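# For example, to opt in and distribute the admin key to RGW nodes
# (only uncomment if you accept the security trade-off described above):
#copy_admin_key: true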
##########
# TUNING #
##########
# Declaring rgw_create_pools will create pools with the given number of pgs,
# size, and type. The following are some important notes on this automatic
# pool creation:
# - The pools and associated pg_num's below are merely examples of pools that
#   could be automatically created when rgws are deployed.
# - The default pg_num is 8 (from osd_pool_default_pg_num) for pools created
#   if rgw_create_pools isn't declared and configured.
# - A pgcalc tool should be used to determine the optimal sizes for the
#   rgw.buckets.data and rgw.buckets.index pools, as well as any other
#   pools declared in this dictionary.
#   https://ceph.io/pgcalc is the upstream pgcalc tool.
#   https://access.redhat.com/labsinfo/cephpgc is a pgcalc tool offered by
#   Red Hat if you are using RHCS.
# - The default value of {{ rgw_zone }} is 'default'.
# - The type must be set as either 'replicated' or 'ec' for each pool.
# - If a pool's type is 'ec', k and m values must be set via the
#   ec_k and ec_m variables.
# - The rule_name key can be used to assign a specific crush rule to a pool
#   (the rule must already exist). If the key isn't set, the pool falls back
#   to the default replicated_rule. This only works for the replicated pool
#   type, not erasure.
# rgw_create_pools:
#   "{{ rgw_zone }}.rgw.buckets.data":
#     pg_num: 64
#     type: ec
#     ec_profile: myecprofile
#     ec_k: 5
#     ec_m: 3
#   "{{ rgw_zone }}.rgw.buckets.index":
#     pg_num: 16
#     size: 3
#     type: replicated
#   "{{ rgw_zone }}.rgw.meta":
#     pg_num: 8
#     size: 3
#     type: replicated
#   "{{ rgw_zone }}.rgw.log":
#     pg_num: 8
#     size: 3
#     type: replicated
#   "{{ rgw_zone }}.rgw.control":
#     pg_num: 8
#     size: 3
#     type: replicated
#     rule_name: foo
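# With the default rgw_zone of 'default', the first entry above would create
# a pool named 'default.rgw.buckets.data'. After deployment, the resulting
# pools can be inspected with, for example:
#   ceph osd pool ls detail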
##########
# DOCKER #
##########
# Resource limitation
# For the full list of limits you can apply, see: docs.docker.com/engine/admin/resource_constraints
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
#ceph_rgw_docker_memory_limit: "4096m"
#ceph_rgw_docker_cpu_limit: 8
#ceph_rgw_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
#ceph_rgw_docker_cpuset_mems: "0"
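# For example, to cap the RGW container at 2 GiB of memory and 4 CPUs
# (illustrative values only; size them for your own hardware):
#ceph_rgw_docker_memory_limit: "2048m"
#ceph_rgw_docker_cpu_limit: 4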
#ceph_config_keys: [] # DON'T TOUCH ME
#rgw_config_keys: "/" # DON'T TOUCH ME
# If you want to add parameters, you should retain the existing ones and include the new ones.
#ceph_rgw_container_params:
#  volumes:
#    - /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}:/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}:z
#  args:
#    - -f
#    - -n=client.rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}
#    - -k=/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}/keyring
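# Note: the {{ ... }} expressions above are rendered by Ansible at deploy
# time, while ${INST_NAME} is expected to be expanded at runtime by the
# systemd unit for each RGW instance.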
###########
# SYSTEMD #
###########
# ceph_rgw_systemd_overrides will override the systemd settings
# for the ceph-rgw services.
# For example, to set "PrivateDevices=false" you can specify:
# ceph_rgw_systemd_overrides:
#   Service:
#     PrivateDevices: false
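# As a sketch, the example above is expected to render a systemd drop-in
# equivalent to:
#   [Service]
#   PrivateDevices=false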