ceph-ansible/group_vars/all

---
# Variables here are applicable to all host groups, NOT to roles
## Setup options
#
distro_release: "{{ facter_lsbdistcodename }}"
ceph_release: firefly
redhat_distro: el6 # supported distros are el6, rhel6, f18, f19, opensuse12.2, sles11
## Ceph options
#
cephx: true
fsid: # /!\ GENERATE ONE WITH 'uuidgen -r' /!\
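# e.g. generate a value on the control node (command taken from the note above):
#   uuidgen -r
# and paste the resulting UUID as the value of fsid.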
## Monitor options
#
monitor_interface: eth1
mon_osd_down_out_interval: 600 # seconds before a down OSD is automatically marked out
mon_osd_min_down_reporters: 7 # number of OSDs per host + 1
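# e.g. with 6 OSDs per host this becomes 6 + 1 = 7, so reports from the OSDs of a
# single failed host are never enough on their own to mark a peer down.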
## MDS options
#
mds: true # set to false to disable MDS configuration in ceph.conf
## Rados Gateway options
#
radosgw: true
redhat_distro_ceph_extra: centos6.4 # supported distros are centos6.3, centos6.4, centos6, fedora18, fedora19, opensuse12.2, rhel6.3, rhel6.4, rhel6.5, rhel6, sles11sp2
radosgw_interface: eth1 # public interface that radosgw uses to talk to the world; only used by the haproxy role, so it does not need to be set if haproxy is not used
#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
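# e.g. with radosgw_dns_name set to objects.example.com (a hypothetical domain), a bucket
# named 'foo' is reachable as foo.objects.example.com once a matching wildcard DNS record exists.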
email_address: foo@bar.com
## OSD options
#
journal_size: 100 # in MB
pool_default_pg_num: 128
pool_default_pgp_num: 128
pool_default_size: 2
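# Sizing rule of thumb (not computed by ansible, adjust to your cluster): total pg_num
# ~= (number of OSDs * 100) / pool_default_size, rounded up to the next power of two,
# e.g. 9 OSDs with size 2 -> 450 -> 512.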
cluster_network: 192.168.0.0/24
public_network: 192.168.0.0/24
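# The two networks may share a subnet on small setups, as above; a separate cluster_network
# carries OSD replication/recovery traffic while public_network serves clients and monitors.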
osd_mkfs_type: xfs
osd_mon_heartbeat_interval: 30 # in seconds
# Performance tuning
filestore_merge_threshold: 40
filestore_split_multiple: 8
osd_op_threads: 8
# Recovery tuning
osd_recovery_max_active: 5
osd_max_backfills: 2
osd_recovery_op_priority: 2
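# These tuning variables end up in the [osd] section of the generated ceph.conf,
# roughly as follows (a sketch, the exact template output may differ):
#   osd recovery max active = 5
#   osd max backfills = 2
#   osd recovery op priority = 2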
## Testing mode
# enable this mode _only_ when you have a single node
common_single_host_mode: true