---
# Variables here are applicable to all host groups NOT roles
## Setup options
#
distro_release: "{{ facter_lsbdistcodename }}"
apt_key: http://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
ceph_release: emperor
redhat_distro: el6 # supported distros are el6, rhel6, f18, f19, opensuse12.2, sles11
## Ceph options
#
cephx: true
fsid: # /!\ GENERATE ONE WITH 'uuidgen -r' /!\
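# Example (hypothetical value): 'uuidgen -r' prints a random UUID such as
# 1ae109fc-3b09-46cf-9a27-0f4382a2e5dd, which you would then set as:
# fsid: 1ae109fc-3b09-46cf-9a27-0f4382a2e5dd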
## Monitor options
#
monitor_interface: eth1
mon_osd_down_out_interval: 600
mon_osd_min_down_reporters: 7 # number of OSDs per host + 1
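# Worked example: hosts with 6 OSDs each give 6 + 1 = 7, the value above; adjust to match your own OSD count per host.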
## MDS options
#
mds: true # set to false to disable the MDS configuration in ceph.conf

## Rados Gateway options
#
radosgw: true
redhat_distro_ceph_extra: centos6.4 # supported distros are centos6.3, centos6.4, centos6, fedora18, fedora19, opensuse12.2, rhel6.3, rhel6.4, rhel6.5, rhel6, sles11sp2
radosgw_interface: eth1 # the public interface on which radosgw talks to the world; only used by the haproxy role, so it does not need to be set if haproxy is not used
#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
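# Example (hypothetical names): with radosgw_dns_name: objects.example.com, an S3 client
# can address bucket 'foo' as foo.objects.example.com, provided a wildcard DNS record
# (*.objects.example.com) resolves to the gateway.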
email_address: foo@bar.com
## OSD options
#
journal_size: 100 # OSD journal size, in MB
pool_default_pg_num: 128
pool_default_pgp_num: 128
pool_default_size: 2
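# Sizing sketch (rule of thumb only, verify against the Ceph placement group docs):
# PGs per pool ~= (number of OSDs * 100) / pool_default_size, rounded to a power
# of two. E.g. 3 OSDs: 3 * 100 / 2 = 150, so 128 (or 256 if you round up).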
cluster_network: 192.168.0.0/24
public_network: 192.168.0.0/24
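# OSDs use cluster_network for replication and recovery traffic, while clients and
# monitors use public_network. Example with two subnets (hypothetical addressing):
# public_network: 192.168.0.0/24
# cluster_network: 192.168.1.0/24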
osd_mkfs_type: xfs
osd_mon_heartbeat_interval: 30
# Performance tuning
filestore_merge_threshold: 40
filestore_split_multiple: 8
osd_op_threads: 8
# Recovery tuning
osd_recovery_max_active: 5
osd_max_backfills: 2
osd_recovery_op_priority: 2
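# These caps trade recovery speed for client latency: low osd_max_backfills,
# osd_recovery_max_active and osd_recovery_op_priority keep recovery and backfill
# from starving client I/O; raise them if faster rebuilds matter more.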
## Testing mode
# enable this mode _only_ when you have a single node
common_single_host_mode: true
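# Note: on a single node replicas cannot be spread across hosts, so this flag is
# expected to relax CRUSH placement (typically 'osd crush chooseleaf type = 0' in
# ceph.conf); check the role templates for exactly what it toggles.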