---
# You can override vars by using host or group vars
# (a minimal override sketch is at the end of this file)

## Setup options
#

# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
#fsid:

## Packages branch
ceph_key: https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
ceph_stable: true # use ceph stable branch
ceph_stable_release: giant # ceph stable release
# This option is needed for _both_ stable and dev versions, so please always fill in the right version
# supported distros are el6, rhel6, f18, f19, opensuse12.2, sles11, centos7 (see http://ceph.com/rpm-firefly/)
ceph_stable_redhat_distro: el7
ceph_dev: false # use ceph development branch
ceph_dev_branch: master # development branch you would like to use, e.g.: master, wip-hack
# supported distros are centos6, centos7, fc17, fc18, fc19, fc20, fedora17, fedora18,
# fedora19, fedora20, opensuse12, sles0 (see http://gitbuilder.ceph.com/).
# For rhel, please pay attention to the versions: 'rhel6 3' or 'rhel 4', the full name is _very_ important.
ceph_dev_redhat_distro: centos7

## Ceph options
#
cephx: true
cephx_require_signatures: true
cephx_cluster_require_signatures: true
cephx_service_require_signatures: false
disable_in_memory_logs: true

## Monitor options
#
monitor_interface: eth1
mon_osd_down_out_interval: 600
mon_osd_min_down_reporters: 7 # number of OSDs per host + 1
mon_clock_drift_allowed: .15
mon_clock_drift_warn_backoff: 30
mon_osd_full_ratio: .95
mon_osd_nearfull_ratio: .85
mon_osd_report_timeout: 300

## OSD options
#
journal_size: 100
pool_default_pg_num: 128
pool_default_pgp_num: 128
pool_default_size: 2
pool_default_min_size: 1
cluster_network: 192.168.42.0/24
public_network: 192.168.42.0/24
osd_mkfs_type: xfs
osd_mkfs_options_xfs: -f -i size=2048
osd_mount_options_xfs: noatime
osd_mon_heartbeat_interval: 30

# CRUSH
pool_default_crush_rule: 0
osd_crush_update_on_start: "true"

# Object backend
osd_objectstore: filestore

# Performance tuning
filestore_merge_threshold: 40
filestore_split_multiple: 8
osd_op_threads: 8
filestore_op_threads: 8
filestore_max_sync_interval: 5
osd_max_scrubs: 1

# Recovery tuning
osd_recovery_max_active: 5
osd_max_backfills: 2
osd_recovery_op_priority: 2
osd_recovery_max_chunk: 1048576
osd_recovery_threads: 1

## MDS options
#
mds: true # set to false to disable mds configuration in ceph.conf

## Rados Gateway options
#
radosgw: true # referenced in the monitor role too
#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls

## Testing mode
# enable this mode _only_ when you have a single node
# if you don't want it, keep the option commented out
#common_single_host_mode: true
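
## Override example
# A minimal sketch of overriding the defaults above with group or host vars.
# The group name 'mons', the hostname, and the values shown are assumptions;
# match them to your own inventory layout. Host vars take precedence over
# group vars, which take precedence over this file.
#
# group_vars/mons.yml (applies to every host in the [mons] inventory group):
#   monitor_interface: eth2
#   mon_osd_min_down_reporters: 9 # 8 OSDs per host + 1
#
# host_vars/osd1.example.com.yml (applies to that single host only):
#   cluster_network: 10.0.42.0/24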