---
# Variables here are applicable to all host groups NOT roles
# Dummy variable to avoid an error: Ansible does not recognize the file as a valid configuration file when it contains no variables.
dummy:
###########
# GENERAL #
###########
#fetch_directory: fetch/
#########
# INSTALL
#########
#mon_group_name: mons
#osd_group_name: osds
#rgw_group_name: rgws
#mds_group_name: mdss
#restapi_group_name: restapis
# /!\ ACTIVATE EITHER ceph_stable OR ceph_stable_ice OR ceph_dev /!\
#debian_package_dependencies:
# - python-pycurl
# - hdparm
# - ntp
#redhat_package_dependencies:
# - python-pycurl
# - hdparm
# - yum-plugin-priorities.noarch
# - epel-release
# - ntp
## Configure package origin
#
#ceph_origin: 'upstream' # or 'distro'
#ceph_use_distro_backports: false # DEBIAN ONLY
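#
# Illustrative example only (a sketch, not a recommendation): install
# whatever Ceph packages your distribution ships instead of the upstream
# ceph.com repositories:
#
#ceph_origin: 'distro'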
# STABLE
########
# COMMUNITY VERSION
#ceph_stable: false # use ceph stable branch
#ceph_stable_key: https://download.ceph.com/keys/release.asc
#ceph_stable_release: hammer # ceph stable release
# Use the option below to specify your applicable package tree, e.g. when using non-LTS Ubuntu versions
# for a list of available Debian distributions, visit http://ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
#ceph_stable_distro_source:
# This option is needed for _both_ the stable and dev versions, so please always fill in the right version
# for supported distros, see http://ceph.com/rpm-{{ ceph_stable_release }}/
#ceph_stable_redhat_distro: el7
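#
# Illustrative example only: enable the upstream community stable release
# (remember to activate only one of ceph_stable, ceph_stable_ice and
# ceph_dev). The distro source is an assumption for a non-LTS Ubuntu host;
# check http://ceph.com/debian-{{ ceph_stable_release }}/dists/ for the
# codenames that actually exist before setting it:
#
#ceph_origin: 'upstream'
#ceph_stable: true
#ceph_stable_release: hammer
#ceph_stable_distro_source: trusty   # example value, verify against the dists/ listing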
# ENTERPRISE VERSION ICE (old, prior to 1.3)
#ceph_stable_ice: false # use Inktank Ceph Enterprise
#ceph_stable_ice_url: https://download.inktank.com/enterprise
#ceph_stable_ice_temp_path: /opt/ICE/ceph-repo/
#ceph_stable_ice_kmod: 3.10-0.1.20140702gitdc9ac62.el7.x86_64
#ceph_stable_ice_distro: rhel7 # Please check the download website for the supported versions.
#ceph_stable_ice_version: 1.2.2
#ceph_stable_ice_kmod_version: 1.2
#ceph_stable_ice_user: # htaccess user
#ceph_stable_ice_password: # htaccess password
# ENTERPRISE VERSION RED HAT STORAGE (from 1.3)
# This version is only supported on RHEL 7.1
# As of RHEL 7.1, libceph.ko and rbd.ko are now included in Red Hat's kernel
# packages natively. The RHEL 7.1 kernel packages are more stable and secure than
# using these 3rd-party kmods with RHEL 7.0. Please update your systems to RHEL
# 7.1 or later if you want to use the kernel RBD client.
#
# The CephFS kernel client is undergoing rapid development upstream, and we do
# not recommend running the CephFS kernel module on RHEL 7's 3.10 kernel at this
# time. Please use ELRepo's latest upstream 4.x kernels if you want to run CephFS
# on RHEL 7.
#
#ceph_stable_rh_storage: false
#ceph_stable_rh_storage_cdn_install: false # assumes all the nodes can connect to cdn.redhat.com
#ceph_stable_rh_storage_iso_install: false # usually used when nodes don't have access to cdn.redhat.com
#ceph_stable_rh_storage_iso_path:
#ceph_stable_rh_storage_mount_path: /tmp/rh-storage-mount
#ceph_stable_rh_storage_repository_path: /tmp/rh-storage-repo # where to copy iso's content
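#
# Illustrative example only: install Red Hat Ceph Storage from a local ISO
# when the nodes cannot reach cdn.redhat.com. The ISO path is a placeholder,
# point it at wherever you copied the image:
#
#ceph_stable_rh_storage: true
#ceph_stable_rh_storage_iso_install: true
#ceph_stable_rh_storage_iso_path: /path/to/rh-storage.iso   # placeholder path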
# DEV
# ###
#ceph_dev: false # use ceph development branch
#ceph_dev_key: https://download.ceph.com/keys/autobuild.asc
#ceph_dev_branch: master # development branch you would like to use e.g: master, wip-hack
# supported distros are centos6, centos7, fc17, fc18, fc19, fc20, fedora17, fedora18,
# fedora19, fedora20, opensuse12, sles0. (see http://gitbuilder.ceph.com/).
# For rhel, please pay attention to the versions: 'rhel6 3' or 'rhel 4', the full name is _very_ important.
#ceph_dev_redhat_distro: centos7
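#
# Illustrative example only: track a development branch built by
# gitbuilder.ceph.com instead of a released version (the branch name below is
# just the example mentioned above):
#
#ceph_dev: true
#ceph_dev_branch: wip-hack
#ceph_dev_redhat_distro: centos7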
###############
# CONFIGURATION
###############
## Ceph options
#
#fsid: "{{ cluster_uuid.stdout }}"
#cephx: true
#cephx_require_signatures: true # Kernel RBD does NOT support signatures!
#cephx_cluster_require_signatures: true
#cephx_service_require_signatures: false
#max_open_files: 131072
#disable_in_memory_logs: true
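#
# Illustrative example only: kernel RBD clients do not support cephx
# signatures (see the warning above), so deployments that map RBD images with
# the kernel client may need to relax these:
#
#cephx_require_signatures: false
#cephx_cluster_require_signatures: false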
## Client options
#
#rbd_cache: "true"
#rbd_cache_writethrough_until_flush: "true"
#rbd_concurrent_management_ops: 20
#rbd_client_directories: false # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions; this WON'T work if libvirt and kvm are installed
#rbd_client_log_file: /var/log/rbd-clients/qemu-guest-$pid.log # must be writable by QEMU and allowed by SELinux or AppArmor
#rbd_client_log_path: /var/log/rbd-clients/
#rbd_client_admin_socket_path: /var/run/ceph/rbd-clients/
#rbd_default_features: 3
#rbd_default_map_options: rw
#rbd_default_format: 2
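#
# Illustrative example only: have the playbook create the client log and
# admin-socket directories (remember this won't work when libvirt and kvm are
# installed), keeping the default paths shown above:
#
#rbd_client_directories: true
#rbd_client_log_path: /var/log/rbd-clients/
#rbd_client_admin_socket_path: /var/run/ceph/rbd-clients/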
## Monitor options
#
#monitor_interface: interface
#monitor_secret: "{{ monitor_keyring.stdout }}"
#mon_osd_down_out_interval: 600
#mon_osd_min_down_reporters: 7 # number of OSDs per host + 1
#mon_clock_drift_allowed: .15
#mon_clock_drift_warn_backoff: 30
#mon_osd_full_ratio: .95
#mon_osd_nearfull_ratio: .85
#mon_osd_report_timeout: 300
#mon_pg_warn_max_per_osd: 0 # disable complaints about low PG numbers per OSD
#mon_osd_allow_primary_affinity: "true"
#mon_pg_warn_max_object_skew: 10 # set to 20 or higher to disable complaints about the number of PGs being too low when some pools have very few objects, which brings down the average number of objects per pool. This happens when running RadosGW. Ceph's default is 10
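#
# Illustrative example only: following the "number of OSDs per host + 1" rule
# above, hosts carrying 12 OSDs each would use 13 reporters; the interface
# name is an assumption, use whatever your monitors actually listen on:
#
#monitor_interface: eth0
#mon_osd_min_down_reporters: 13   # 12 OSDs per host + 1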
## OSD options
#
#journal_size: 0
#pool_default_pg_num: 128
#pool_default_pgp_num: 128
#pool_default_size: 2
#pool_default_min_size: 1
#public_network: 0.0.0.0/0
#cluster_network: "{{ public_network }}"
#osd_mkfs_type: xfs
#osd_mkfs_options_xfs: -f -i size=2048
#osd_mount_options_xfs: noatime
#osd_mon_heartbeat_interval: 30
# CRUSH
#pool_default_crush_rule: 0
#osd_crush_update_on_start: "true"
# Object backend
#osd_objectstore: filestore
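#
# Illustrative example only: the subnets and journal size below are made-up
# values, pick ones matching your environment. journal_size maps to Ceph's
# "osd journal size", which is expressed in MB, so 5120 means a 5 GB journal
# per OSD:
#
#journal_size: 5120
#public_network: 192.168.42.0/24
#cluster_network: 192.168.43.0/24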
# Performance tuning
#filestore_merge_threshold: 40
#filestore_split_multiple: 8
#osd_op_threads: 8
#filestore_op_threads: 8
#filestore_max_sync_interval: 5
#osd_max_scrubs: 1
# Recovery tuning
#osd_recovery_max_active: 5
#osd_max_backfills: 2
#osd_recovery_op_priority: 2
#osd_recovery_max_chunk: 1048576
#osd_recovery_threads: 1
# Deep scrub
#osd_scrub_sleep: .1
#osd_disk_thread_ioprio_class: idle
#osd_disk_thread_ioprio_priority: 0
#osd_scrub_chunk_max: 5
#osd_deep_scrub_stride: 1048576
# Rados Gateway options
#
#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
#radosgw_frontend: civetweb # supported options are 'apache' or 'civetweb', also edit roles/ceph-rgw/defaults/main.yml
#radosgw_civetweb_port: 80
#radosgw_keystone: false # activate OpenStack Keystone options; full details here: http://ceph.com/docs/master/radosgw/keystone/
#radosgw_keystone_url: # url:admin_port, e.g. http://192.168.0.1:35357
#radosgw_keystone_admin_token: password
#radosgw_keystone_accepted_roles: Member, _member_, admin
#radosgw_keystone_token_cache_size: 10000
#radosgw_keystone_revocation_internal: 900
#radosgw_s3_auth_use_keystone: "true"
#radosgw_nss_db_path: /var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_hostname }}/nss
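#
# Illustrative example only: wire the gateway to an OpenStack Keystone
# endpoint. The URL and token are placeholders taken from the comments above;
# substitute your own:
#
#radosgw_keystone: true
#radosgw_keystone_url: http://192.168.0.1:35357
#radosgw_keystone_admin_token: password
#radosgw_s3_auth_use_keystone: "true"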
# Toggle 100-continue support for Apache and FastCGI
# WARNING: Changing this value will cause an outage of Apache while it is reinstalled on RGW nodes
#http_100_continue: false
#redhat_distro_ceph_extra: centos6.4 # supported distros are centos6.3, centos6.4, centos6, fedora18, fedora19, opensuse12.2, rhel6.3, rhel6.4, rhel6.5, rhel6, sles11sp2
#email_address: foo@bar.com
## REST API options
#
#restapi_interface: "{{ monitor_interface }}"
#restapi_port: 5000
#restapi_base_url: /api/v0.1
#restapi_log_level: warning
## Testing mode
# enable this mode _only_ when you have a single node
# if you don't want it, keep the option commented
#common_single_host_mode: true
#############
# OS TUNING #
#############
#disable_transparent_hugepage: true
#disable_swap: true
#os_tuning_params:
# - { name: kernel.pid_max, value: 4194303 }
# - { name: fs.file-max, value: 26234859 }
# - { name: vm.zone_reclaim_mode, value: 0 }
# - { name: vm.vfs_cache_pressure, value: 50 }
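#
# Illustrative example only: extra sysctls can be added in the same format.
# Note that overriding os_tuning_params replaces the default list, so restate
# any defaults you still want; vm.swappiness below is an assumption, not a
# project default:
#
#os_tuning_params:
#  - { name: kernel.pid_max, value: 4194303 }
#  - { name: vm.swappiness, value: 10 }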
##########
# DOCKER #
##########
#docker: false