ceph-ansible/roles/ceph-common/defaults/main.yml

---
# You can override these variables using host or group vars
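# As an illustration only (the values below are examples, not additional
# defaults), a group_vars/all file could override some of the variables
# defined here:
#
#   ceph_stable_release: hammer
#   journal_size: 5120
#   pool_default_size: 3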
###########
# INSTALL #
###########
mon_group_name: mons
osd_group_name: osds
rgw_group_name: rgws
mds_group_name: mdss
restapi_group_name: restapis
# /!\ ENABLE EITHER ceph_stable OR ceph_stable_ice OR ceph_dev /!\
# STABLE
########
# COMMUNITY VERSION
ceph_stable: true # use ceph stable branch
ceph_stable_key: https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
ceph_stable_release: hammer # ceph stable release
# Use the option below to specify your applicable package tree, e.g. when using non-LTS Ubuntu versions
# for a list of available Debian distributions, visit http://ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
#ceph_stable_distro_source:
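# As a sketch only (this assumes the lsb facts are available on the target),
# the codename could also be taken from Ansible facts, e.g.:
#ceph_stable_distro_source: "{{ ansible_lsb.codename }}"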
# This option is needed for _both_ the stable and dev versions, so please always fill in the right version
# for supported distros, see http://ceph.com/rpm-{{ ceph_stable_release }}/
ceph_stable_redhat_distro: el7
# ENTERPRISE VERSION ICE (old, prior to 1.3)
ceph_stable_ice: false # use Inktank Ceph Enterprise
#ceph_stable_ice_url: https://download.inktank.com/enterprise
#ceph_stable_ice_temp_path: /opt/ICE/ceph-repo/
#ceph_stable_ice_kmod: 3.10-0.1.20140702gitdc9ac62.el7.x86_64
#ceph_stable_ice_distro: rhel7 # Please check the download website for the supported versions.
#ceph_stable_ice_version: 1.2.2
#ceph_stable_ice_kmod_version: 1.2
#ceph_stable_ice_user: # htaccess user
#ceph_stable_ice_password: # htaccess password
# ENTERPRISE VERSION RED HAT STORAGE (from 1.3)
# This version is only supported on RHEL 7.1
# As of RHEL 7.1, libceph.ko and rbd.ko are now included in Red Hat's kernel
# packages natively. The RHEL 7.1 kernel packages are more stable and secure than
# using these 3rd-party kmods with RHEL 7.0. Please update your systems to RHEL
# 7.1 or later if you want to use the kernel RBD client.
#
# The CephFS kernel client is undergoing rapid development upstream, and we do
# not recommend running the CephFS kernel module on RHEL 7's 3.10 kernel at this
# time. Please use ELRepo's latest upstream 4.x kernels if you want to run CephFS
# on RHEL 7.
#
ceph_stable_rh_storage: false
ceph_stable_rh_storage_cdn_install: true # assumes all the nodes can connect to cdn.redhat.com
ceph_stable_rh_storage_iso_install: false # usually used when nodes don't have access to cdn.redhat.com
#ceph_stable_rh_storage_iso_path:
ceph_stable_rh_storage_mount_path: /tmp/rh-storage-mount
ceph_stable_rh_storage_repository_path: /tmp/rh-storage-repo # where to copy the ISO's content
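# Illustrative sketch of an ISO-based install (the ISO path below is a
# hypothetical placeholder, not a shipped default):
#
#   ceph_stable_rh_storage: true
#   ceph_stable_rh_storage_cdn_install: false
#   ceph_stable_rh_storage_iso_install: true
#   ceph_stable_rh_storage_iso_path: /root/rhceph.iso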
# DEV
#####
ceph_dev: false # use ceph development branch
ceph_dev_key: https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc
ceph_dev_branch: master # development branch you would like to use, e.g. master, wip-hack
# supported distros are centos6, centos7, fc17, fc18, fc19, fc20, fedora17, fedora18,
# fedora19, fedora20, opensuse12, sles0. (see http://gitbuilder.ceph.com/).
# For rhel, please pay attention to the versions: 'rhel6 3' or 'rhel 4', the full name is _very_ important.
ceph_dev_redhat_distro: centos7
######################
# CEPH CONFIGURATION #
######################
## Ceph options
#
fsid: "{{ cluster_uuid.stdout }}"
cephx: true
cephx_require_signatures: true # Kernel RBD does NOT support signatures for Kernels < 3.18!
cephx_cluster_require_signatures: true
cephx_service_require_signatures: false
max_open_files: 131072
disable_in_memory_logs: true # set this to false when enabling the debug options below
# Debug logs
enable_debug_global: false
debug_global_level: 20
enable_debug_mon: false
debug_mon_level: 20
enable_debug_osd: false
debug_osd_level: 20
enable_debug_mds: false
debug_mds_level: 20
## Client options
#
rbd_cache: "true"
rbd_cache_writethrough_until_flush: "true"
rbd_concurrent_management_ops: 20
rbd_client_directories: false # if set to true, this creates the rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions; this WON'T work if libvirt and kvm are installed
rbd_client_log_file: /var/log/rbd-clients/qemu-guest-$pid.log # must be writable by QEMU and allowed by SELinux or AppArmor
rbd_client_log_path: /var/log/rbd-clients/
rbd_client_admin_socket_path: /var/run/ceph/rbd-clients # must be writable by QEMU and allowed by SELinux or AppArmor
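# For reference, the client settings above roughly correspond to ceph.conf
# [client] options such as the following (the exact rendering depends on the
# role's ceph.conf template):
#
#   [client]
#   rbd cache = true
#   rbd cache writethrough until flush = true
#   rbd concurrent management ops = 20
#   log file = /var/log/rbd-clients/qemu-guest-$pid.log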
## Monitor options
#
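# 'interface' below is a placeholder; set it per host or group to the NIC the
# monitors should listen on, e.g. eth0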
monitor_interface: interface
mon_osd_down_out_interval: 600
mon_osd_min_down_reporters: 7 # number of OSDs per host + 1
mon_clock_drift_allowed: .15
mon_clock_drift_warn_backoff: 30
mon_osd_full_ratio: .95
mon_osd_nearfull_ratio: .85
mon_osd_report_timeout: 300
## OSD options
#
journal_size: 0
pool_default_pg_num: 128
pool_default_pgp_num: 128
pool_default_size: 2
pool_default_min_size: 1
cluster_network: 0.0.0.0/0
public_network: 0.0.0.0/0
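# In a real deployment you would usually narrow these to your actual subnets,
# for example (illustrative values):
#
#   public_network: 192.168.42.0/24
#   cluster_network: 192.168.43.0/24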
osd_mkfs_type: xfs
osd_mkfs_options_xfs: -f -i size=2048
osd_mount_options_xfs: noatime,largeio,inode64,swalloc
osd_mon_heartbeat_interval: 30
# CRUSH
pool_default_crush_rule: 0
osd_crush_update_on_start: "true"
# Object backend
osd_objectstore: filestore
# Performance tuning
filestore_merge_threshold: 40
filestore_split_multiple: 8
osd_op_threads: 8
filestore_op_threads: 8
filestore_max_sync_interval: 5
osd_max_scrubs: 1
# Recovery tuning
osd_recovery_max_active: 5
osd_max_backfills: 2
osd_recovery_op_priority: 2
osd_recovery_max_chunk: 1048576
osd_recovery_threads: 1
# Deep scrub
osd_scrub_sleep: .1
osd_disk_thread_ioprio_class: idle
osd_disk_thread_ioprio_priority: 0
osd_scrub_chunk_max: 5
osd_deep_scrub_stride: 1048576
## Rados Gateway options
#
#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
radosgw_frontend: civetweb # supported options are 'apache' and 'civetweb'; also edit roles/ceph-radosgw/defaults/main.yml accordingly
radosgw_civetweb_port: 80
radosgw_keystone: false # activate the OpenStack Keystone options, full details here: http://ceph.com/docs/master/radosgw/keystone/
#radosgw_keystone_url: # url:admin_port, e.g. http://192.168.0.1:35357
radosgw_keystone_admin_token: password
radosgw_keystone_accepted_roles: Member, _member_, admin
radosgw_keystone_token_cache_size: 10000
radosgw_keystone_revocation_internal: 900
radosgw_s3_auth_use_keystone: "true"
radosgw_nss_db_path: /var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_hostname }}/nss
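# Sketch of enabling Keystone integration (the URL and token below are
# placeholders to replace with your own values):
#
#   radosgw_keystone: true
#   radosgw_keystone_url: http://192.168.0.1:35357
#   radosgw_keystone_admin_token: <your admin token>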
## REST API options
#
restapi_interface: "{{ monitor_interface }}"
restapi_port: 5000
restapi_base_url: /api/v0.1
restapi_log_level: warning # available levels are: critical, error, warning, info, debug
## Testing mode
# enable this mode _only_ when you have a single node
# if you don't want it, keep the option commented out
#common_single_host_mode: true
#############
# OS TUNING #
#############
disable_transparent_hugepage: true
disable_swap: true
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }
- { name: vm.zone_reclaim_mode, value: 0 }
- { name: vm.vfs_cache_pressure, value: 50 }
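# These parameters are typically applied as sysctl settings; to add another
# tunable, append an entry to the list above, for example (illustrative value):
#   - { name: vm.swappiness, value: 10 }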
##########
# DOCKER #
##########
docker: false