Merge pull request #174 from leseb/update-group-vars

Update group_vars files to match roles defaults vars
pull/175/head
Leseb 2015-01-06 22:50:12 +01:00
commit 695d6bf133
3 changed files with 118 additions and 27 deletions

View File

@@ -4,54 +4,114 @@
# Dummy variable to avoid an error, because Ansible does not recognize the file as a valid configuration file when it contains no variables.
dummy:
## Setup options
#
#ceph_release: firefly
#redhat_distro: el6 # supported distros are el6, rhel6, f18, f19, opensuse12.2, sles11
#########
# INSTALL
#########
# /!\ ENABLE ONLY ONE OF ceph_stable, ceph_stable_ice OR ceph_dev /!\
# STABLE
########
# COMMUNITY VERSION
#ceph_stable: true # use ceph stable branch
#ceph_stable_key: https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
#ceph_stable_release: giant # ceph stable release
# This option is needed for _both_ the stable and dev versions, so please always fill in the right version
# supported distros are el6, rhel6, f18, f19, opensuse12.2, sles11, centos7 (see http://ceph.com/rpm-firefly/)
#ceph_stable_redhat_distro: el7
# ENTERPRISE VERSION
#ceph_stable_ice: false # use Inktank Ceph Enterprise
#ceph_stable_ice_url: https://download.inktank.com/enterprise
#ceph_stable_ice_temp_path: /opt/ICE/ceph-repo/
#ceph_stable_ice_kmod: 3.10-0.1.20140702gitdc9ac62.el7.x86_64
#ceph_stable_ice_distro: rhel7 # Please check the download website for the supported versions.
#ceph_stable_ice_version: 1.2.2
#ceph_stable_ice_kmod_version: 1.2
#ceph_stable_ice_user: # htaccess user
#ceph_stable_ice_password: # htaccess password
# DEV
#####
#ceph_dev: false # use ceph development branch
#ceph_dev_key: https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc
#ceph_dev_branch: master # development branch you would like to use, e.g. master or wip-hack
# supported distros are centos6, centos7, fc17, fc18, fc19, fc20, fedora17, fedora18,
# fedora19, fedora20, opensuse12, sles11 (see http://gitbuilder.ceph.com/).
# For RHEL, please pay attention to the version: e.g. 'rhel6.3' or 'rhel6.4'; the full name is _very_ important.
#ceph_dev_redhat_distro: centos7
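
As a sketch, a community stable install would enable only the ceph_stable block and leave the ICE and dev options commented out (values below are illustrative, taken from the defaults above):

ceph_stable: true
ceph_stable_release: giant
ceph_stable_redhat_distro: el7
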
###############
# CONFIGURATION
###############
## Ceph options
#
#fsid: "{{ cluster_uuid.stdout }}"
#cephx: true
#fsid: # /!\ GENERATE ONE WITH 'uuidgen -r' /!\
#cephx_require_signatures: true # Kernel RBD does NOT support signatures!
#cephx_cluster_require_signatures: true
#cephx_service_require_signatures: false
#max_open_files: 131072
#disable_in_memory_logs: true
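
As a sketch, once a UUID has been generated with 'uuidgen -r', the fsid would be set like this (the UUID below is a made-up placeholder, not a value to reuse):

cephx: true
fsid: 07b2f1a4-5f3e-4d2a-8b6c-9e1d2c3b4a5f  # placeholder only, generate your own
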
## Monitor options
#
#monitor_interface: eth1
#mon_osd_down_out_interval: 600
#mon_osd_min_down_reporters: 7 # number of OSDs per host + 1
#mon_clock_drift_allowed: .15
#mon_clock_drift_warn_backoff: 30
#mon_osd_full_ratio: .95
#mon_osd_nearfull_ratio: .85
#mon_osd_report_timeout: 300
## OSD options
#
#journal_size: 100
#pool_default_pg_num: 128
#pool_default_pgp_num: 128
#pool_default_size: 2
#pool_default_min_size: 1
#cluster_network: 192.168.42.0/24
#public_network: 192.168.42.0/24
#osd_mkfs_type: xfs
#osd_mkfs_options_xfs: -f -i size=2048
#osd_mount_options_xfs: noatime
#osd_mon_heartbeat_interval: 30
# CRUSH
#pool_default_crush_rule: 0
#osd_crush_update_on_start: "true"
# Object backend
#osd_objectstore: filestore
# Performance tuning
#filestore_merge_threshold: 40
#filestore_split_multiple: 8
#osd_op_threads: 8
#filestore_op_threads: 8
#filestore_max_sync_interval: 5
#osd_max_scrubs: 1
# Recovery tuning
#osd_recovery_max_active: 5
#osd_max_backfills: 2
#osd_recovery_op_priority: 2
#osd_recovery_max_chunk: 1048576
#osd_recovery_threads: 1
## MDS options
#
#mds: false # disable mds configuration in ceph.conf
# Rados Gateway options
#
#radosgw: false # referenced in monitor role too.
#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
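
For the subdomain S3 calls mentioned above, bucket names are prepended to radosgw_dns_name, so with the hypothetical value below a bucket named 'foo' would be reachable at foo.objects.example.com (wildcard DNS for *.objects.example.com has to resolve to the gateway):

radosgw: true
radosgw_dns_name: objects.example.com  # hypothetical domain
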
## Testing mode
# Enable this mode _only_ when you have a single node.
# If you don't want it, keep the option commented out.
#common_single_host_mode: true

View File

@@ -6,3 +6,7 @@ dummy:
# Monitor options
#monitor_secret: # /!\ GENERATE ONE WITH 'ceph-authtool --gen-print-key' /!\
#cephx: true
# Rados Gateway options
#radosgw: false

View File

@@ -5,6 +5,11 @@
# Dummy variable to avoid an error, because Ansible does not recognize the file as a valid configuration file when it contains no variables.
dummy:
## Ceph options
#
#cephx: true
# Devices to be used as OSDs
# You can pre-provision disks that are not present yet.
# Ansible will just skip them. Newly added disk will be
@@ -18,26 +23,40 @@ dummy:
#
# !! WARNING !!
# USE WITH CAUTION
# Erase the partition structure and layout of the devices listed below before preparing them
#zap_devices: false
# Declare devices
# All the scenarios below inherit from the following device declaration
#
#devices:
# - /dev/sdb
# - /dev/sdc
# - /dev/sdd
# - /dev/sde
# Device discovery is based on the Ansible fact 'ansible_devices',
# which reports all the devices on a system. If enabled, every disk
# found will be passed to ceph-disk. There is no need to worry about
# using this option, since ceph-disk has a built-in check that looks for
# empty devices: devices with existing partition tables will not be used.
# This mode saves you from filling out the 'devices' variable above.
#
#osd_auto_discovery: false
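
For instance, a sketch of an osds file that relies on auto discovery instead of a 'devices' list (combined here with scenario I below):

osd_auto_discovery: true
journal_collocation: true
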
# I. First scenario: journal and osd_data on the same device
# Use 'true' to enable this scenario
# This will collocate both journal and data on the same disk,
# creating a partition at the beginning of the device
#
#journal_collocation: true
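
A minimal sketch for this first scenario (device names are illustrative): enable collocation and list the data disks; a journal partition is created at the beginning of each device.

journal_collocation: true
devices:
  - /dev/sdb
  - /dev/sdc
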
# II. Second scenario: single journal device for N OSDs
# Deprecated, please use scenario III with a single raw_journal_device
# Use 'true' to enable this scenario
#
#raw_journal: false
#raw_journal_device: /dev/sdb
# III. Third scenario: N journal devices for N OSDs
@@ -51,11 +70,19 @@ dummy:
# 2. Progressively add new devices
#raw_multi_journal: false
#raw_journal_devices:
# - /dev/sdb
# - /dev/sdb
# - /dev/sdc
# - /dev/sdc
#
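
The two lists pair up positionally: each entry of raw_journal_devices is used as the journal device for the OSD disk at the same index in devices. A sketch with illustrative names, placing two journals on each of two SSDs:

raw_multi_journal: true
devices:
  - /dev/sdb
  - /dev/sdc
  - /dev/sdd
  - /dev/sde
raw_journal_devices:
  - /dev/sdf
  - /dev/sdf
  - /dev/sdg
  - /dev/sdg
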
# IV. Fourth scenario: use directory instead of disk for OSDs
# Use 'true' to enable this scenario
#osd_directory: false
#osd_directories:
# - /var/lib/ceph/osd/mydir1
# - /var/lib/ceph/osd/mydir2
# - /var/lib/ceph/osd/mydir3
# - /var/lib/ceph/osd/mydir4