Moved variables into "defaults" directory of each role, and commented the ones in group_vars.

pull/87/head
Alexis Lahouze 2014-06-25 16:51:28 +02:00
parent e8d60215cd
commit 1d2a66666f
9 changed files with 174 additions and 35 deletions

View File

@@ -3,52 +3,51 @@
## Setup options
#
distro_release: "{{ facter_lsbdistcodename }}"
ceph_release: firefly
redhat_distro: el6 # supported distros are el6, rhel6, f18, f19, opensuse12.2, sles11
#ceph_release: firefly
#redhat_distro: el6 # supported distros are el6, rhel6, f18, f19, opensuse12.2, sles11
## Ceph options
#
cephx: true
fsid: # /!\ GENERATE ONE WITH 'uuidgen -r' /!\
#cephx: true
#fsid: # /!\ GENERATE ONE WITH 'uuidgen -r' /!\
## Monitor options
#
monitor_interface: eth1
mon_osd_down_out_interval: 600
mon_osd_min_down_reporters: 7 # number of OSDs per host + 1
#monitor_interface: eth1
#mon_osd_down_out_interval: 600
#mon_osd_min_down_reporters: 7 # number of OSDs per host + 1
## MDS options
#
mds: true # disable mds configuration in ceph.conf
#mds: true # disable mds configuration in ceph.conf
# Rados Gateway options
radosgw: true
redhat_distro_ceph_extra: centos6.4 # supported distros are centos6.3, centos6.4, centos6, fedora18, fedora19, opensuse12.2, rhel6.3, rhel6.4, rhel6.5, rhel6, sles11sp2
radosgw_interface: eth1 # the public interface which the radosgw talks to the world with, this variable is used in the haproxy role, this does not need to be set if haproxy is not used.
#radosgw: true
#redhat_distro_ceph_extra: centos6.4 # supported distros are centos6.3, centos6.4, centos6, fedora18, fedora19, opensuse12.2, rhel6.3, rhel6.4, rhel6.5, rhel6, sles11sp2
#radosgw_interface: eth1 # the public interface which the radosgw talks to the world with, this variable is used in the haproxy role, this does not need to be set if haproxy is not used.
#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
email_address: foo@bar.com
#email_address: foo@bar.com
## OSD options
#
journal_size: 100
pool_default_pg_num: 128
pool_default_pgp_num: 128
pool_default_size: 2
cluster_network: 192.168.0.0/24
public_network: 192.168.0.0/24
osd_mkfs_type: xfs
osd_mon_heartbeat_interval: 30
#journal_size: 100
#pool_default_pg_num: 128
#pool_default_pgp_num: 128
#pool_default_size: 2
#cluster_network: 192.168.0.0/24
#public_network: 192.168.0.0/24
#osd_mkfs_type: xfs
#osd_mon_heartbeat_interval: 30
# Performance tuning
filestore_merge_threshold: 40
filestore_split_multiple: 8
osd_op_threads: 8
#filestore_merge_threshold: 40
#filestore_split_multiple: 8
#osd_op_threads: 8
# Recovery tuning
osd_recovery_max_active: 5
osd_max_backfills: 2
osd_recovery_op_priority: 2
#osd_recovery_max_active: 5
#osd_max_backfills: 2
#osd_recovery_op_priority: 2
## Testing mode
# enable this mode _only_ when you have a single node
common_single_host_mode: true
#common_single_host_mode: true

View File

@@ -2,4 +2,4 @@
# Variables here are applicable to all host groups NOT roles
# Monitor options
monitor_secret: # /!\ GENERATE ONE WITH 'ceph-authtool --gen-print-key' /!\
#monitor_secret: # /!\ GENERATE ONE WITH 'ceph-authtool --gen-print-key' /!\

View File

@@ -19,7 +19,7 @@
# Declare devices
# All the scenarii inherit from the following device declaration
#
devices: [ '/dev/sdb', '/dev/sdc', '/dev/sdd', '/dev/sde']
#devices: [ '/dev/sdb', '/dev/sdc', '/dev/sdd', '/dev/sde']
# I. First scenario: journal and osd_data on the same device
@@ -27,14 +27,14 @@ devices: [ '/dev/sdb', '/dev/sdc', '/dev/sdd', '/dev/sde']
# This will collocate both journal and data on the same disk
# creating a partition at the beginning of the device
#
journal_collocation: true
#journal_collocation: true
# II. Second scenario: single journal device for N OSDs
# Use 'true' to enable this scenario
#
raw_journal: false
raw_journal_device: /dev/sdb
#raw_journal: false
#raw_journal_device: /dev/sdb
# III. Third scenario: N journal devices for N OSDs
@@ -47,8 +47,8 @@ raw_journal_device: /dev/sdb
# 1. Pre-allocate all the devices
# 2. Progressively add new devices
raw_multi_journal: false
raw_journal_devices: [ '/dev/sdb', '/dev/sdb', '/dev/sdc', '/dev/sdc' ]
#raw_multi_journal: false
#raw_journal_devices: [ '/dev/sdb', '/dev/sdb', '/dev/sdc', '/dev/sdc' ]
# IV. Fourth scenario: use directory instead of disk for OSDs

View File

@@ -0,0 +1,50 @@
---
# Variables here are applicable to the current role
## Setup options
#
distro_release: "{{ facter_lsbdistcodename }}" # Seems to be not used.
apt_key: http://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
ceph_release: emperor
redhat_distro: el6 # supported distros are el6, rhel6, f18, f19, opensuse12.2, sles11
## Ceph options
#
cephx: true
## Monitor options
#
monitor_interface: eth1
mon_osd_down_out_interval: 600
mon_osd_min_down_reporters: 7 # number of OSDs per host + 1
## MDS options
#
mds: true # disable mds configuration in ceph.conf
# Rados Gateway options
#
radosgw: true # referenced in mon role too.
#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
## OSD options
#
journal_size: 100
pool_default_pg_num: 128
pool_default_pgp_num: 128
pool_default_size: 2
cluster_network: 192.168.0.0/24
public_network: 192.168.0.0/24
osd_mkfs_type: xfs
osd_mon_heartbeat_interval: 30
# Performance tuning
filestore_merge_threshold: 40
filestore_split_multiple: 8
osd_op_threads: 8
# Recovery tuning
osd_recovery_max_active: 5
osd_max_backfills: 2
osd_recovery_op_priority: 2
## Testing mode
# enable this mode _only_ when you have a single node
common_single_host_mode: true

View File

@@ -0,0 +1,6 @@
---
# Variables here are applicable to the current role
# Rados Gateway options
radosgw_interface: eth1 # the public interface which the radosgw talks to the world with, this variable is used in the haproxy role, this does not need to be set if haproxy is not used.

View File

@@ -0,0 +1,7 @@
---
# Variables here are applicable to the current role
## Ceph options
#
cephx: true

View File

@@ -0,0 +1,10 @@
---
# Variables here are applicable to the current role
## Ceph options
#
cephx: true
# Rados Gateway options
# referenced in common role too.
radosgw: true

View File

@@ -0,0 +1,55 @@
---
# Variables here are applicable to the current role
#
## Ceph options
#
cephx: true
# Devices to be used as OSDs
# You can pre-provision disks that are not present yet.
# Ansible will just skip them. Newly added disk will be
# automatically configured during the next run.
#
# !! WARNING !!
#
# /!\ ENABLE ONLY ONE SCENARIO AT A TIME /!\
#
# !! WARNING !!
# Declare devices
# All the scenarii inherit from the following device declaration
#
devices: [ '/dev/sdb', '/dev/sdc', '/dev/sdd', '/dev/sde']
# I. First scenario: journal and osd_data on the same device
# Use 'true' to enable this scenario
# This will collocate both journal and data on the same disk
# creating a partition at the beginning of the device
#
journal_collocation: true
# II. Second scenario: single journal device for N OSDs
# Use 'true' to enable this scenario
#
raw_journal: false
raw_journal_device: /dev/sdb
# III. Third scenario: N journal devices for N OSDs
# Use 'true' to enable this scenario
#
# In the following example:
# * sdd and sde will get sdb as a journal
# * sdf and sdg will get sdc as a journal
# While starting you have 2 options:
# 1. Pre-allocate all the devices
# 2. Progressively add new devices
raw_multi_journal: false
raw_journal_devices: [ '/dev/sdb', '/dev/sdb', '/dev/sdc', '/dev/sdc' ]

View File

@@ -0,0 +1,12 @@
---
# Variables here are applicable to the current role
## Ceph options
#
cephx: true
# Rados Gateway options
redhat_distro_ceph_extra: centos6.4 # supported distros are centos6.3, centos6.4, centos6, fedora18, fedora19, opensuse12.2, rhel6.3, rhel6.4, rhel6.5, rhel6, sles11sp2
email_address: foo@bar.com