Merge pull request #580 from stpierre/generate-group-vars-sample

Generate all.sample automagically
Leseb 2016-03-01 19:00:19 +01:00
commit 40c61f5431
13 changed files with 827 additions and 713 deletions

View File

@@ -0,0 +1,38 @@
#!/bin/bash
# Regenerate the group_vars/*.sample files from each role's defaults/main.yml
set -euo pipefail

basedir=$(dirname "$0")

for role in "$basedir"/roles/ceph-*; do
    rolename=$(basename "$role")
    if [[ $rolename == "ceph-common" ]]; then
        output="all.sample"
    elif [[ $rolename == "ceph-agent" ]]; then
        output="agent.sample"
    elif [[ $rolename == "ceph-fetch-keys" ]]; then
        continue
    else
        output="${rolename:5}s.sample"
    fi

    cat <<EOF > "$basedir"/group_vars/"$output"
---
# Variables here are applicable to all host groups NOT roles
# This sample file generated by $(basename "$0")
# Dummy variable to avoid error because ansible does not recognize the
# file as a valid configuration file when there is no variable in it.
dummy:
EOF

    defaults="$role"/defaults/main.yml
    if [[ ! -f $defaults ]]; then
        continue
    fi

    # drop the leading '---' and comment out every default so the sample is inert
    sed '/^---/d; s/^\([A-Za-z[:space:]].\+\)/#\1/' \
        "$defaults" >> "$basedir"/group_vars/"$output"
    echo >> "$basedir"/group_vars/"$output"
done
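A minimal usage sketch, not part of this diff: run the script from the repository root after touching any role's defaults/main.yml, then commit the refreshed samples. The echo/sed line only illustrates what the substitution above does to a single default; the git commands are ordinary git, nothing ceph-ansible specific.

./generate_group_vars_sample.sh      # rewrites group_vars/*.sample in place
git diff --stat group_vars/          # review which samples changed
# the sed above comments out every default it copies over, e.g.:
echo "fetch_directory: fetch/" | sed 's/^\([A-Za-z[:space:]].\+\)/#\1/'
# prints: #fetch_directory: fetch/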

View File

@@ -1,12 +1,19 @@
---
# You can override vars by using host or group vars
# Variables here are applicable to all host groups NOT roles
# Dummy variable to avoid error because ansible does not recognize the file as a good configuration file when no variable in it.
# This sample file generated by generate_group_vars_sample.sh
# Dummy variable to avoid error because ansible does not recognize the
# file as a valid configuration file when there is no variable in it.
dummy:
# You can override vars by using host or group vars
###########
# GENERAL #
###########
# The agent needs an agent_master_host variable defined so that it can connect
# and push information back to it
#agent_master_host: "localhost"

View File

@@ -1,18 +1,23 @@
---
# Variables here are applicable to all host groups NOT roles
# Dummy variable to avoid error because ansible does not recognize the file as a good configuration file when no variable in it.
# This sample file generated by generate_group_vars_sample.sh
# Dummy variable to avoid error because ansible does not recognize the
# file as a valid configuration file when there is no variable in it.
dummy:
# You can override vars by using host or group vars
###########
# GENERAL #
###########
#fetch_directory: fetch/
#########
# INSTALL
#########
###########
# INSTALL #
###########
#mon_group_name: mons
#osd_group_name: osds
@@ -54,6 +59,7 @@ dummy:
#
#ceph_use_distro_backports: false # DEBIAN ONLY
# STABLE
########
@@ -72,7 +78,7 @@ dummy:
# - firefly
# - giant
# - hammer
#
# Use the option below to specify your applicable package tree, e.g. when using non-LTS Ubuntu versions
# # for a list of available Debian distributions, visit http://ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
@@ -85,6 +91,9 @@ dummy:
# ENTERPRISE VERSION ICE (old, prior to the 1.3)
#ceph_stable_ice: false # use Inktank Ceph Enterprise
#ceph_stable_ice_url: https://download.inktank.com/enterprise
# these two variables are used in `with_items` and, starting
# with ansible 2.0, they need to be defined even if the task's
# `when` clause doesn't evaluate to true
#ceph_stable_ice_temp_path: /opt/ICE/ceph-repo/
#ceph_stable_ice_kmod: 3.10-0.1.20140702gitdc9ac62.el7.x86_64
#ceph_stable_ice_distro: rhel7 # Please check the download website for the supported versions.
@@ -124,9 +133,10 @@ dummy:
# For rhel, please pay attention to the versions: 'rhel6 3' or 'rhel 4', the fullname is _very_ important.
#ceph_dev_redhat_distro: centos7
###############
# CONFIGURATION
###############
######################
# CEPH CONFIGURATION #
######################
## Ceph options
#
@@ -140,11 +150,21 @@ dummy:
#generate_fsid: true
#cephx: true
#cephx_require_signatures: true # Kernel RBD does NOT support signatures!
#cephx_require_signatures: true # Kernel RBD does NOT support signatures for Kernels < 3.18!
#cephx_cluster_require_signatures: true
#cephx_service_require_signatures: false
#max_open_files: 131072
#disable_in_memory_logs: true
#disable_in_memory_logs: true # set this to false while enabling the options below
# Debug logs
#enable_debug_global: false
#debug_global_level: 20
#enable_debug_mon: false
#debug_mon_level: 20
#enable_debug_osd: false
#debug_osd_level: 20
#enable_debug_mds: false
#debug_mds_level: 20
## Client options
#
@@ -154,7 +174,7 @@ dummy:
#rbd_client_directories: false # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions, this WON'T work if libvirt and kvm are installed
#rbd_client_log_file: /var/log/rbd-clients/qemu-guest-$pid.log # must be writable by QEMU and allowed by SELinux or AppArmor
#rbd_client_log_path: /var/log/rbd-clients/
#rbd_client_admin_socket_path: /var/run/ceph/rbd-clients/
#rbd_client_admin_socket_path: /var/run/ceph/rbd-clients # must be writable by QEMU and allowed by SELinux or AppArmor
#rbd_default_features: 3
#rbd_default_map_options: rw
#rbd_default_format: 2
@@ -162,7 +182,6 @@ dummy:
## Monitor options
#
#monitor_interface: interface
#monitor_secret: "{{ monitor_keyring.stdout }}"
#mon_osd_down_out_interval: 600
#mon_osd_min_down_reporters: 7 # number of OSDs per host + 1
#mon_clock_drift_allowed: .15
@@ -182,15 +201,16 @@ dummy:
#pool_default_size: 2
#pool_default_min_size: 1
#public_network: 0.0.0.0/0
#cluster_network: {{ public_network }}
#cluster_network: "{{ public_network }}"
#osd_mkfs_type: xfs
#osd_mkfs_options_xfs: -f -i size=2048
#osd_mount_options_xfs: noatime
#osd_mount_options_xfs: noatime,largeio,inode64,swalloc
#osd_mon_heartbeat_interval: 30
# CRUSH
#pool_default_crush_rule: 0
#osd_crush_update_on_start: "true"
# Object backend
#osd_objectstore: filestore
@@ -199,7 +219,7 @@ dummy:
# be set to 'true' or 'false' to explicitly override those
# defaults. Leave it 'null' to use the default for your chosen mkfs
# type.
# filestore_xattr_use_omap: null
#filestore_xattr_use_omap: null
# Performance tuning
#filestore_merge_threshold: 40
@@ -228,11 +248,11 @@ dummy:
#osd_scrub_chunk_max: 5
#osd_deep_scrub_stride: 1048576
# Rados Gateway options
## Rados Gateway options
#
#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
#radosgw_frontend: civetweb # supported options are 'apache' or 'civetweb', also edit roles/ceph-rgw/defaults/main.yml
#radosgw_civetweb_port: 8080
#radosgw_civetweb_port: 8080 # on Infernalis we get: "set_ports_option: cannot bind to 80: 13 (Permission denied)"
#radosgw_keystone: false # activate OpenStack Keystone options full detail here: http://ceph.com/docs/master/radosgw/keystone/
#radosgw_keystone_url: # url:admin_port ie: http://192.168.0.1:35357
#radosgw_keystone_admin_token: password
@@ -253,7 +273,7 @@ dummy:
#restapi_interface: "{{ monitor_interface }}"
#restapi_port: 5000
#restapi_base_url: /api/v0.1
#restapi_log_level: warning
#restapi_log_level: warning # available levels are: critical, error, warning, info, debug
## Testing mode
# enable this mode _only_ when you have a single node
@@ -291,3 +311,11 @@ dummy:
# - { name: vm.zone_reclaim_mode, value: 0 }
# - { name: vm.vfs_cache_pressure, value: 50 }
# - { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" }
##########
# DOCKER #
##########
#docker: false
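The generated header keeps pointing at host and group vars for overrides; a short sketch of that workflow (the copy-and-uncomment step is the conventional way these .sample files are consumed rather than anything this diff enforces, and eth1 / 192.168.42.0/24 are made-up example values):

cp group_vars/all.sample group_vars/all
# uncomment and edit only the values you want to override, e.g.:
#   #monitor_interface: interface   ->   monitor_interface: eth1
#   #public_network: 0.0.0.0/0      ->   public_network: 192.168.42.0/24
# anything left commented keeps its default from roles/ceph-common/defaults/main.yml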

View File

@@ -1,20 +1,28 @@
---
# Variables here are applicable to all host groups NOT roles
# This sample file generated by generate_group_vars_sample.sh
# Dummy variable to avoid error because ansible does not recognize the
# file as a valid configuration file when there is no variable in it.
dummy:
# You can override vars by using host or group vars
###########
# GENERAL #
###########
#cephx: true
#fetch_directory: fetch/
# Even though MDS nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on MDS nodes. Setting 'copy_admin_key' to 'true'
# will copy the admin key to the /etc/ceph/ directory
# copy_admin_key: false
#copy_admin_key: false
#cephx: true
##########
# DOCKER #
@@ -24,3 +32,4 @@ dummy:
#ceph_mds_docker_username: ceph
#ceph_mds_docker_imagename: daemon
#ceph_mds_docker_extra_env: "MDS_NAME={{ ansible_hostname }}" # comma separated variables

View File

@@ -1,14 +1,22 @@
---
# You can override vars by using host or group vars
# Variables here are applicable to all host groups NOT roles
# Dummy variable to avoid error because ansible does not recognize the file as a good configuration file when no variable in it.
# This sample file generated by generate_group_vars_sample.sh
# Dummy variable to avoid error because ansible does not recognize the
# file as a valid configuration file when there is no variable in it.
dummy:
# You can override vars by using host or group vars
###########
# GENERAL #
###########
#fetch_directory: fetch/
#mon_group_name: mons
# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
#fsid: "{{ cluster_uuid.stdout }}"
#monitor_secret: "{{ monitor_keyring.stdout }}"
@@ -31,33 +39,39 @@ dummy:
# - nopgchange
# - nodelete
# - nosizechange
#
#fetch_directory: fetch/
#############
# OPENSTACK #
#############
#openstack_config: false
#openstack_glance_pool: images
#openstack_cinder_pool: volumes
#openstack_nova_pool: vms
#openstack_cinder_backup_pool: backups
#
#openstack_keys:
# - { name: client.glance, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool }}'" }
# - { name: client.cinder, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_pool }}, allow rwx pool={{ openstack_nova_pool }}, allow rx pool={{ openstack_glance_pool }}'" }
# - { name: client.cinder-backup, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_backup_pool }}'" }
#openstack_glance_pool:
# name: images
# pg_num: "{{ pool_default_pg_num }}"
#openstack_cinder_pool:
# name: volumes
# pg_num: "{{ pool_default_pg_num }}"
#openstack_nova_pool:
# name: vms
# pg_num: "{{ pool_default_pg_num }}"
#openstack_cinder_backup_pool:
# name: backups
# pg_num: "{{ pool_default_pg_num }}"
#openstack_keys:
# - { name: client.glance, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool.name }}'" }
# - { name: client.cinder, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_pool.name }}, allow rwx pool={{ openstack_nova_pool.name }}, allow rx pool={{ openstack_glance_pool.name }}'" }
# - { name: client.cinder-backup, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_backup_pool.name }}'" }
##########
# DOCKER #
##########
#mon_containerized_deployment: false
#ceph_mon_docker_username: ceph
#ceph_mon_docker_imagename: "daemon"
#ceph_mon_docker_interface: eth0
#ceph_mon_docker_subnet: # subnet of the ceph_mon_docker_interface
#ceph_mon_docker_username: ceph
#ceph_mon_docker_imagename: daemon
#ceph_mon_extra_envs: "MON_NAME={{ ansible_hostname }}" # comma separated variables

View File

@@ -1,10 +1,15 @@
---
# Variables here are applicable to all host groups NOT roles
#
# Dummy variable to avoid error because ansible does not recognize the file as a good configuration file when no variable in it.
# This sample file generated by generate_group_vars_sample.sh
# Dummy variable to avoid error because ansible does not recognize the
# file as a valid configuration file when there is no variable in it.
dummy:
# You can override default vars defined in defaults/main.yml here,
# but I would advise using host or group vars instead
###########
# GENERAL #
###########
@@ -32,13 +37,15 @@ dummy:
# [osds]
# osd0 ceph_crush_root=foo ceph_crush_rack=bar
crush_location: false
osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}'"
#crush_location: false
#osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}'"
##############
# CEPH OPTIONS
##############
# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
#fsid: "{{ cluster_uuid.stdout }}"
#cephx: true
# Devices to be used as OSDs
@@ -60,8 +67,6 @@ osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host
#devices:
# - /dev/sdb
# - /dev/sdc
# - /dev/sdd
# - /dev/sde
# Device discovery is based on the Ansible fact 'ansible_devices'
# which reports all the devices on a system. If chosen all the disks
@@ -102,7 +107,7 @@ osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host
# - /dev/sdb
# - /dev/sdc
# - /dev/sdc
#
# IV. Fourth scenario: use directory instead of disk for OSDs
# Use 'true' to enable this scenario
@@ -111,8 +116,6 @@ osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host
#osd_directories:
# - /var/lib/ceph/osd/mydir1
# - /var/lib/ceph/osd/mydir2
# - /var/lib/ceph/osd/mydir3
# - /var/lib/ceph/osd/mydir4
##########
@@ -125,3 +128,4 @@ osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host
#ceph_osd_docker_extra_env: "CEPH_DAEMON=OSD_CEPH_DISK" # comma separated variables
#ceph_osd_docker_devices:
# - /dev/sdb
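Since the auto-discovery scenario above relies on the 'ansible_devices' fact, a quick way to check what that fact reports before enabling it (the inventory path is a placeholder; 'osds' is the stock group name used throughout these samples):

ansible osds -i <your-inventory> -m setup -a 'filter=ansible_devices'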

View File

@@ -1,9 +1,13 @@
---
# Variables here are applicable to all host groups NOT roles
# Dummy variable to avoid error because ansible does not recognize the file as a good configuration file when no variable in it.
# This sample file generated by generate_group_vars_sample.sh
# Dummy variable to avoid error because ansible does not recognize the
# file as a valid configuration file when there is no variable in it.
dummy:
###########
# GENERAL #
###########
@@ -20,3 +24,4 @@ dummy:
#ceph_restapi_docker_username: ceph
#ceph_restapi_docker_imagename: daemon
#ceph_restapi_docker_extra_env: "RESTAPI_IP=0.0.0.0" # comma separated variables

View File

@@ -1,33 +1,42 @@
---
# Variables here are applicable to all host groups NOT roles
# This sample file generated by generate_group_vars_sample.sh
# Dummy variable to avoid error because ansible does not recognize the
# file as a valid configuration file when there is no variable in it.
dummy:
# You can override vars by using host or group vars
###########
# GENERAL #
###########
## Ceph options
#
#cephx: true
#fetch_directory: fetch/
# Even though RGW nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on RGW nodes. Setting 'copy_admin_key' to 'true'
# will copy the admin key to the /etc/ceph/ directory
# copy_admin_key: false
#copy_admin_key: false
## Ceph options
#
#cephx: true
# Used for the sudo exception while starting the radosgw process:
# a new entry /etc/sudoers.d/ceph will be created,
# allowing root to not require a tty
#radosgw_user: root
#fetch_directory: fetch/
##########
# DOCKER #
##########
#rgw_containerized_deployment: false
#ceph_rgw_civetweb_port: 80
#ceph_rgw_docker_username: ceph
#ceph_rgw_docker_imagename: daemon
#ceph_rgw_civetweb_port: 80
#ceph_rgw_docker_extra_env: "RGW_CIVETWEB_PORT={{ ceph_rgw_civetweb_port }}" # comma separated variables