Generate group_vars samples automagically

This adds a script, generate_group_vars_sample.sh, that generates
group_vars/*.sample from roles/ceph-*/defaults/main.yml to avoid
discrepancies between the sets of files. It also converts the line
endings in the various main.yml from DOS to Unix, since generating the
samples was spreading the line ending plague around to more files.
pull/580/head
Chris St. Pierre 2016-02-29 09:35:07 -06:00
parent f9c3159e0d
commit c4a9b1020f
13 changed files with 827 additions and 713 deletions
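The line-ending cleanup is applied to the files themselves rather than by the script; a minimal sketch of the conversion, assuming GNU sed (dos2unix would do the same job):

    # Strip the trailing carriage return that DOS line endings leave
    # on every line; -i edits the defaults files in place (GNU sed).
    sed -i 's/\r$//' roles/ceph-*/defaults/main.yml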

View File

@@ -0,0 +1,38 @@
#!/bin/bash
set -euo pipefail

basedir=$(dirname "$0")

for role in "$basedir"/roles/ceph-*; do
    rolename=$(basename "$role")
    # Map each role to its group_vars sample file: ceph-common feeds
    # all.sample, and ceph-fetch-keys gets no sample at all.
    if [[ $rolename == "ceph-common" ]]; then
        output="all.sample"
    elif [[ $rolename == "ceph-agent" ]]; then
        output="agent.sample"
    elif [[ $rolename == "ceph-fetch-keys" ]]; then
        continue
    else
        output="${rolename:5}s.sample"  # e.g. ceph-osd -> osds.sample
    fi

    # Start the sample with the shared header.
    cat <<EOF > "$basedir"/group_vars/"$output"
---
# Variables here are applicable to all host groups NOT roles
# This sample file generated by $(basename "$0")
# Dummy variable to avoid error because ansible does not recognize the
# file as a good configuration file when no variable in it.
dummy:
EOF

    # Append the role's defaults, if any, with every variable commented out.
    defaults="$role"/defaults/main.yml
    if [[ ! -f $defaults ]]; then
        continue
    fi
    sed '/^---/d; s/^\([A-Za-z[:space:]].\+\)/#\1/' \
        "$defaults" >> "$basedir"/group_vars/"$output"
    echo >> "$basedir"/group_vars/"$output"
done
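A hypothetical invocation and a spot check of the filter's behaviour (the input line below is made up for illustration; the sed expression is the one used above):

    # Regenerate every group_vars/*.sample from the role defaults.
    ./generate_group_vars_sample.sh

    # The filter deletes the leading '---' document marker and comments
    # out any line starting with a letter or whitespace, so defaults come
    # out disabled while existing comment lines pass through unchanged.
    printf 'journal_collocation: false\n' |
        sed '/^---/d; s/^\([A-Za-z[:space:]].\+\)/#\1/'
    # -> #journal_collocation: false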

View File

@@ -1,12 +1,19 @@
---
# You can override vars by using host or group vars
# Variables here are applicable to all host groups NOT roles
# Dummy variable to avoid error because ansible does not recognize the file as a good configuration file when no variable in it.
# This sample file generated by generate_group_vars_sample.sh
# Dummy variable to avoid error because ansible does not recognize the
# file as a good configuration file when no variable in it.
dummy:
# You can override vars by using host or group vars
###########
# GENERAL #
###########
# The agent needs an agent_master_host variable defined so that it can connect
# and push information back to it
#agent_master_host: "localhost"

View File

@@ -1,18 +1,23 @@
---
# Variables here are applicable to all host groups NOT roles
# Dummy variable to avoid error because ansible does not recognize the file as a good configuration file when no variable in it.
# This sample file generated by generate_group_vars_sample.sh
# Dummy variable to avoid error because ansible does not recognize the
# file as a good configuration file when no variable in it.
dummy:
# You can override vars by using host or group vars
###########
# GENERAL #
###########
#fetch_directory: fetch/
#########
# INSTALL
#########
###########
# INSTALL #
###########
#mon_group_name: mons
#osd_group_name: osds
@@ -54,6 +59,7 @@ dummy:
#
#ceph_use_distro_backports: false # DEBIAN ONLY
# STABLE
########
@@ -72,7 +78,7 @@ dummy:
# - firefly
# - giant
# - hammer
#
# Use the option below to specify your applicable package tree, e.g. when using non-LTS Ubuntu versions
# # for a list of available Debian distributions, visit http://ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
@@ -85,6 +91,9 @@ dummy:
# ENTERPRISE VERSION ICE (old, prior to the 1.3)
#ceph_stable_ice: false # use Inktank Ceph Enterprise
#ceph_stable_ice_url: https://download.inktank.com/enterprise
# these two variables are used in `with_items` and starting
# with ansible 2.0 these need to be defined even if the task's
# `when` clause doesn't evaluate to true
#ceph_stable_ice_temp_path: /opt/ICE/ceph-repo/
#ceph_stable_ice_kmod: 3.10-0.1.20140702gitdc9ac62.el7.x86_64
#ceph_stable_ice_distro: rhel7 # Please check the download website for the supported versions.
@@ -124,9 +133,10 @@ dummy:
# For rhel, please pay attention to the versions: 'rhel6 3' or 'rhel 4', the fullname is _very_ important.
#ceph_dev_redhat_distro: centos7
###############
# CONFIGURATION
###############
######################
# CEPH CONFIGURATION #
######################
## Ceph options
#
@@ -140,11 +150,21 @@ dummy:
#generate_fsid: true
#cephx: true
#cephx_require_signatures: true # Kernel RBD does NOT support signatures!
#cephx_require_signatures: true # Kernel RBD does NOT support signatures for Kernels < 3.18!
#cephx_cluster_require_signatures: true
#cephx_service_require_signatures: false
#max_open_files: 131072
#disable_in_memory_logs: true
#disable_in_memory_logs: true # set this to false while enabling the options below
# Debug logs
#enable_debug_global: false
#debug_global_level: 20
#enable_debug_mon: false
#debug_mon_level: 20
#enable_debug_osd: false
#debug_osd_level: 20
#enable_debug_mds: false
#debug_mds_level: 20
## Client options
#
@@ -154,7 +174,7 @@ dummy:
#rbd_client_directories: false # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions, this WON'T work if libvirt and kvm are installed
#rbd_client_log_file: /var/log/rbd-clients/qemu-guest-$pid.log # must be writable by QEMU and allowed by SELinux or AppArmor
#rbd_client_log_path: /var/log/rbd-clients/
#rbd_client_admin_socket_path: /var/run/ceph/rbd-clients/
#rbd_client_admin_socket_path: /var/run/ceph/rbd-clients # must be writable by QEMU and allowed by SELinux or AppArmor
#rbd_default_features: 3
#rbd_default_map_options: rw
#rbd_default_format: 2
@@ -162,7 +182,6 @@ dummy:
## Monitor options
#
#monitor_interface: interface
#monitor_secret: "{{ monitor_keyring.stdout }}"
#mon_osd_down_out_interval: 600
#mon_osd_min_down_reporters: 7 # number of OSDs per host + 1
#mon_clock_drift_allowed: .15
@@ -182,15 +201,16 @@ dummy:
#pool_default_size: 2
#pool_default_min_size: 1
#public_network: 0.0.0.0/0
#cluster_network: {{ public_network }}
#cluster_network: "{{ public_network }}"
#osd_mkfs_type: xfs
#osd_mkfs_options_xfs: -f -i size=2048
#osd_mount_options_xfs: noatime
#osd_mount_options_xfs: noatime,largeio,inode64,swalloc
#osd_mon_heartbeat_interval: 30
# CRUSH
#pool_default_crush_rule: 0
#osd_crush_update_on_start: "true"
# Object backend
#osd_objectstore: filestore
@@ -199,7 +219,7 @@ dummy:
# be set to 'true' or 'false' to explicitly override those
# defaults. Leave it 'null' to use the default for your chosen mkfs
# type.
# filestore_xattr_use_omap: null
#filestore_xattr_use_omap: null
# Performance tuning
#filestore_merge_threshold: 40
@@ -228,11 +248,11 @@ dummy:
#osd_scrub_chunk_max: 5
#osd_deep_scrub_stride: 1048576
# Rados Gateway options
## Rados Gateway options
#
#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
#radosgw_frontend: civetweb # supported options are 'apache' or 'civetweb', also edit roles/ceph-rgw/defaults/main.yml
#radosgw_civetweb_port: 8080
#radosgw_civetweb_port: 8080 # on Infernalis we get: "set_ports_option: cannot bind to 80: 13 (Permission denied)"
#radosgw_keystone: false # activate OpenStack Keystone options full detail here: http://ceph.com/docs/master/radosgw/keystone/
#radosgw_keystone_url: # url:admin_port ie: http://192.168.0.1:35357
#radosgw_keystone_admin_token: password
@@ -253,7 +273,7 @@ dummy:
#restapi_interface: "{{ monitor_interface }}"
#restapi_port: 5000
#restapi_base_url: /api/v0.1
#restapi_log_level: warning
#restapi_log_level: warning # available levels are: critical, error, warning, info, debug
## Testing mode
# enable this mode _only_ when you have a single node
@@ -291,3 +311,11 @@ dummy:
# - { name: vm.zone_reclaim_mode, value: 0 }
# - { name: vm.vfs_cache_pressure, value: 50 }
# - { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" }
##########
# DOCKER #
##########
#docker: false

View File

@@ -1,26 +1,35 @@
---
dummy:
###########
# GENERAL #
###########
#cephx: true
#fetch_directory: fetch/
# Even though MDS nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on MDS nodes. Setting 'copy_admin_key' to 'true'
# will copy the admin key to the /etc/ceph/ directory
# copy_admin_key: false
##########
# DOCKER #
##########
#mds_containerized_deployment: false
#ceph_mds_docker_username: ceph
#ceph_mds_docker_imagename: daemon
#ceph_mds_docker_extra_env: "MDS_NAME={{ ansible_hostname }}" # comma separated variables
---
# Variables here are applicable to all host groups NOT roles
# This sample file generated by generate_group_vars_sample.sh
# Dummy variable to avoid error because ansible does not recognize the
# file as a good configuration file when no variable in it.
dummy:
# You can override vars by using host or group vars
###########
# GENERAL #
###########
#fetch_directory: fetch/
# Even though MDS nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on MDS nodes. Setting 'copy_admin_key' to 'true'
# will copy the admin key to the /etc/ceph/ directory
#copy_admin_key: false
#cephx: true
##########
# DOCKER #
##########
#mds_containerized_deployment: false
#ceph_mds_docker_username: ceph
#ceph_mds_docker_imagename: daemon
#ceph_mds_docker_extra_env: "MDS_NAME={{ ansible_hostname }}" # comma separated variables

View File

@@ -1,63 +1,77 @@
---
# You can override vars by using host or group vars
# Variables here are applicable to all host groups NOT roles
# Dummy variable to avoid error because ansible does not recognize the file as a good configuration file when no variable in it.
dummy:
###########
# GENERAL #
###########
# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
#fsid: "{{ cluster_uuid.stdout }}"
#monitor_secret: "{{ monitor_keyring.stdout }}"
#cephx: true
# CephFS
#pool_default_pg_num: 128
#cephfs_data: cephfs_data
#cephfs_metadata: cephfs_metadata
#cephfs: cephfs
# Secure your cluster
# This will set the following flags on all the pools:
# * nosizechange
# * nopgchange
# * nodelete
#secure_cluster: false
#secure_cluster_flags:
# - nopgchange
# - nodelete
# - nosizechange
#
#fetch_directory: fetch/
#############
# OPENSTACK #
#############
#openstack_config: false
#openstack_glance_pool: images
#openstack_cinder_pool: volumes
#openstack_nova_pool: vms
#openstack_cinder_backup_pool: backups
#
#openstack_keys:
# - { name: client.glance, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool }}'" }
# - { name: client.cinder, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_pool }}, allow rwx pool={{ openstack_nova_pool }}, allow rx pool={{ openstack_glance_pool }}'" }
# - { name: client.cinder-backup, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_backup_pool }}'" }
##########
# DOCKER #
##########
#mon_containerized_deployment: false
#ceph_mon_docker_username: ceph
#ceph_mon_docker_imagename: "daemon"
#ceph_mon_docker_interface: eth0
#ceph_mon_docker_subnet: # subnet of the ceph_mon_docker_interface
#ceph_mon_extra_envs: "MON_NAME={{ ansible_hostname }}" # comma separated variables
---
# Variables here are applicable to all host groups NOT roles
# This sample file generated by generate_group_vars_sample.sh
# Dummy variable to avoid error because ansible does not recognize the
# file as a good configuration file when no variable in it.
dummy:
# You can override vars by using host or group vars
###########
# GENERAL #
###########
#fetch_directory: fetch/
#mon_group_name: mons
# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
#fsid: "{{ cluster_uuid.stdout }}"
#monitor_secret: "{{ monitor_keyring.stdout }}"
#cephx: true
# CephFS
#pool_default_pg_num: 128
#cephfs_data: cephfs_data
#cephfs_metadata: cephfs_metadata
#cephfs: cephfs
# Secure your cluster
# This will set the following flags on all the pools:
# * nosizechange
# * nopgchange
# * nodelete
#secure_cluster: false
#secure_cluster_flags:
# - nopgchange
# - nodelete
# - nosizechange
#############
# OPENSTACK #
#############
#openstack_config: false
#openstack_glance_pool:
# name: images
# pg_num: "{{ pool_default_pg_num }}"
#openstack_cinder_pool:
# name: volumes
# pg_num: "{{ pool_default_pg_num }}"
#openstack_nova_pool:
# name: vms
# pg_num: "{{ pool_default_pg_num }}"
#openstack_cinder_backup_pool:
# name: backups
# pg_num: "{{ pool_default_pg_num }}"
#openstack_keys:
# - { name: client.glance, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool.name }}'" }
# - { name: client.cinder, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_pool.name }}, allow rwx pool={{ openstack_nova_pool.name }}, allow rx pool={{ openstack_glance_pool.name }}'" }
# - { name: client.cinder-backup, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_backup_pool.name }}'" }
##########
# DOCKER #
##########
#mon_containerized_deployment: false
#ceph_mon_docker_interface: eth0
#ceph_mon_docker_subnet: # subnet of the ceph_mon_docker_interface
#ceph_mon_docker_username: ceph
#ceph_mon_docker_imagename: daemon
#ceph_mon_extra_envs: "MON_NAME={{ ansible_hostname }}" # comma separated variables

View File

@@ -1,10 +1,15 @@
---
# Variables here are applicable to all host groups NOT roles
#
# Dummy variable to avoid error because ansible does not recognize the file as a good configuration file when no variable in it.
# This sample file generated by generate_group_vars_sample.sh
# Dummy variable to avoid error because ansible does not recognize the
# file as a good configuration file when no variable in it.
dummy:
# You can override default vars defined in defaults/main.yml here,
# but I would advise using host or group vars instead
###########
# GENERAL #
###########
@@ -32,13 +37,15 @@ dummy:
# [osds]
# osd0 ceph_crush_root=foo ceph_crush_rack=bar
crush_location: false
osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}'"
#crush_location: false
#osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}'"
##############
# CEPH OPTIONS
##############
# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
#fsid: "{{ cluster_uuid.stdout }}"
#cephx: true
# Devices to be used as OSDs
@@ -60,8 +67,6 @@ osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host
#devices:
# - /dev/sdb
# - /dev/sdc
# - /dev/sdd
# - /dev/sde
# Device discovery is based on the Ansible fact 'ansible_devices'
# which reports all the devices on a system. If chosen all the disks
@@ -102,7 +107,7 @@ osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host
# - /dev/sdb
# - /dev/sdc
# - /dev/sdc
#
# IV. Fourth scenario: use directory instead of disk for OSDs
# Use 'true' to enable this scenario
@@ -111,8 +116,6 @@ osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host
#osd_directories:
# - /var/lib/ceph/osd/mydir1
# - /var/lib/ceph/osd/mydir2
# - /var/lib/ceph/osd/mydir3
# - /var/lib/ceph/osd/mydir4
##########
@@ -125,3 +128,4 @@ osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host
#ceph_osd_docker_extra_env: "CEPH_DAEMON=OSD_CEPH_DISK" # comma separated variables
#ceph_osd_docker_devices:
# - /dev/sdb

View File

@@ -1,9 +1,13 @@
---
# Variables here are applicable to all host groups NOT roles
# Dummy variable to avoid error because ansible does not recognize the file as a good configuration file when no variable in it.
# This sample file generated by generate_group_vars_sample.sh
# Dummy variable to avoid error because ansible does not recognize the
# file as a good configuration file when no variable in it.
dummy:
###########
# GENERAL #
###########
@@ -20,3 +24,4 @@ dummy:
#ceph_restapi_docker_username: ceph
#ceph_restapi_docker_imagename: daemon
#ceph_restapi_docker_extra_env: "RESTAPI_IP=0.0.0.0" # comma separated variables

View File

@@ -1,33 +1,42 @@
---
dummy:
###########
# GENERAL #
###########
## Ceph options
#
#cephx: true
# Even though RGW nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on RGW nodes. Setting 'copy_admin_key' to 'true'
# will copy the admin key to the /etc/ceph/ directory
# copy_admin_key: false
# Used for the sudo exception while starting the radosgw process
# a new entry /etc/sudoers.d/ceph will be created
# allowing root to not require tty
#radosgw_user: root
#fetch_directory: fetch/
##########
# DOCKER #
##########
#rgw_containerized_deployment: false
#ceph_rgw_docker_username: ceph
#ceph_rgw_docker_imagename: daemon
#ceph_rgw_civetweb_port: 80
#ceph_rgw_docker_extra_env: "RGW_CIVETWEB_PORT={{ ceph_rgw_civetweb_port }}" # comma separated variables
---
# Variables here are applicable to all host groups NOT roles
# This sample file generated by generate_group_vars_sample.sh
# Dummy variable to avoid error because ansible does not recognize the
# file as a good configuration file when no variable in it.
dummy:
# You can override vars by using host or group vars
###########
# GENERAL #
###########
#fetch_directory: fetch/
# Even though RGW nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on RGW nodes. Setting 'copy_admin_key' to 'true'
# will copy the admin key to the /etc/ceph/ directory
#copy_admin_key: false
## Ceph options
#
#cephx: true
# Used for the sudo exception while starting the radosgw process
# a new entry /etc/sudoers.d/ceph will be created
# allowing root to not require tty
#radosgw_user: root
##########
# DOCKER #
##########
#rgw_containerized_deployment: false
#ceph_rgw_civetweb_port: 80
#ceph_rgw_docker_username: ceph
#ceph_rgw_docker_imagename: daemon
#ceph_rgw_docker_extra_env: "RGW_CIVETWEB_PORT={{ ceph_rgw_civetweb_port }}" # comma separated variables

View File

@@ -1,312 +1,312 @@
---
# You can override vars by using host or group vars
###########
# GENERAL #
###########
fetch_directory: fetch/
###########
# INSTALL #
###########
mon_group_name: mons
osd_group_name: osds
rgw_group_name: rgws
mds_group_name: mdss
restapi_group_name: restapis
# If check_firewall is true, then ansible will try to determine if the
# Ceph ports are blocked by a firewall. If the machine running ansible
# cannot reach the Ceph ports for some other reason, you may need or
# want to set this to False to skip those checks.
check_firewall: True
# This variable determines if ceph packages can be updated. If False, the
# package resources will use "state=present". If True, they will use
# "state=latest".
upgrade_ceph_packages: False
# /!\ EITHER ACTIVATE ceph_stable OR ceph_stable_ice OR ceph_dev /!\
debian_package_dependencies:
- python-pycurl
- hdparm
- ntp
redhat_package_dependencies:
- python-pycurl
- hdparm
- yum-plugin-priorities.noarch
- epel-release
- ntp
- python-setuptools
## Configure package origin
#
ceph_origin: 'upstream' # or 'distro'
# 'distro' means that no separate repo file will be added
# you will get whatever version of Ceph is included in your Linux distro.
#
ceph_use_distro_backports: false # DEBIAN ONLY
# STABLE
########
# COMMUNITY VERSION
ceph_stable: false # use ceph stable branch
ceph_stable_key: https://download.ceph.com/keys/release.asc
ceph_stable_release: infernalis # ceph stable release
ceph_stable_repo: "http://ceph.com/debian-{{ ceph_stable_release }}"
###################
# Stable Releases #
###################
ceph_stable_releases:
- dumpling
- emperor
- firefly
- giant
- hammer
# Use the option below to specify your applicable package tree, e.g. when using non-LTS Ubuntu versions
# # for a list of available Debian distributions, visit http://ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
#ceph_stable_distro_source:
# This option is needed for _both_ stable and dev version, so please always fill the right version
# # for supported distros, see http://ceph.com/rpm-{{ ceph_stable_release }}/
ceph_stable_redhat_distro: el7
# ENTERPRISE VERSION ICE (old, prior to the 1.3)
ceph_stable_ice: false # use Inktank Ceph Enterprise
#ceph_stable_ice_url: https://download.inktank.com/enterprise
# these two variables are used in `with_items` and starting
# with ansible 2.0 these need to be defined even if the task's
# `when` clause doesn't evaluate to true
ceph_stable_ice_temp_path: /opt/ICE/ceph-repo/
ceph_stable_ice_kmod: 3.10-0.1.20140702gitdc9ac62.el7.x86_64
#ceph_stable_ice_distro: rhel7 # Please check the download website for the supported versions.
#ceph_stable_ice_version: 1.2.2
#ceph_stable_ice_kmod_version: 1.2
#ceph_stable_ice_user: # htaccess user
#ceph_stable_ice_password: # htaccess password
# ENTERPRISE VERSION RED HAT STORAGE (from 1.3)
# This version is only supported on RHEL 7.1
# As of RHEL 7.1, libceph.ko and rbd.ko are now included in Red Hat's kernel
# packages natively. The RHEL 7.1 kernel packages are more stable and secure than
# using these 3rd-party kmods with RHEL 7.0. Please update your systems to RHEL
# 7.1 or later if you want to use the kernel RBD client.
#
# The CephFS kernel client is undergoing rapid development upstream, and we do
# not recommend running the CephFS kernel module on RHEL 7's 3.10 kernel at this
# time. Please use ELRepo's latest upstream 4.x kernels if you want to run CephFS
# on RHEL 7.
#
ceph_stable_rh_storage: false
ceph_stable_rh_storage_cdn_install: false # assumes all the nodes can connect to cdn.redhat.com
ceph_stable_rh_storage_iso_install: false # usually used when nodes don't have access to cdn.redhat.com
#ceph_stable_rh_storage_iso_path:
ceph_stable_rh_storage_mount_path: /tmp/rh-storage-mount
ceph_stable_rh_storage_repository_path: /tmp/rh-storage-repo # where to copy iso's content
# DEV
# ###
ceph_dev: false # use ceph development branch
ceph_dev_key: https://download.ceph.com/keys/autobuild.asc
ceph_dev_branch: master # development branch you would like to use e.g: master, wip-hack
# supported distros are centos6, centos7, fc17, fc18, fc19, fc20, fedora17, fedora18,
# fedora19, fedora20, opensuse12, sles0. (see http://gitbuilder.ceph.com/).
# For rhel, please pay attention to the versions: 'rhel6 3' or 'rhel 4', the fullname is _very_ important.
ceph_dev_redhat_distro: centos7
######################
# CEPH CONFIGURATION #
######################
## Ceph options
#
# Each cluster requires a unique, consistent filesystem ID. By
# default, the playbook generates one for you and stores it in a file
# in `fetch_directory`. If you want to customize how the fsid is
# generated, you may find it useful to disable fsid generation to
# avoid cluttering up your ansible repo. If you set `generate_fsid` to
# false, you *must* generate `fsid` in another way.
fsid: "{{ cluster_uuid.stdout }}"
generate_fsid: true
cephx: true
cephx_require_signatures: true # Kernel RBD does NOT support signatures for Kernels < 3.18!
cephx_cluster_require_signatures: true
cephx_service_require_signatures: false
max_open_files: 131072
disable_in_memory_logs: true # set this to false while enabling the options below
# Debug logs
enable_debug_global: false
debug_global_level: 20
enable_debug_mon: false
debug_mon_level: 20
enable_debug_osd: false
debug_osd_level: 20
enable_debug_mds: false
debug_mds_level: 20
## Client options
#
rbd_cache: "true"
rbd_cache_writethrough_until_flush: "true"
rbd_concurrent_management_ops: 20
rbd_client_directories: false # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions, this WON'T work if libvirt and kvm are installed
rbd_client_log_file: /var/log/rbd-clients/qemu-guest-$pid.log # must be writable by QEMU and allowed by SELinux or AppArmor
rbd_client_log_path: /var/log/rbd-clients/
rbd_client_admin_socket_path: /var/run/ceph/rbd-clients # must be writable by QEMU and allowed by SELinux or AppArmor
rbd_default_features: 3
rbd_default_map_options: rw
rbd_default_format: 2
## Monitor options
#
monitor_interface: interface
mon_osd_down_out_interval: 600
mon_osd_min_down_reporters: 7 # number of OSDs per host + 1
mon_clock_drift_allowed: .15
mon_clock_drift_warn_backoff: 30
mon_osd_full_ratio: .95
mon_osd_nearfull_ratio: .85
mon_osd_report_timeout: 300
mon_pg_warn_max_per_osd: 0 # disable complaints about low pg numbers per osd
mon_osd_allow_primary_affinity: "true"
mon_pg_warn_max_object_skew: 10 # set to 20 or higher to disable complaints about number of PGs being too low if some pools have very few objects bringing down the average number of objects per pool. This happens when running RadosGW. Ceph default is 10
## OSD options
#
journal_size: 0
pool_default_pg_num: 128
pool_default_pgp_num: 128
pool_default_size: 2
pool_default_min_size: 1
public_network: 0.0.0.0/0
cluster_network: "{{ public_network }}"
osd_mkfs_type: xfs
osd_mkfs_options_xfs: -f -i size=2048
osd_mount_options_xfs: noatime,largeio,inode64,swalloc
osd_mon_heartbeat_interval: 30
# CRUSH
pool_default_crush_rule: 0
osd_crush_update_on_start: "true"
# Object backend
osd_objectstore: filestore
# xattrs. by default, 'filestore xattr use omap' is set to 'true' if
# 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. This can
# be set to 'true' or 'false' to explicitly override those
# defaults. Leave it 'null' to use the default for your chosen mkfs
# type.
filestore_xattr_use_omap: null
# Performance tuning
filestore_merge_threshold: 40
filestore_split_multiple: 8
osd_op_threads: 8
filestore_op_threads: 8
filestore_max_sync_interval: 5
osd_max_scrubs: 1
# The OSD scrub window can be configured starting hammer only!
# Default settings will define a 24h window for the scrubbing operation
# The window is predefined from 0am midnight to midnight the next day.
osd_scrub_begin_hour: 0
osd_scrub_end_hour: 24
# Recovery tuning
osd_recovery_max_active: 5
osd_max_backfills: 2
osd_recovery_op_priority: 2
osd_recovery_max_chunk: 1048576
osd_recovery_threads: 1
# Deep scrub
osd_scrub_sleep: .1
osd_disk_thread_ioprio_class: idle
osd_disk_thread_ioprio_priority: 0
osd_scrub_chunk_max: 5
osd_deep_scrub_stride: 1048576
## Rados Gateway options
#
#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
radosgw_frontend: civetweb # supported options are 'apache' or 'civetweb', also edit roles/ceph-rgw/defaults/main.yml
radosgw_civetweb_port: 8080 # on Infernalis we get: "set_ports_option: cannot bind to 80: 13 (Permission denied)"
radosgw_keystone: false # activate OpenStack Keystone options full detail here: http://ceph.com/docs/master/radosgw/keystone/
#radosgw_keystone_url: # url:admin_port ie: http://192.168.0.1:35357
radosgw_keystone_admin_token: password
radosgw_keystone_accepted_roles: Member, _member_, admin
radosgw_keystone_token_cache_size: 10000
radosgw_keystone_revocation_internal: 900
radosgw_s3_auth_use_keystone: "true"
radosgw_nss_db_path: /var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_hostname }}/nss
# Toggle 100-continue support for Apache and FastCGI
# WARNING: Changing this value will cause an outage of Apache while it is reinstalled on RGW nodes
http_100_continue: false
# Rados Gateway options
redhat_distro_ceph_extra: centos6.4 # supported distros are centos6.3, centos6.4, centos6, fedora18, fedora19, opensuse12.2, rhel6.3, rhel6.4, rhel6.5, rhel6, sles11sp2
email_address: foo@bar.com
## REST API options
#
restapi_interface: "{{ monitor_interface }}"
restapi_port: 5000
restapi_base_url: /api/v0.1
restapi_log_level: warning # available levels are: critical, error, warning, info, debug
## Testing mode
# enable this mode _only_ when you have a single node
# if you don't want it keep the option commented
#common_single_host_mode: true
###################
# CONFIG OVERRIDE #
###################
# Ceph configuration file override.
# This allows you to specify more configuration options
# using an INI style format.
# The following sections are supported: [global], [mon], [osd], [mds], [rgw]
#
# Example:
# ceph_conf_overrides:
# global:
# foo: 1234
# bar: 5678
#
ceph_conf_overrides: {}
#############
# OS TUNING #
#############
disable_transparent_hugepage: true
disable_swap: true
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }
- { name: vm.zone_reclaim_mode, value: 0 }
- { name: vm.vfs_cache_pressure, value: 50 }
- { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" }
##########
# DOCKER #
##########
docker: false

View File

@@ -1,26 +1,26 @@
---
# You can override vars by using host or group vars
###########
# GENERAL #
###########
fetch_directory: fetch/
# Even though MDS nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on MDS nodes. Setting 'copy_admin_key' to 'true'
# will copy the admin key to the /etc/ceph/ directory
copy_admin_key: false
cephx: true
##########
# DOCKER #
##########
mds_containerized_deployment: false
ceph_mds_docker_username: ceph
ceph_mds_docker_imagename: daemon
ceph_mds_docker_extra_env: "MDS_NAME={{ ansible_hostname }}" # comma separated variables

View File

@@ -1,68 +1,68 @@
---
# You can override vars by using host or group vars
###########
# GENERAL #
###########
fetch_directory: fetch/
mon_group_name: mons
# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
fsid: "{{ cluster_uuid.stdout }}"
monitor_secret: "{{ monitor_keyring.stdout }}"
cephx: true
# CephFS
pool_default_pg_num: 128
cephfs_data: cephfs_data
cephfs_metadata: cephfs_metadata
cephfs: cephfs
# Secure your cluster
# This will set the following flags on all the pools:
# * nosizechange
# * nopgchange
# * nodelete
secure_cluster: false
secure_cluster_flags:
- nopgchange
- nodelete
- nosizechange
#############
# OPENSTACK #
#############
openstack_config: false
openstack_glance_pool:
name: images
pg_num: "{{ pool_default_pg_num }}"
openstack_cinder_pool:
name: volumes
pg_num: "{{ pool_default_pg_num }}"
openstack_nova_pool:
name: vms
pg_num: "{{ pool_default_pg_num }}"
openstack_cinder_backup_pool:
name: backups
pg_num: "{{ pool_default_pg_num }}"
openstack_keys:
- { name: client.glance, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool.name }}'" }
- { name: client.cinder, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_pool.name }}, allow rwx pool={{ openstack_nova_pool.name }}, allow rx pool={{ openstack_glance_pool.name }}'" }
- { name: client.cinder-backup, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_backup_pool.name }}'" }
##########
# DOCKER #
##########
mon_containerized_deployment: false
ceph_mon_docker_interface: eth0
#ceph_mon_docker_subnet: # subnet of the ceph_mon_docker_interface
ceph_mon_docker_username: ceph
ceph_mon_docker_imagename: daemon
ceph_mon_extra_envs: "MON_NAME={{ ansible_hostname }}" # comma separated variables

View File

@@ -1,122 +1,122 @@
---
# You can override default vars defined in defaults/main.yml here,
# but I would advise using host or group vars instead
###########
# GENERAL #
###########
fetch_directory: fetch/
# Even though OSD nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on OSD nodes. Setting 'copy_admin_key' to 'true'
# will copy the admin key to the /etc/ceph/ directory
copy_admin_key: false
####################
# OSD CRUSH LOCATION
####################
# The following options will build a ceph.conf with OSD sections
# Example:
# [osd.X]
# osd crush location = "root=location"
#
# This works with your inventory file
# To match the following 'osd_crush_location' option the inventory must look like:
#
# [osds]
# osd0 ceph_crush_root=foo ceph_crush_rack=bar
crush_location: false
osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}'"
##############
# CEPH OPTIONS
##############
# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
fsid: "{{ cluster_uuid.stdout }}"
cephx: true
# Devices to be used as OSDs
# You can pre-provision disks that are not present yet.
# Ansible will just skip them. Newly added disks will be
# automatically configured during the next run.
#
# !! WARNING !!
#
# /!\ ENABLE ONLY ONE SCENARIO AT A TIME /!\
#
# !! WARNING !!
# Declare devices
# All the scenarios inherit from the following device declaration
#
#devices:
# - /dev/sdb
# - /dev/sdc
# Device discovery is based on the Ansible fact 'ansible_devices'
# which reports all the devices on a system. If chosen all the disks
# found will be passed to ceph-disk. You should not be worried on using
# this option since ceph-disk has a built-in check which looks for empty devices.
# Thus devices with existing partition tables will not be used.
# This mode prevents you from filling out the 'devices' variable above.
#
osd_auto_discovery: false
# I. First scenario: journal and osd_data on the same device
# Use 'true' to enable this scenario
# This will collocate both journal and data on the same disk
# creating a partition at the beginning of the device
journal_collocation: false
# II. Second scenario: single journal device for N OSDs
# Use 'true' to enable this scenario
# deprecated, please use scenario III with a single raw_journal_device
# III. Third scenario: N journal devices for N OSDs
# Use 'true' to enable this scenario
#
# In the following example:
# * sdd and sde will get sdb as a journal
# * sdf and sdg will get sdc as a journal
# While starting you have 2 options:
# 1. Pre-allocate all the devices
# 2. Progressively add new devices
raw_multi_journal: false
#raw_journal_devices:
# - /dev/sdb
# - /dev/sdb
# - /dev/sdc
# - /dev/sdc
# IV. Fourth scenario: use directory instead of disk for OSDs
# Use 'true' to enable this scenario
osd_directory: false
#osd_directories:
# - /var/lib/ceph/osd/mydir1
# - /var/lib/ceph/osd/mydir2
##########
# DOCKER #
##########
osd_containerized_deployment: false
ceph_osd_docker_username: ceph
ceph_osd_docker_imagename: daemon
ceph_osd_docker_extra_env: "CEPH_DAEMON=OSD_CEPH_DISK" # comma separated variables
#ceph_osd_docker_devices:
# - /dev/sdb

View File

@@ -1,33 +1,33 @@
---
# You can override vars by using host or group vars
###########
# GENERAL #
###########
fetch_directory: fetch/
# Even though RGW nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on RGW nodes. Setting 'copy_admin_key' to 'true'
# will copy the admin key to the /etc/ceph/ directory
copy_admin_key: false
## Ceph options
#
cephx: true
# Used for the sudo exception while starting the radosgw process
# a new entry /etc/sudoers.d/ceph will be created
# allowing root to not require tty
radosgw_user: root
##########
# DOCKER #
##########
rgw_containerized_deployment: false
ceph_rgw_civetweb_port: 80
ceph_rgw_docker_username: ceph
ceph_rgw_docker_imagename: daemon
ceph_rgw_docker_extra_env: "RGW_CIVETWEB_PORT={{ ceph_rgw_civetweb_port }}" # comma separated variables