merge docker-common and common defaults vars

Merge the `ceph-docker-common` and `ceph-common` defaults vars into the
`ceph-defaults` role.
Remove redundant variable declarations from the `ceph-mon` and `ceph-osd` roles.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/1727/head
Guillaume Abrioux 2017-07-28 22:35:23 +02:00
parent 206c7a16d0
commit 1d003aa887
7 changed files with 396 additions and 421 deletions


@@ -13,12 +13,9 @@ dummy:
# GENERAL #
###########
#fetch_directory: fetch/
#mon_group_name: mons
# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
#fsid: "{{ cluster_uuid.stdout }}"
#monitor_secret: "{{ monitor_keyring.stdout }}"
#admin_secret: 'admin_secret'


@@ -15,8 +15,6 @@ dummy:
# GENERAL #
###########
#fetch_directory: fetch/
# Even though OSD nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on OSD nodes. Setting 'copy_admin_key' to 'true'
@@ -58,9 +56,6 @@ dummy:
# CEPH OPTIONS
##############
# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
#fsid: "{{ cluster_uuid.stdout }}"
# Devices to be used as OSDs
# You can pre-provision disks that are not present yet.
# Ansible will just skip them. Newly added disk will be
@@ -224,7 +219,6 @@ dummy:
# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1
#
#
#ceph_osd_docker_devices: "{{ devices }}"
#ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
# ACTIVATE DEVICE


@@ -1,374 +1 @@
---
# You can override vars by using host or group vars
###########
# GENERAL #
###########
fetch_directory: fetch/
# The 'cluster' variable determines the name of the cluster.
# Changing the default value to something else means that you will
# need to change all the command line calls as well, for example if
# your cluster name is 'foo':
# "ceph health" will become "ceph --cluster foo health"
#
# An easier way to handle this is to use the environment variable CEPH_ARGS
# So run: export CEPH_ARGS="--cluster foo"
# With that you will be able to run "ceph health" normally
cluster: ceph
###########
# INSTALL #
###########
mon_group_name: mons
osd_group_name: osds
rgw_group_name: rgws
mds_group_name: mdss
nfs_group_name: nfss
restapi_group_name: restapis
rbdmirror_group_name: rbdmirrors
client_group_name: clients
iscsi_group_name: iscsigws
mgr_group_name: mgrs
# If check_firewall is true, then ansible will try to determine if the
# Ceph ports are blocked by a firewall. If the machine running ansible
# cannot reach the Ceph ports for some other reason, you may need or
# want to set this to False to skip those checks.
check_firewall: False
# This variable determines if ceph packages can be updated. If False, the
# package resources will use "state=present". If True, they will use
# "state=latest".
upgrade_ceph_packages: False
# /!\ ACTIVATE EITHER ceph_stable OR ceph_stable_uca OR ceph_dev OR ceph_custom /!\
debian_package_dependencies:
  - python-pycurl
  - hdparm
centos_package_dependencies:
  - python-pycurl
  - hdparm
  - epel-release
  - python-setuptools
  - libselinux-python
redhat_package_dependencies:
  - python-pycurl
  - hdparm
  - python-setuptools
# Enable the ntp service by default to avoid clock skew on
# ceph nodes
ntp_service_enabled: true
# Whether or not to install the ceph-test package.
ceph_test: False
## Configure package origin
#
ceph_origin: 'upstream' # or 'distro' or 'local'
# 'distro' means that no separate repo file will be added
# you will get whatever version of Ceph is included in your Linux distro.
# 'local' means that the ceph binaries will be copied over from the local machine
# LOCAL CEPH INSTALLATION (ceph_origin==local)
#
# Path to DESTDIR of the ceph install
#ceph_installation_dir: "/path/to/ceph_installation/"
# Whether or not to use installer script rundep_installer.sh
# This script takes in rundep and installs the packages line by line onto the machine
# If this is set to false then it is assumed that the machine ceph is being copied onto will already have
# all runtime dependencies installed
#use_installer: false
# Root directory for ceph-ansible
#ansible_dir: "/path/to/ceph-ansible"
ceph_use_distro_backports: false # DEBIAN ONLY
# STABLE
########
# COMMUNITY VERSION
ceph_stable: false # use ceph stable branch
ceph_mirror: http://download.ceph.com
ceph_stable_key: https://download.ceph.com/keys/release.asc
ceph_stable_release: dummy
ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}"
######################################
# Releases name to number dictionary #
######################################
ceph_release_num:
  dumpling: 0.67
  emperor: 0.72
  firefly: 0.80
  giant: 0.87
  hammer: 0.94
  infernalis: 9
  jewel: 10
  kraken: 11
  luminous: 12
# Use the option below to specify your applicable package tree, e.g. when using non-LTS Ubuntu versions
# for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
#ceph_stable_distro_source:
# This option is needed for _both_ stable and dev version, so please always fill the right version
# for supported distros, see http://download.ceph.com/rpm-{{ ceph_stable_release }}/
ceph_stable_redhat_distro: el7
# ENTERPRISE VERSION RED HAT STORAGE (from 1.3)
# This version is only supported on RHEL >= 7.1
# As of RHEL 7.1, libceph.ko and rbd.ko are now included in Red Hat's kernel
# packages natively. The RHEL 7.1 kernel packages are more stable and secure than
# using these 3rd-party kmods with RHEL 7.0. Please update your systems to RHEL
# 7.1 or later if you want to use the kernel RBD client.
#
# The CephFS kernel client is undergoing rapid development upstream, and we do
# not recommend running the CephFS kernel module on RHEL 7's 3.10 kernel at this
# time. Please use ELRepo's latest upstream 4.x kernels if you want to run CephFS
# on RHEL 7.
#
#
# Backward compatibility of variable names
# Commit 492518a2 changed variable names of rhcs installations
# to not break backward compatibility we re-declare these variables
# with the content of the new variable
ceph_rhcs: "{{ ceph_stable_rh_storage | default(false) }}"
# This will affect how/what repositories are enabled depending on the desired
# version. The previous version was 1.3. The current version is 2.
ceph_rhcs_version: "{{ ceph_stable_rh_storage_version | default(2) }}"
ceph_rhcs_cdn_install: "{{ ceph_stable_rh_storage_cdn_install | default(false) }}" # assumes all the nodes can connect to cdn.redhat.com
ceph_rhcs_iso_install: "{{ ceph_stable_rh_storage_iso_install | default(false) }}" # usually used when nodes don't have access to cdn.redhat.com
ceph_rhcs_iso_path: "{{ ceph_stable_rh_storage_iso_path | default('') }}"
ceph_rhcs_mount_path: "{{ ceph_stable_rh_storage_mount_path | default('/tmp/rh-storage-mount') }}"
ceph_rhcs_repository_path: "{{ ceph_stable_rh_storage_repository_path | default('/tmp/rh-storage-repo') }}" # where to copy iso's content
# UBUNTU CLOUD ARCHIVE
# This allows the install of Ceph from the Ubuntu Cloud Archive. The Ubuntu Cloud Archive
# usually has newer Ceph releases than the normal distro repository.
#
ceph_stable_uca: false
#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
#ceph_stable_openstack_release_uca: liberty
#ceph_stable_release_uca: "{{ansible_lsb.codename}}-updates/{{ceph_stable_openstack_release_uca}}"
# DEV
# ###
ceph_dev: false # use ceph development branch
ceph_dev_branch: master # development branch you would like to use, e.g. master, wip-hack
ceph_dev_sha1: latest # distinct sha1 to use, defaults to 'latest' (as in latest built)
# CUSTOM
# ###
# Use a custom repository to install ceph. For RPM, ceph_custom_repo should be
# a URL to the .repo file to be installed on the targets. For deb,
# ceph_custom_repo should be the URL to the repo base.
ceph_custom: false # use custom ceph repository
ceph_custom_repo: https://server.domain.com/ceph-custom-repo
######################
# CEPH CONFIGURATION #
######################
## Ceph options
#
# Each cluster requires a unique, consistent filesystem ID. By
# default, the playbook generates one for you and stores it in a file
# in `fetch_directory`. If you want to customize how the fsid is
# generated, you may find it useful to disable fsid generation to
# avoid cluttering up your ansible repo. If you set `generate_fsid` to
# false, you *must* generate `fsid` in another way.
fsid: "{{ cluster_uuid.stdout }}"
generate_fsid: true
cephx: true
max_open_files: 131072
## Client options
#
rbd_cache: "true"
rbd_cache_writethrough_until_flush: "true"
rbd_concurrent_management_ops: 20
rbd_client_directories: true # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions
# Permissions for the rbd_client_log_path and
# rbd_client_admin_socket_path. Depending on your use case for Ceph
# you may want to change these values. The default, which is used if
# any of the variables are unset or set to a false value (like `null`
# or `false`) is to automatically determine what is appropriate for
# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770
# for infernalis releases, and root:root and 1777 for pre-infernalis
# releases.
#
# For other use cases, including running Ceph with OpenStack, you'll
# want to set these differently:
#
# For OpenStack on RHEL, you'll want:
# rbd_client_directory_owner: "qemu"
# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt)
# rbd_client_directory_mode: "0755"
#
# For OpenStack on Ubuntu or Debian, set:
# rbd_client_directory_owner: "libvirt-qemu"
# rbd_client_directory_group: "kvm"
# rbd_client_directory_mode: "0755"
#
# If you set rbd_client_directory_mode, you must use a string (e.g.,
# 'rbd_client_directory_mode: "0755"', *not*
# 'rbd_client_directory_mode: 0755', or Ansible will complain: mode
# must be in octal or symbolic form
rbd_client_directory_owner: null
rbd_client_directory_group: null
rbd_client_directory_mode: null
rbd_client_log_path: /var/log/ceph
rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor
rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allowed by SELinux or AppArmor
## Monitor options
#
# You must define either monitor_interface, monitor_address or monitor_address_block.
# These variables must be defined at least in all.yml and overridden if needed (inventory host file or group_vars/*.yml).
# E.g. if you want to specify which address each monitor will bind to, you can set it in your **inventory host file** with the 'monitor_address' variable.
# Preference will go to monitor_address if both monitor_address and monitor_interface are defined.
# To use an IPv6 address, use the monitor_address setting instead (and set ip_version to ipv6)
monitor_interface: interface
monitor_address: 0.0.0.0
monitor_address_block: []
# set to either ipv4 or ipv6, whichever your network is using
ip_version: ipv4
mon_use_fqdn: false # if set to true, the MON name used will be the fqdn in the ceph.conf
## OSD options
#
journal_size: 5120 # OSD journal size in MB
public_network: 0.0.0.0/0
cluster_network: "{{ public_network }}"
osd_mkfs_type: xfs
osd_mkfs_options_xfs: -f -i size=2048
osd_mount_options_xfs: noatime,largeio,inode64,swalloc
osd_objectstore: filestore
# xattrs. By default, 'filestore xattr use omap' is set to 'true' if
# 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. This can
# be set to 'true' or 'false' to explicitly override those
# defaults. Leave it 'null' to use the default for your chosen mkfs
# type.
filestore_xattr_use_omap: null
## MDS options
#
mds_use_fqdn: false # if set to true, the MDS name used will be the fqdn in the ceph.conf
mds_allow_multimds: false
mds_max_mds: 3
## Rados Gateway options
#
#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
radosgw_resolve_cname: false # enable for radosgw to resolve DNS CNAME based bucket names
radosgw_civetweb_port: 8080
radosgw_civetweb_bind_ip: "{{ ansible_default_ipv4.address }}" # when using ipv6 enclose with brackets: "[{{ ansible_default_ipv6.address }}]"
radosgw_civetweb_num_threads: 100
# For additional civetweb configuration options available such as SSL, logging,
# keepalive, and timeout settings, please see the civetweb docs at
# https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md
radosgw_civetweb_options: "port={{ radosgw_civetweb_bind_ip }}:{{ radosgw_civetweb_port }} num_threads={{ radosgw_civetweb_num_threads }}"
radosgw_keystone: false # activate OpenStack Keystone options; full details here: http://ceph.com/docs/master/radosgw/keystone/
# Rados Gateway options
email_address: foo@bar.com
## REST API options
#
restapi_interface: "{{ monitor_interface }}"
restapi_address: "{{ monitor_address }}"
restapi_port: 5000
## Testing mode
# enable this mode _only_ when you have a single node
# if you don't want it keep the option commented
#common_single_host_mode: true
## Handlers - restarting daemons after a config change
# If for whatever reason the content of your ceph configuration changes,
# ceph daemons will be restarted as well. At the moment, we cannot detect
# which config option changed, so all the daemons will be restarted. Although
# this restart will be serialized for each node, in between a health check
# will be performed so we make sure we don't move to the next node until
# ceph is healthy again.
# Between the checks (for monitors to be in quorum and for osds' pgs
# to be clean) we have to wait. These retries and delays can be configured
# for both monitors and osds.
handler_health_mon_check_retries: 5
handler_health_mon_check_delay: 10
handler_health_osd_check_retries: 40
handler_health_osd_check_delay: 30
handler_health_osd_check: true
###################
# CONFIG OVERRIDE #
###################
# Ceph configuration file override.
# This allows you to specify more configuration options
# using an INI style format.
# The following sections are supported: [global], [mon], [osd], [mds], [rgw]
#
# Example:
# ceph_conf_overrides:
# global:
# foo: 1234
# bar: 5678
#
ceph_conf_overrides: {}
#############
# OS TUNING #
#############
disable_transparent_hugepage: true
os_tuning_params:
  - { name: kernel.pid_max, value: 4194303 }
  - { name: fs.file-max, value: 26234859 }
  - { name: vm.zone_reclaim_mode, value: 0 }
  - { name: vm.swappiness, value: 10 }
  - { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" }
##########
# DOCKER #
##########
docker: false
ceph_docker_image: "ceph/daemon"
ceph_docker_image_tag: latest
# Do not comment out the containerized_deployment_* variables below. They are used
# by the ceph.conf.j2 template, so they must always be defined.
containerized_deployment_with_kv: false
containerized_deployment: false
mon_containerized_default_ceph_conf_with_kv: false
# Configure the type of NFS gateway access. At least one must be enabled for an
# NFS role to be useful
#
# Set this to true to enable File access via NFS. Requires an MDS role.
nfs_file_gw: true
# Set this to true to enable Object access via NFS. Requires an RGW role.
nfs_obj_gw: false
# this is only here for usage with the rolling_update.yml playbook
# do not ever change this here
rolling_update: false


@@ -0,0 +1,396 @@
---
# You can override vars by using host or group vars
###########
# GENERAL #
###########
fetch_directory: fetch/
# The 'cluster' variable determines the name of the cluster.
# Changing the default value to something else means that you will
# need to change all the command line calls as well, for example if
# your cluster name is 'foo':
# "ceph health" will become "ceph --cluster foo health"
#
# An easier way to handle this is to use the environment variable CEPH_ARGS
# So run: export CEPH_ARGS="--cluster foo"
# With that you will be able to run "ceph health" normally
cluster: ceph
###########
# INSTALL #
###########
# Set uid/gid to default '64045' for bootstrap directories.
# '64045' is used for debian based distros. It must be set to 167 in case of rhel based distros.
# These values have to be set according to the base OS used by the container image, NOT the host.
bootstrap_dirs_owner: "64045"
bootstrap_dirs_group: "64045"
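# A minimal override sketch, assuming a RHEL-based container image (the '167'
# value comes from the note above; put it in group_vars/all.yml for example):
# bootstrap_dirs_owner: "167"
# bootstrap_dirs_group: "167"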
mon_group_name: mons
osd_group_name: osds
rgw_group_name: rgws
mds_group_name: mdss
nfs_group_name: nfss
restapi_group_name: restapis
rbdmirror_group_name: rbdmirrors
client_group_name: clients
iscsi_group_name: iscsigws
mgr_group_name: mgrs
# If check_firewall is true, then ansible will try to determine if the
# Ceph ports are blocked by a firewall. If the machine running ansible
# cannot reach the Ceph ports for some other reason, you may need or
# want to set this to False to skip those checks.
check_firewall: False
# This variable determines if ceph packages can be updated. If False, the
# package resources will use "state=present". If True, they will use
# "state=latest".
upgrade_ceph_packages: False
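# Illustrative sketch of the effect (not the actual role task; the task name
# and module usage here are assumptions for illustration only):
# - name: install ceph packages
#   package:
#     name: ceph
#     state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"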
# /!\ ACTIVATE EITHER ceph_stable OR ceph_stable_uca OR ceph_dev OR ceph_custom /!\
debian_package_dependencies:
  - python-pycurl
  - hdparm
centos_package_dependencies:
  - python-pycurl
  - hdparm
  - epel-release
  - python-setuptools
  - libselinux-python
redhat_package_dependencies:
  - python-pycurl
  - hdparm
  - python-setuptools
# Enable the ntp service by default to avoid clock skew on
# ceph nodes
ntp_service_enabled: true
# Whether or not to install the ceph-test package.
ceph_test: False
## Configure package origin
#
ceph_origin: 'upstream' # or 'distro' or 'local'
# 'distro' means that no separate repo file will be added
# you will get whatever version of Ceph is included in your Linux distro.
# 'local' means that the ceph binaries will be copied over from the local machine
# LOCAL CEPH INSTALLATION (ceph_origin==local)
#
# Path to DESTDIR of the ceph install
#ceph_installation_dir: "/path/to/ceph_installation/"
# Whether or not to use installer script rundep_installer.sh
# This script takes in rundep and installs the packages line by line onto the machine
# If this is set to false then it is assumed that the machine ceph is being copied onto will already have
# all runtime dependencies installed
#use_installer: false
# Root directory for ceph-ansible
#ansible_dir: "/path/to/ceph-ansible"
ceph_use_distro_backports: false # DEBIAN ONLY
# STABLE
########
# COMMUNITY VERSION
ceph_stable: false # use ceph stable branch
ceph_mirror: http://download.ceph.com
ceph_stable_key: https://download.ceph.com/keys/release.asc
ceph_stable_release: dummy
ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}"
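# Worked example: with ceph_stable: true and ceph_stable_release: jewel, the
# repo above resolves to http://download.ceph.com/debian-jewel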
######################################
# Releases name to number dictionary #
######################################
ceph_release_num:
  dumpling: 0.67
  emperor: 0.72
  firefly: 0.80
  giant: 0.87
  hammer: 0.94
  infernalis: 9
  jewel: 10
  kraken: 11
  luminous: 12
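# The release map above lets playbooks compare releases numerically, e.g. with
# a condition like the following (hypothetical, for illustration only):
# when: ceph_release_num[ceph_stable_release] >= ceph_release_num['luminous']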
# Use the option below to specify your applicable package tree, e.g. when using non-LTS Ubuntu versions
# for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
#ceph_stable_distro_source:
# This option is needed for _both_ stable and dev version, so please always fill the right version
# for supported distros, see http://download.ceph.com/rpm-{{ ceph_stable_release }}/
ceph_stable_redhat_distro: el7
# ENTERPRISE VERSION RED HAT STORAGE (from 1.3)
# This version is only supported on RHEL >= 7.1
# As of RHEL 7.1, libceph.ko and rbd.ko are now included in Red Hat's kernel
# packages natively. The RHEL 7.1 kernel packages are more stable and secure than
# using these 3rd-party kmods with RHEL 7.0. Please update your systems to RHEL
# 7.1 or later if you want to use the kernel RBD client.
#
# The CephFS kernel client is undergoing rapid development upstream, and we do
# not recommend running the CephFS kernel module on RHEL 7's 3.10 kernel at this
# time. Please use ELRepo's latest upstream 4.x kernels if you want to run CephFS
# on RHEL 7.
#
#
# Backward compatibility of variable names
# Commit 492518a2 changed variable names of rhcs installations
# to not break backward compatibility we re-declare these variables
# with the content of the new variable
ceph_rhcs: "{{ ceph_stable_rh_storage | default(false) }}"
# This will affect how/what repositories are enabled depending on the desired
# version. The previous version was 1.3. The current version is 2.
ceph_rhcs_version: "{{ ceph_stable_rh_storage_version | default(2) }}"
ceph_rhcs_cdn_install: "{{ ceph_stable_rh_storage_cdn_install | default(false) }}" # assumes all the nodes can connect to cdn.redhat.com
ceph_rhcs_iso_install: "{{ ceph_stable_rh_storage_iso_install | default(false) }}" # usually used when nodes don't have access to cdn.redhat.com
ceph_rhcs_iso_path: "{{ ceph_stable_rh_storage_iso_path | default('') }}"
ceph_rhcs_mount_path: "{{ ceph_stable_rh_storage_mount_path | default('/tmp/rh-storage-mount') }}"
ceph_rhcs_repository_path: "{{ ceph_stable_rh_storage_repository_path | default('/tmp/rh-storage-repo') }}" # where to copy iso's content
# UBUNTU CLOUD ARCHIVE
# This allows the install of Ceph from the Ubuntu Cloud Archive. The Ubuntu Cloud Archive
# usually has newer Ceph releases than the normal distro repository.
#
ceph_stable_uca: false
#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
#ceph_stable_openstack_release_uca: liberty
#ceph_stable_release_uca: "{{ansible_lsb.codename}}-updates/{{ceph_stable_openstack_release_uca}}"
# DEV
# ###
ceph_dev: false # use ceph development branch
ceph_dev_branch: master # development branch you would like to use, e.g. master, wip-hack
ceph_dev_sha1: latest # distinct sha1 to use, defaults to 'latest' (as in latest built)
# CUSTOM
# ###
# Use a custom repository to install ceph. For RPM, ceph_custom_repo should be
# a URL to the .repo file to be installed on the targets. For deb,
# ceph_custom_repo should be the URL to the repo base.
ceph_custom: false # use custom ceph repository
ceph_custom_repo: https://server.domain.com/ceph-custom-repo
######################
# CEPH CONFIGURATION #
######################
## Ceph options
#
# Each cluster requires a unique, consistent filesystem ID. By
# default, the playbook generates one for you and stores it in a file
# in `fetch_directory`. If you want to customize how the fsid is
# generated, you may find it useful to disable fsid generation to
# avoid cluttering up your ansible repo. If you set `generate_fsid` to
# false, you *must* generate `fsid` in another way.
# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
fsid: "{{ cluster_uuid.stdout }}"
generate_fsid: true
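# Example of supplying the fsid yourself, per the note above (the uuid shown
# is illustrative; generate your own, e.g. with uuidgen):
# generate_fsid: false
# fsid: 0e4f1454-97ec-44f5-9f82-ef1e55db6d57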
ceph_conf_key_directory: /etc/ceph
cephx: true
max_open_files: 131072
## Client options
#
rbd_cache: "true"
rbd_cache_writethrough_until_flush: "true"
rbd_concurrent_management_ops: 20
rbd_client_directories: true # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions
# Permissions for the rbd_client_log_path and
# rbd_client_admin_socket_path. Depending on your use case for Ceph
# you may want to change these values. The default, which is used if
# any of the variables are unset or set to a false value (like `null`
# or `false`) is to automatically determine what is appropriate for
# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770
# for infernalis releases, and root:root and 1777 for pre-infernalis
# releases.
#
# For other use cases, including running Ceph with OpenStack, you'll
# want to set these differently:
#
# For OpenStack on RHEL, you'll want:
# rbd_client_directory_owner: "qemu"
# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt)
# rbd_client_directory_mode: "0755"
#
# For OpenStack on Ubuntu or Debian, set:
# rbd_client_directory_owner: "libvirt-qemu"
# rbd_client_directory_group: "kvm"
# rbd_client_directory_mode: "0755"
#
# If you set rbd_client_directory_mode, you must use a string (e.g.,
# 'rbd_client_directory_mode: "0755"', *not*
# 'rbd_client_directory_mode: 0755', or Ansible will complain: mode
# must be in octal or symbolic form
rbd_client_directory_owner: null
rbd_client_directory_group: null
rbd_client_directory_mode: null
rbd_client_log_path: /var/log/ceph
rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor
rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allowed by SELinux or AppArmor
## Monitor options
#
# You must define either monitor_interface, monitor_address or monitor_address_block.
# These variables must be defined at least in all.yml and overridden if needed (inventory host file or group_vars/*.yml).
# E.g. if you want to specify which address each monitor will bind to, you can set it in your **inventory host file** with the 'monitor_address' variable.
# Preference will go to monitor_address if both monitor_address and monitor_interface are defined.
# To use an IPv6 address, use the monitor_address setting instead (and set ip_version to ipv6)
monitor_interface: interface
monitor_address: 0.0.0.0
monitor_address_block: []
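# Example of per-monitor addresses in an **inventory host file** (hostnames
# and addresses are illustrative):
# mon0 monitor_address=192.168.1.10
# mon1 monitor_address=192.168.1.11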
# set to either ipv4 or ipv6, whichever your network is using
ip_version: ipv4
mon_use_fqdn: false # if set to true, the MON name used will be the fqdn in the ceph.conf
## OSD options
#
journal_size: 5120 # OSD journal size in MB
public_network: 0.0.0.0/0
cluster_network: "{{ public_network }}"
osd_mkfs_type: xfs
osd_mkfs_options_xfs: -f -i size=2048
osd_mount_options_xfs: noatime,largeio,inode64,swalloc
osd_objectstore: filestore
# xattrs. By default, 'filestore xattr use omap' is set to 'true' if
# 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. This can
# be set to 'true' or 'false' to explicitly override those
# defaults. Leave it 'null' to use the default for your chosen mkfs
# type.
filestore_xattr_use_omap: null
## MDS options
#
mds_use_fqdn: false # if set to true, the MDS name used will be the fqdn in the ceph.conf
mds_allow_multimds: false
mds_max_mds: 3
## Rados Gateway options
#
#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
radosgw_resolve_cname: false # enable for radosgw to resolve DNS CNAME based bucket names
radosgw_civetweb_port: 8080
radosgw_civetweb_bind_ip: "{{ ansible_default_ipv4.address }}" # when using ipv6 enclose with brackets: "[{{ ansible_default_ipv6.address }}]"
radosgw_civetweb_num_threads: 100
# For additional civetweb configuration options available such as SSL, logging,
# keepalive, and timeout settings, please see the civetweb docs at
# https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md
radosgw_civetweb_options: "port={{ radosgw_civetweb_bind_ip }}:{{ radosgw_civetweb_port }} num_threads={{ radosgw_civetweb_num_threads }}"
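# Worked example: with the defaults above, and assuming
# ansible_default_ipv4.address resolves to 192.168.1.20, this renders as:
# "port=192.168.1.20:8080 num_threads=100"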
radosgw_keystone: false # activate OpenStack Keystone options; full details here: http://ceph.com/docs/master/radosgw/keystone/
# Rados Gateway options
email_address: foo@bar.com
## REST API options
#
restapi_interface: "{{ monitor_interface }}"
restapi_address: "{{ monitor_address }}"
restapi_port: 5000
## Testing mode
# enable this mode _only_ when you have a single node
# if you don't want it keep the option commented
#common_single_host_mode: true
## Handlers - restarting daemons after a config change
# If for whatever reason the content of your ceph configuration changes,
# ceph daemons will be restarted as well. At the moment, we cannot detect
# which config option changed, so all the daemons will be restarted. Although
# this restart will be serialized for each node, in between a health check
# will be performed so we make sure we don't move to the next node until
# ceph is healthy again.
# Between the checks (for monitors to be in quorum and for osds' pgs
# to be clean) we have to wait. These retries and delays can be configured
# for both monitors and osds.
handler_health_mon_check_retries: 5
handler_health_mon_check_delay: 10
handler_health_osd_check_retries: 40
handler_health_osd_check_delay: 30
handler_health_osd_check: true
# Configure the type of NFS gateway access. At least one must be enabled for an
# NFS role to be useful
#
# Set this to true to enable File access via NFS. Requires an MDS role.
nfs_file_gw: true
# Set this to true to enable Object access via NFS. Requires an RGW role.
nfs_obj_gw: false
###################
# CONFIG OVERRIDE #
###################
# Ceph configuration file override.
# This allows you to specify more configuration options
# using an INI style format.
# The following sections are supported: [global], [mon], [osd], [mds], [rgw]
#
# Example:
# ceph_conf_overrides:
# global:
# foo: 1234
# bar: 5678
#
ceph_conf_overrides: {}
#############
# OS TUNING #
#############
disable_transparent_hugepage: true
os_tuning_params:
  - { name: kernel.pid_max, value: 4194303 }
  - { name: fs.file-max, value: 26234859 }
  - { name: vm.zone_reclaim_mode, value: 0 }
  - { name: vm.swappiness, value: 10 }
  - { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" }
##########
# DOCKER #
##########
docker: false
ceph_docker_image: "ceph/daemon"
ceph_docker_image_tag: latest
# Do not comment out the containerized_deployment_* variables below. They are used
# by the ceph.conf.j2 template, so they must always be defined.
containerized_deployment_with_kv: false
containerized_deployment: false
mon_containerized_default_ceph_conf_with_kv: false
ceph_docker_registry: docker.io
ceph_docker_enable_centos_extra_repo: false
ceph_docker_on_openstack: false
############
# KV store #
############
kv_type: etcd
kv_endpoint: 127.0.0.1
kv_port: 2379
containerized_deployment_with_kv: false
# this is only here for usage with the rolling_update.yml playbook
# do not ever change this here
rolling_update: false


@@ -1,32 +1 @@
---
fsid: "{{ cluster_uuid.stdout }}"
generate_fsid: true
ceph_docker_registry: docker.io
ceph_docker_enable_centos_extra_repo: false
ceph_docker_on_openstack: false
mon_use_fqdn: false # if set to true, the MON name used will be the fqdn
# Set uid/gid to default '64045' for bootstrap directories.
# '64045' is used for debian based distros. It must be set to 167 in case of rhel based distros.
# These values have to be set according to the base OS used by the container image, NOT the host.
bootstrap_dirs_owner: "64045"
bootstrap_dirs_group: "64045"
ceph_conf_key_directory: /etc/ceph
###########
# Network #
###########
monitor_interface: 'interface'
monitor_address: '0.0.0.0'
monitor_address_block: []
############
# KV store #
############
kv_type: etcd
kv_endpoint: 127.0.0.1
kv_port: 2379
containerized_deployment_with_kv: false


@@ -5,12 +5,9 @@
# GENERAL #
###########
fetch_directory: fetch/
mon_group_name: mons
# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
fsid: "{{ cluster_uuid.stdout }}"
monitor_secret: "{{ monitor_keyring.stdout }}"
admin_secret: 'admin_secret'


@@ -7,8 +7,6 @@
# GENERAL #
###########
fetch_directory: fetch/
# Even though OSD nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on OSD nodes. Setting 'copy_admin_key' to 'true'
@@ -50,9 +48,6 @@ osd_crush_location: "\"root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} hos
# CEPH OPTIONS
##############
# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
fsid: "{{ cluster_uuid.stdout }}"
# Devices to be used as OSDs
# You can pre-provision disks that are not present yet.
# Ansible will just skip them. Newly added disk will be