---
# You can override vars by using host or group vars
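# For example (illustrative layout, using the standard Ansible group_vars
# convention and the default 'mons' group name defined below), a
# group_vars/mons.yml file could override any of these defaults:
#
#   secure_cluster: true
#   ceph_mon_docker_memory_limit: 2g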

###########
# GENERAL #
###########

mon_group_name: mons

# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
monitor_secret: "{{ monitor_keyring.stdout }}"
admin_secret: 'admin_secret'
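
# A pre-generated key can also be supplied here instead of the registered
# 'monitor_keyring' fact above. One way to generate such a key (the value
# below is only an example) is:
#   $ ceph-authtool --gen-print-key
# monitor_secret: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="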

# Secure your cluster
# This will set the following flags on all the pools:
# * nosizechange
# * nopgchange
# * nodelete

secure_cluster: false
secure_cluster_flags:
  - nopgchange
  - nodelete
  - nosizechange
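
# For reference, each flag above corresponds to a standard pool flag; applied
# by hand it would look roughly like this (pool name illustrative):
#   ceph osd pool set rbd nodelete true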

# Enable the Calamari-backed REST API on a Monitor
calamari: false

# Enable debugging for Calamari
calamari_debug: false


##########
# CEPHFS #
##########
cephfs: cephfs # name of the ceph filesystem
cephfs_data: cephfs_data # name of the data pool for a given filesystem
cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem

cephfs_pools:
  - { name: "{{ cephfs_data }}", pgs: "" }
  - { name: "{{ cephfs_metadata }}", pgs: "" }


###############
# CRUSH RULES #
###############
crush_rule_config: false

crush_rule_hdd:
  name: HDD
  root: HDD
  type: host
  default: false

crush_rule_ssd:
  name: SSD
  root: SSD
  type: host
  default: false

crush_rules:
  - "{{ crush_rule_hdd }}"
  - "{{ crush_rule_ssd }}"

# Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }}
# and will move hosts into them which might lead to significant data movement in the cluster!
#
# In order for the playbook to create the CRUSH hierarchy, you have to set up your Ansible inventory file like so:
#
# [osds]
# ceph-osd-01 osd_crush_location="{ 'root': 'mon-roottt', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd-01' }"
#
# Note that 'host' is mandatory and that you need to submit at least two bucket types (including the host)
create_crush_tree: false
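
# For reference, each rule above describes a replicated CRUSH rule; created by
# hand (assuming the 'HDD' root already exists in the CRUSH map) the first one
# would look roughly like:
#   ceph osd crush rule create-replicated HDD HDD host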


#############
# OPENSTACK #
#############
openstack_config: false
openstack_glance_pool:
  name: "images"
  pg_num: "{{ osd_pool_default_pg_num }}"
  pgp_num: "{{ osd_pool_default_pg_num }}"
  rule_name: "replicated_rule"
  type: 1
  erasure_profile: ""
  expected_num_objects: ""
openstack_cinder_pool:
  name: "volumes"
  pg_num: "{{ osd_pool_default_pg_num }}"
  pgp_num: "{{ osd_pool_default_pg_num }}"
  rule_name: "replicated_rule"
  type: 1
  erasure_profile: ""
  expected_num_objects: ""
openstack_nova_pool:
  name: "vms"
  pg_num: "{{ osd_pool_default_pg_num }}"
  pgp_num: "{{ osd_pool_default_pg_num }}"
  rule_name: "replicated_rule"
  type: 1
  erasure_profile: ""
  expected_num_objects: ""
openstack_cinder_backup_pool:
  name: "backups"
  pg_num: "{{ osd_pool_default_pg_num }}"
  pgp_num: "{{ osd_pool_default_pg_num }}"
  rule_name: "replicated_rule"
  type: 1
  erasure_profile: ""
  expected_num_objects: ""
openstack_gnocchi_pool:
  name: "metrics"
  pg_num: "{{ osd_pool_default_pg_num }}"
  pgp_num: "{{ osd_pool_default_pg_num }}"
  rule_name: "replicated_rule"
  type: 1
  erasure_profile: ""
  expected_num_objects: ""

openstack_pools:
  - "{{ openstack_glance_pool }}"
  - "{{ openstack_cinder_pool }}"
  - "{{ openstack_nova_pool }}"
  - "{{ openstack_cinder_backup_pool }}"
  - "{{ openstack_gnocchi_pool }}"

# The value for 'key' can be a pre-generated key,
# e.g. key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
# By default, keys will be auto-generated.
#
openstack_keys:
  - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
  - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
  - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
  - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600" }
  - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
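
# Each entry above becomes a CephX keyring; created by hand the
# client.cinder-backup entry, for instance, would be roughly equivalent to
# (default pool name assumed):
#   ceph auth get-or-create client.cinder-backup mon 'profile rbd' osd 'profile rbd pool=backups'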


##########
# DOCKER #
##########

# Resource limitation
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# Additional options can be passed using the 'ceph_mon_docker_extra_env' variable.
ceph_mon_docker_memory_limit: 1g
ceph_mon_docker_cpu_limit: 1
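
# Conceptually these two limits correspond to standard 'docker run' resource
# flags, e.g. something along the lines of:
#   docker run --memory 1g --cpus 1 ...
# (the exact flags are handled by the role; shown here for illustration only)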

# Use this variable to add extra env configuration to run your mon container.
# If you want to set a custom admin keyring you can set this variable like the following:
# ceph_mon_docker_extra_env: -e ADMIN_SECRET={{ admin_secret }}
ceph_mon_docker_extra_env:
mon_docker_privileged: false
mon_docker_net_host: true
ceph_config_keys: [] # DON'T TOUCH ME


###########
# SYSTEMD #
###########
# ceph_mon_systemd_overrides will override the systemd settings
# for the ceph-mon services.
# For example, to set "PrivateDevices=false" you can specify:
# ceph_mon_systemd_overrides:
#   Service:
#     PrivateDevices: False