---
# Variables here are applicable to all host groups NOT roles

# This sample file generated by generate_group_vars_sample.sh

# Dummy variable to avoid error because ansible does not recognize the
# file as a good configuration file when no variable in it.
dummy:

# You can override default vars defined in defaults/main.yml here,
# but I would advise using host or group vars instead

###########
# GENERAL #
###########

# Even though OSD nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on OSD nodes. Setting 'copy_admin_key' to 'true'
# will copy the admin key to the /etc/ceph/ directory
#copy_admin_key: false

####################
# OSD CRUSH LOCATION
####################

# /!\
#
# BE EXTREMELY CAREFUL WITH THIS OPTION
# DO NOT USE IT UNLESS YOU KNOW WHAT YOU ARE DOING
#
# /!\
#
# It is probably best to keep this option set to 'false', as the default
# suggests. It should only be used when building a complex CRUSH map,
# since it allows you to force a specific location for a set of OSDs.
#
# The following options will build a ceph.conf with OSD sections
# Example:
# [osd.X]
# osd crush location = "root=location"
#
# This works with your inventory file
# To match the following 'osd_crush_location' option the inventory must look like:
#
# [osds]
# osd0 ceph_crush_root=foo ceph_crush_rack=bar

#crush_location: false
#osd_crush_location: "\"root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}\""
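
# For illustration only (hypothetical hostnames and CRUSH buckets): with the
# inventory below and the 'osd_crush_location' default above, each OSD section
# rendered into ceph.conf would pin the OSD under its root, rack and host.
#
# [osds]
# osd0 ceph_crush_root=default ceph_crush_rack=rack1
# osd1 ceph_crush_root=default ceph_crush_rack=rack2
#
# Resulting ceph.conf fragment (for an OSD with id 0 running on osd0):
# [osd.0]
# osd crush location = "root=default rack=rack1 host=osd0"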

##############
# CEPH OPTIONS
##############

# Devices to be used as OSDs
# You can pre-provision disks that are not present yet.
# Ansible will just skip them. Newly added disks will be
# automatically configured during the next run.
#

# Declare devices to be used as OSDs
# All scenarios (except the 3rd) inherit from the following device declaration

#devices:
# - /dev/sdb
# - /dev/sdc
# - /dev/sdd
# - /dev/sde

#devices: []

# 'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
# You can use this option with the first, fourth and fifth OSD scenarios.
# Device discovery is based on the Ansible fact 'ansible_devices',
# which reports all the devices on a system. If enabled, all the disks
# found will be passed to ceph-disk. You do not need to worry about using
# this option since ceph-disk has a built-in check which looks for empty devices:
# devices with existing partition tables will not be used.
#
#osd_auto_discovery: false
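
# If you want to preview which devices the 'ansible_devices' fact reports on
# your OSD hosts (and therefore what auto discovery would consider), an ad-hoc
# call to the setup module works, for example:
#
#   ansible osds -m setup -a 'filter=ansible_devices'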

# !! WARNING !!
#
# /!\ ENABLE ONLY ONE SCENARIO AT A TIME /!\
#
# !! WARNING !!
#

# I. First scenario: journal and osd_data on the same device
# Use 'true' to enable this scenario
# This will collocate both journal and data on the same disk,
# creating a partition at the beginning of the device
# List devices under the 'devices' variable above or choose 'osd_auto_discovery'
#
#
# If osd_objectstore: bluestore is enabled, both the rocksdb DB and WAL will be stored
# on the device. So the device will get 2 partitions:
# - One for 'data' (the small 'ceph data' partition)
# - One for 'block', which holds the actual data as well as the rocksdb DB and WAL
#
# Example of what you will get:
# [root@ceph-osd0 ~]# blkid /dev/sda*
# /dev/sda: PTTYPE="gpt"
# /dev/sda1: UUID="9c43e346-dd6e-431f-92d8-cbed4ccb25f6" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="749c71c9-ed8f-4930-82a7-a48a3bcdb1c7"
# /dev/sda2: PARTLABEL="ceph block" PARTUUID="e6ca3e1d-4702-4569-abfa-e285de328e9d"
#journal_collocation: false
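
# For example (illustrative devices): enabling this first scenario on two disks
# could look like:
#
# journal_collocation: true
# devices:
#   - /dev/sdb
#   - /dev/sdc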

# II. Second scenario: N journal devices for N OSDs
# Use 'true' for 'raw_multi_journal' to enable this scenario
# List devices under the 'devices' variable above and
# write journal devices for those under 'raw_journal_devices'
# In the following example:
# * sdb and sdc will get sdf as a journal
# * sdd and sde will get sdg as a journal

# To get started you have 2 options:
# 1. Pre-allocate all the devices
# 2. Progressively add new devices
#raw_multi_journal: false
#raw_journal_devices:
# - /dev/sdf
# - /dev/sdf
# - /dev/sdg
# - /dev/sdg
#
# NOTE(leseb):
# In a containerized scenario we only support A SINGLE journal
# for all the OSDs on a given machine. If you use more than one, bad things will happen.
# This is a limitation we plan to fix at some point.
#
#
# If osd_objectstore: bluestore is enabled, both the rocksdb DB and WAL will be stored
# on a dedicated device. So the following will happen:
# - The devices listed in 'devices' will get 2 partitions, one for 'block' and one for 'data'.
#   'data' is only 100MB and does not store any of your data, it's just a bunch of Ceph metadata.
#   'block' will store all your data.
# - The devices in 'raw_journal_devices' will get 1 partition for the RocksDB DB, called 'block.db',
#   and one for the RocksDB WAL, called 'block.wal'
#
# By default raw_journal_devices will represent block.db
#
# Example of what you will get:
# [root@ceph-osd0 ~]# blkid /dev/sd*
# /dev/sda: PTTYPE="gpt"
# /dev/sda1: UUID="c6821801-2f21-4980-add0-b7fc8bd424d5" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="f2cc6fa8-5b41-4428-8d3f-6187453464d0"
# /dev/sda2: PARTLABEL="ceph block" PARTUUID="ea454807-983a-4cf2-899e-b2680643bc1c"
# /dev/sdb: PTTYPE="gpt"
# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="af5b2d74-4c08-42cf-be57-7248c739e217"
# /dev/sdb2: PARTLABEL="ceph block.wal" PARTUUID="af3f8327-9aa9-4c2b-a497-cf0fe96d126a"
#raw_journal_devices: []
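
# For example (illustrative devices): the mapping described above (sdb and sdc
# journaled on sdf, sdd and sde journaled on sdg) could be expressed as:
#
# raw_multi_journal: true
# devices:
#   - /dev/sdb
#   - /dev/sdc
#   - /dev/sdd
#   - /dev/sde
# raw_journal_devices:
#   - /dev/sdf
#   - /dev/sdf
#   - /dev/sdg
#   - /dev/sdg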

# IV. This will partition disks for BlueStore
# To enable bluestore just set:
# osd_objectstore: bluestore
#
# If osd_objectstore: bluestore is enabled and bluestore_wal_devices is left
# empty (the default), it will get the content of raw_journal_devices.
# If set, then you will have a dedicated partition on a specific device (bluestore_wal_devices)
# for block.wal
#
# Example of what you will get:
# [root@ceph-osd0 ~]# blkid /dev/sd*
# /dev/sda: PTTYPE="gpt"
# /dev/sda1: UUID="39241ae9-d119-4335-96b3-0898da8f45ce" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="961e7313-bdb7-49e7-9ae7-077d65c4c669"
# /dev/sda2: PARTLABEL="ceph block" PARTUUID="bff8e54e-b780-4ece-aa16-3b2f2b8eb699"
# /dev/sdb: PTTYPE="gpt"
# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="0734f6b6-cc94-49e9-93de-ba7e1d5b79e3"
# /dev/sdc: PTTYPE="gpt"
# /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
#bluestore_wal_devices: "{{ raw_journal_devices }}"
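
# For example (illustrative devices, to be combined with one of the scenario
# flags above): placing block.db on /dev/sdb and block.wal on /dev/sdc for an
# OSD whose data lives on /dev/sda might look like:
#
# osd_objectstore: bluestore
# devices:
#   - /dev/sda
# raw_journal_devices:
#   - /dev/sdb
# bluestore_wal_devices:
#   - /dev/sdc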

# V. Encrypt osd data and/or journal devices with dm-crypt.
# Keys are stored in the monitors' k/v store
# Use 'true' to enable this scenario
# Both journal and data are stored on the same dm-crypt encrypted device
#dmcrypt_journal_collocation: false
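
# For example (illustrative devices): encrypting collocated journal and data on
# two disks could look like:
#
# dmcrypt_journal_collocation: true
# devices:
#   - /dev/sdb
#   - /dev/sdc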

# VI. Encrypt osd data and/or journal devices with dm-crypt.
# Keys are stored in the monitors' k/v store
# Use 'true' to enable this scenario
# Journal and osd data are separated, each with their own dm-crypt device
# You must use raw_journal_devices and set your journal devices
#dmcrypt_dedicated_journal: false
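
# For example (illustrative devices): encrypted OSDs with their journals on a
# separate encrypted device could be declared as:
#
# dmcrypt_dedicated_journal: true
# devices:
#   - /dev/sdb
#   - /dev/sdc
# raw_journal_devices:
#   - /dev/sdf
#   - /dev/sdf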

##########
# DOCKER #
##########

#ceph_config_keys: [] # DON'T TOUCH ME

# PREPARE DEVICE
# Make sure you only pass a single device to raw_journal_devices, otherwise this will fail horribly.
# This is why we use [0] in the examples below.
#
# WARNING /!\ The DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
#
# Examples:
# Journal collocated on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_FILESTORE=1
# Dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1
# Encrypted OSD on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
# Encrypted OSD with dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1
#
# Bluestore OSD collocated: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1
# Bluestore OSD with dedicated db: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
# Bluestore OSD with dedicated db and wal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_BLUESTORE_BLOCK_WAL={{ bluestore_wal_devices[0] }}
# Encrypted OSD: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1
#
#
#ceph_osd_docker_devices: "{{ devices }}"
#ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1

# ACTIVATE DEVICE
# Examples:
# Journal collocated or Dedicated journal on Filestore: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FILESTORE=1
# Encrypted OSD or Encrypted OSD with dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
# Bluestore OSD: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
#
#ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }}
#ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command
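
# For example (illustrative, combining the prepare and activate examples above):
# a containerized BlueStore OSD with a dedicated DB device could set:
#
# ceph_osd_docker_devices: "{{ devices }}"
# ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
# ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1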