ceph-ansible/group_vars/osds.yml.sample

---
# Variables here are applicable to all host groups NOT roles
# This sample file generated by generate_group_vars_sample.sh
# Dummy variable to avoid an error, because Ansible does not recognize the
# file as a good configuration file when there is no variable in it.
dummy:
# You can override default vars defined in defaults/main.yml here,
# but I would advise using host or group vars instead
#raw_journal_devices: [] # backward compatibility with stable-2.2, will disappear in stable 3.1
#journal_collocation: False # backward compatibility with stable-2.2, will disappear in stable 3.1
#raw_multi_journal: False # backward compatibility with stable-2.2, will disappear in stable 3.1
#dmcrypt_journal_collocation: False # backward compatibility with stable-2.2, will disappear in stable 3.1
#dmcrypt_dedicated_journal: False # backward compatibility with stable-2.2, will disappear in stable 3.1
###########
# GENERAL #
###########
# Even though OSD nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on OSD nodes. Setting 'copy_admin_key' to 'true'
# will copy the admin key to the /etc/ceph/ directory
#copy_admin_key: false
####################
# OSD CRUSH LOCATION
####################
# /!\
#
# BE EXTREMELY CAREFUL WITH THIS OPTION
# DO NOT USE IT UNLESS YOU KNOW WHAT YOU ARE DOING
#
# /!\
#
# It is probably best to keep this option set to 'false', as the default
# suggests. This option should only be used when building a complex
# CRUSH map. It allows you to force a specific location for a set of OSDs.
#
# The following options will build a ceph.conf with OSD sections
# Example:
# [osd.X]
# osd crush location = "root=location"
#
# This works together with your inventory file.
# To match the 'osd_crush_location' option below, the inventory must look like:
#
# [osds]
# osd0 ceph_crush_root=foo ceph_crush_rack=bar
#crush_location: false
#osd_crush_location: "\"root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}\""
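#
# As a sketch of the result: with the inventory entry above, 'crush_location'
# set to true and the default 'osd_crush_location' template, and assuming
# 'ansible_hostname' resolves to 'osd0', the generated OSD section would look
# roughly like:
# [osd.X]
# osd crush location = "root=foo rack=bar host=osd0"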
##############
# CEPH OPTIONS
##############
# Devices to be used as OSDs
# You can pre-provision disks that are not present yet.
# Ansible will just skip them. Newly added disks will be
# automatically configured during the next run.
#
# Declare devices to be used as OSDs
# All scenarios (except the 3rd) inherit from the following device declaration
#devices:
# - /dev/sdb
# - /dev/sdc
# - /dev/sdd
# - /dev/sde
#devices: []
#'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
# You can use this option with the first, fourth and fifth OSD scenarios.
# Device discovery is based on the Ansible fact 'ansible_devices',
# which reports all the devices on a system. If chosen, all the disks
# found will be passed to ceph-disk. You should not be worried about using
# this option, since ceph-disk has a built-in check which looks for empty devices.
# Thus devices with existing partition tables will not be used.
#
#osd_auto_discovery: false
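#
# For example, to let the playbook pass every empty disk reported by the
# 'ansible_devices' fact to ceph-disk instead of listing devices explicitly,
# a minimal sketch would be:
# osd_auto_discovery: true
# (and leave 'devices' unset)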
# Encrypt your OSD device using dmcrypt
# If set to True, no matter which osd_objectstore and osd_scenario you use, the data will be encrypted
#dmcrypt: "{{ True if dmcrypt_journal_collocation or dmcrypt_dedicated_journal else False }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
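#
# For example, to encrypt the data on every OSD regardless of objectstore and
# scenario, a simple override would be:
# dmcrypt: true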
# I. First scenario: collocated
#
# To enable this scenario do: osd_scenario: collocated
#
#
# If osd_objectstore: filestore is enabled both 'ceph data' and 'ceph journal' partitions
# will be stored on the same device.
#
# If osd_objectstore: bluestore is enabled 'ceph data', 'ceph block', 'ceph block.db', 'ceph block.wal' will be stored
# on the same device. The device will get 2 partitions:
# - One for 'data', called 'ceph data'
# - One for 'ceph block', 'ceph block.db', 'ceph block.wal' called 'ceph block'
#
# Example of what you will get:
# [root@ceph-osd0 ~]# blkid /dev/sda*
# /dev/sda: PTTYPE="gpt"
# /dev/sda1: UUID="9c43e346-dd6e-431f-92d8-cbed4ccb25f6" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="749c71c9-ed8f-4930-82a7-a48a3bcdb1c7"
# /dev/sda2: PARTLABEL="ceph block" PARTUUID="e6ca3e1d-4702-4569-abfa-e285de328e9d"
#
#osd_scenario: "{{ 'collocated' if journal_collocation or dmcrypt_journal_collocation else 'non-collocated' if raw_multi_journal or dmcrypt_dedicated_journal else 'dummy' }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
#valid_osd_scenarios:
# - collocated
# - non-collocated
# - lvm
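#
# A minimal sketch to enable the collocated scenario (I.) described above
# (device names are purely illustrative):
# osd_scenario: collocated
# osd_objectstore: bluestore
# devices:
#   - /dev/sdb
#   - /dev/sdc
#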
# II. Second scenario: non-collocated
#
# To enable this scenario do: osd_scenario: non-collocated
#
# If osd_objectstore: filestore is enabled 'ceph data' and 'ceph journal' partitions
# will be stored on different devices:
# - 'ceph data' will be stored on the device listed in 'devices'
# - 'ceph journal' will be stored on the device listed in 'dedicated_devices'
#
# Let's take an example, imagine 'devices' was declared like this:
#
# devices:
# - /dev/sda
# - /dev/sdb
# - /dev/sdc
# - /dev/sdd
#
# And 'dedicated_devices' was declared like this:
#
# dedicated_devices:
# - /dev/sdf
# - /dev/sdf
# - /dev/sdg
# - /dev/sdg
#
# This will result in the following mapping:
# - /dev/sda will have /dev/sdf1 as a journal
# - /dev/sdb will have /dev/sdf2 as a journal
# - /dev/sdc will have /dev/sdg1 as a journal
# - /dev/sdd will have /dev/sdg2 as a journal
#
#
# If osd_objectstore: bluestore is enabled, both 'ceph block.db' and 'ceph block.wal' partitions will be stored
# on a dedicated device.
#
# So the following will happen:
# - The devices listed in 'devices' will get 2 partitions, one for 'block' and one for 'data'.
# 'data' is only 100MB and does not store any of your data, it's just a bunch of Ceph metadata.
# 'block' will store all your actual data.
# - The devices in 'dedicated_devices' will get 2 partitions: one for the RocksDB DB,
# called 'block.db', and one for the RocksDB WAL, called 'block.wal'
#
# By default dedicated_devices will represent block.db
#
# Example of what you will get:
# [root@ceph-osd0 ~]# blkid /dev/sd*
# /dev/sda: PTTYPE="gpt"
# /dev/sda1: UUID="c6821801-2f21-4980-add0-b7fc8bd424d5" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="f2cc6fa8-5b41-4428-8d3f-6187453464d0"
# /dev/sda2: PARTLABEL="ceph block" PARTUUID="ea454807-983a-4cf2-899e-b2680643bc1c"
# /dev/sdb: PTTYPE="gpt"
# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="af5b2d74-4c08-42cf-be57-7248c739e217"
# /dev/sdb2: PARTLABEL="ceph block.wal" PARTUUID="af3f8327-9aa9-4c2b-a497-cf0fe96d126a"
#dedicated_devices: "{{ raw_journal_devices if raw_multi_journal or dmcrypt_dedicated_journal else [] }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
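#
# A minimal sketch to enable the non-collocated scenario, reusing the mapping
# shown above (device names are purely illustrative):
# osd_scenario: non-collocated
# osd_objectstore: filestore
# devices:
#   - /dev/sda
#   - /dev/sdb
# dedicated_devices:
#   - /dev/sdf
#   - /dev/sdf
#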
# More device granularity for Bluestore
#
# ONLY if osd_objectstore: bluestore is enabled.
#
# By default, if 'bluestore_wal_devices' is empty, it will get the content of 'dedicated_devices'.
# If set, then you will have a dedicated partition on a specific device for block.wal.
#
# Example of what you will get:
# [root@ceph-osd0 ~]# blkid /dev/sd*
# /dev/sda: PTTYPE="gpt"
# /dev/sda1: UUID="39241ae9-d119-4335-96b3-0898da8f45ce" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="961e7313-bdb7-49e7-9ae7-077d65c4c669"
# /dev/sda2: PARTLABEL="ceph block" PARTUUID="bff8e54e-b780-4ece-aa16-3b2f2b8eb699"
# /dev/sdb: PTTYPE="gpt"
# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="0734f6b6-cc94-49e9-93de-ba7e1d5b79e3"
# /dev/sdc: PTTYPE="gpt"
# /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
#bluestore_wal_devices: "{{ dedicated_devices }}"
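#
# For example, to keep 'block.db' on 'dedicated_devices' but place 'block.wal'
# on a separate device (device name purely illustrative), a sketch would be:
# bluestore_wal_devices:
#   - /dev/sdc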
# III. Use ceph-volume to create OSDs from logical volumes.
# Use 'osd_scenario: lvm' to enable this scenario.
# When using lvm, journals are not collocated.
# lvm_volumes is a list of dictionaries.
#
# Filestore: Each dictionary must contain a data key and a journal key. Any
# logical volume or volume group used must be a name and not a path. data
# can be a logical volume, device or partition. journal can be either a lv or partition.
# You cannot use the same journal for many data lvs.
# data_vg must be the volume group name of the data lv, only applicable when data is an lv.
# journal_vg is optional and must be the volume group name of the journal lv, if applicable.
# For example:
# lvm_volumes:
#   - data: data-lv1
#     data_vg: vg1
#     journal: journal-lv1
#     journal_vg: vg2
#     crush_device_class: foo
#   - data: data-lv2
#     journal: /dev/sda1
#     data_vg: vg1
#   - data: data-lv3
#     journal: /dev/sdb1
#     data_vg: vg2
#   - data: /dev/sda
#     journal: /dev/sdb1
#   - data: /dev/sda1
#     journal: /dev/sdb1
#
# Bluestore: Each dictionary must contain at least data. When defining wal or
# db, it must have both the lv name and the vg name (db and wal are not required).
# This allows for four combinations: just data, data and wal, data and wal and
# db, data and db.
# For example:
# lvm_volumes:
#   - data: data-lv1
#     data_vg: vg1
#     wal: wal-lv1
#     wal_vg: vg1
#     crush_device_class: foo
#   - data: data-lv2
#     db: db-lv2
#     db_vg: vg2
#   - data: data-lv3
#     wal: wal-lv1
#     wal_vg: vg3
#     db: db-lv3
#     db_vg: vg3
#   - data: data-lv4
#     data_vg: vg4
#   - data: /dev/sda
#   - data: /dev/sdb1
#lvm_volumes: []
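#
# A minimal sketch to enable this scenario with a single bluestore OSD backed by
# an existing logical volume (lv/vg names are purely illustrative):
# osd_scenario: lvm
# osd_objectstore: bluestore
# lvm_volumes:
#   - data: data-lv1
#     data_vg: vg1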
##########
# DOCKER #
##########
#ceph_config_keys: [] # DON'T TOUCH ME
# Resource limitation
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_osd_docker_extra_env' variable.
#ceph_osd_docker_memory_limit: 3g
#ceph_osd_docker_cpu_limit: 1
# The next two variables are undefined, and thus, unused by default.
# If `lscpu | grep NUMA` returned the following:
# NUMA node0 CPU(s): 0,2,4,6,8,10,12,14,16
# NUMA node1 CPU(s): 1,3,5,7,9,11,13,15,17
# then, the following would run the OSD on the first NUMA node only.
#ceph_osd_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
#ceph_osd_docker_cpuset_mems: "0"
# PREPARE DEVICE
#
# WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
#
#ceph_osd_docker_devices: "{{ devices }}"
#ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}
# ACTIVATE DEVICE
#
#ceph_osd_docker_extra_env:
#ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command
###########
# SYSTEMD #
###########
# ceph_osd_systemd_overrides will override the systemd settings
# for the ceph-osd services.
# For example, to set "PrivateDevices=false" you can specify:
#ceph_osd_systemd_overrides:
#  Service:
#    PrivateDevices: False