---
# Variables here are applicable to all host groups NOT roles

# This sample file was generated by generate_group_vars_sample.sh

# Dummy variable to avoid an error, because Ansible does not recognize the
# file as a valid configuration file when it contains no variables.
dummy:

# You can override these variables using host or group vars.
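# For example, a minimal sketch of a group_vars override: to enable the
# Calamari API and the pool-protection flags on all monitors, you could
# create a group_vars file named after your monitor group (the file name
# 'mons.yml' below assumes the default mon_group_name; adjust it to match
# your inventory):
#
#   # group_vars/mons.yml
#   calamari: true
#   secure_cluster: true
#   secure_cluster_flags:
#     - nopgchange
#     - nodelete
#     - nosizechange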
###########
# GENERAL #
###########
#mon_group_name: mons

# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
#monitor_secret: "{{ monitor_keyring.stdout }}"
#admin_secret: 'admin_secret'

# Secure your cluster
# This will set the following flags on all the pools:
# * nosizechange
# * nopgchange
# * nodelete
#secure_cluster: false
#secure_cluster_flags:
#  - nopgchange
#  - nodelete
#  - nosizechange

# Enable the Calamari-backed REST API on a Monitor
#calamari: false

# Enable debugging for Calamari
#calamari_debug: false


##########
# CEPHFS #
##########
#cephfs: cephfs # name of the ceph filesystem
#cephfs_data: cephfs_data # name of the data pool for a given filesystem
#cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
#cephfs_pools:
#  - { name: "{{ cephfs_data }}", pgs: "" }
#  - { name: "{{ cephfs_metadata }}", pgs: "" }


###############
# CRUSH RULES #
###############
#crush_rule_config: false

#crush_rule_hdd:
#  name: HDD
#  root: HDD
#  type: host
#  default: false

#crush_rule_ssd:
#  name: SSD
#  root: SSD
#  type: host
#  default: false

#crush_rules:
#  - "{{ crush_rule_hdd }}"
#  - "{{ crush_rule_ssd }}"

# Caution: this will create CRUSH roots and racks according to the hostvar
# {{ osd_crush_location }} and will move hosts into them, which might lead
# to significant data movement in the cluster!
#
# In order for the playbook to create the CRUSH hierarchy, you have to set
# up your Ansible inventory file like so:
#
# [osds]
# ceph-osd-01 osd_crush_location="{ 'root': 'mon-roottt', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd-01' }"
#
# Note that 'host' is mandatory and that you need to submit at least two
# bucket types (including the host).
#create_crush_tree: false


#############
# OPENSTACK #
#############
#openstack_config: false
#openstack_glance_pool:
#  name: images
#  pg_num: "{{ osd_pool_default_pg_num }}"
#  rule_name: ""
#openstack_cinder_pool:
#  name: volumes
#  pg_num: "{{ osd_pool_default_pg_num }}"
#  rule_name: ""
#openstack_nova_pool:
#  name: vms
#  pg_num: "{{ osd_pool_default_pg_num }}"
#  rule_name: ""
#openstack_cinder_backup_pool:
#  name: backups
#  pg_num: "{{ osd_pool_default_pg_num }}"
#  rule_name: ""
#openstack_gnocchi_pool:
#  name: metrics
#  pg_num: "{{ osd_pool_default_pg_num }}"
#  rule_name: ""

#openstack_pools:
#  - "{{ openstack_glance_pool }}"
#  - "{{ openstack_cinder_pool }}"
#  - "{{ openstack_nova_pool }}"
#  - "{{ openstack_cinder_backup_pool }}"
#  - "{{ openstack_gnocchi_pool }}"

# The value for 'key' can be a pre-generated key,
# e.g. key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
# By default, keys will be auto-generated.
#
# To have Ansible setfacl the generated key, set the acls var like so:
# acls: ["u:nova:r--", "u:cinder:r--", "u:glance:r--", "u:gnocchi:r--"]

#openstack_keys:
#  - { name: client.glance, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool.name }}", mode: "0600", acls: [] }
#  - { name: client.cinder, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_pool.name }}, allow rwx pool={{ openstack_nova_pool.name }}, allow rx pool={{ openstack_glance_pool.name }}", mode: "0600", acls: [] }
#  - { name: client.cinder-backup, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_backup_pool.name }}", mode: "0600", acls: [] }
#  - { name: client.gnocchi, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_gnocchi_pool.name }}", mode: "0600", acls: [] }
#  - { name: client.openstack, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool.name }}, allow rwx pool={{ openstack_nova_pool.name }}, allow rwx pool={{ openstack_cinder_pool.name }}, allow rwx pool={{ openstack_cinder_backup_pool.name }}", mode: "0600", acls: [] }


##########
# DOCKER #
##########

# Resource limitation
# For the full list of limits you can apply, see:
# docs.docker.com/engine/admin/resource_constraints
# Default values are based on:
# https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mon_docker_extra_env' variable.
#ceph_mon_docker_memory_limit: 1g
#ceph_mon_docker_cpu_limit: 1

# Use this variable to add extra env configuration to run your mon container.
# If you want to set a custom admin keyring, you can set this variable as follows:
# ceph_mon_docker_extra_env: -e ADMIN_SECRET={{ admin_secret }}
#ceph_mon_docker_extra_env:
#mon_docker_privileged: false
#mon_docker_net_host: true
#ceph_config_keys: [] # DON'T TOUCH ME


###########
# SYSTEMD #
###########
# ceph_mon_systemd_overrides will override the systemd settings
# for the ceph-mon services.
# For example, to set "PrivateDevices=false" you can specify:
#ceph_mon_systemd_overrides:
#  Service:
#    PrivateDevices: False
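# A minimal sketch of what the override above amounts to: each top-level key
# of the dict becomes an INI section of a systemd drop-in, and each nested
# key becomes a setting in it. Systemd picks up drop-ins from
# /etc/systemd/system/ceph-mon@.service.d/ (the exact file name written by
# the role is an assumption here):
#
#   # /etc/systemd/system/ceph-mon@.service.d/overrides.conf (illustrative path)
#   [Service]
#   PrivateDevices=false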