ceph-ansible/tests/functional/all_daemons/group_vars/all

---
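# Install Ceph packages from the upstream community repository.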
ceph_origin: repository
ceph_repository: community
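# Public (client-facing) and cluster (replication) networks for the test nodes.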
public_network: "192.168.1.0/24"
cluster_network: "192.168.2.0/24"
radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
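# ceph.conf overrides: size-1 pools are allowed and the related redundancy
# warnings are silenced, since this is a disposable test cluster.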
ceph_conf_overrides:
  global:
    auth_allow_insecure_global_id_reclaim: false
    mon_allow_pool_size_one: true
    mon_warn_on_pool_no_redundancy: false
    osd_pool_default_size: 1
    mon_max_pg_per_osd: 300
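# Let ceph-ansible create the OpenStack client pools (Glance images, Cinder volumes).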
openstack_config: True
openstack_glance_pool:
  name: "images"
  size: 1
  application: rbd
  target_size_ratio: 0.2
openstack_cinder_pool:
  name: "volumes"
  rule_name: "HDD"
  size: 1
  application: rbd
openstack_pools:
  - "{{ openstack_glance_pool }}"
  - "{{ openstack_cinder_pool }}"
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
mds_max_mds: 2
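# Throwaway dashboard and Grafana credentials, only intended for this test environment.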
dashboard_admin_password: $sX!cD$rYU6qR^B!
grafana_admin_password: +xFRe+RES@7vg24n
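# Monitoring stack container images, pulled from quay.io and pinned to specific tags.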
ceph_docker_registry: quay.io
node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"
grafana_server_group_name: ceph_monitoring