mirror of https://github.com/ceph/ceph-ansible.git
osd: drop openstack related tasks
All of this should be addressed in custom separate playbooks if needed.

Signed-off-by: Guillaume Abrioux <gabrioux@ibm.com>

Branch: pull/7494/head
parent 05a1465416
commit 9c467e41b3
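As the commit message notes, deployments that still need these pools and keys can create them from a small playbook of their own. The sketch below is one possible shape, not part of ceph-ansible: it drives the plain Ceph CLI from the first monitor, and the inventory group name (mons), the pool names, and the client name are illustrative values borrowed from the old defaults. The ceph_pool and ceph_key modules removed in the diff below could be used instead if they are kept on the local module path.

---
# Illustrative sketch only: run against a monitor that holds an admin keyring.
- name: Provision OpenStack pools and cephx keys outside ceph-ansible
  hosts: mons[0]
  become: true
  tasks:
    - name: Create the RBD pools used by Glance and Cinder
      ansible.builtin.command: "ceph osd pool create {{ item }}"
      loop:
        - images
        - volumes

    - name: Tag each pool with the rbd application
      ansible.builtin.command: "ceph osd pool application enable {{ item }} rbd"
      loop:
        - images
        - volumes

    - name: Create a cephx key for the Glance client
      ansible.builtin.command: >-
        ceph auth get-or-create client.glance
        mon 'profile rbd'
        osd 'profile rbd pool=images'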
@@ -554,64 +554,6 @@ dummy:
 #docker_pull_timeout: "300s"
 
-#############
-# OPENSTACK #
-#############
-#openstack_config: false
-# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
-# `pg_num` and `pgp_num` keys will be ignored, even if specified.
-# eg:
-# openstack_glance_pool:
-#   name: "images"
-#   rule_name: "my_replicated_rule"
-#   application: "rbd"
-#   pg_autoscale_mode: false
-#   pg_num: 16
-#   pgp_num: 16
-#   target_size_ratio: 0.2
-#openstack_glance_pool:
-#  name: "images"
-#  application: "rbd"
-#openstack_cinder_pool:
-#  name: "volumes"
-#  application: "rbd"
-#openstack_nova_pool:
-#  name: "vms"
-#  application: "rbd"
-#openstack_cinder_backup_pool:
-#  name: "backups"
-#  application: "rbd"
-#openstack_gnocchi_pool:
-#  name: "metrics"
-#  application: "rbd"
-#openstack_cephfs_data_pool:
-#  name: "manila_data"
-#  application: "cephfs"
-#openstack_cephfs_metadata_pool:
-#  name: "manila_metadata"
-#  application: "cephfs"
-#openstack_pools:
-#  - "{{ openstack_glance_pool }}"
-#  - "{{ openstack_cinder_pool }}"
-#  - "{{ openstack_nova_pool }}"
-#  - "{{ openstack_cinder_backup_pool }}"
-#  - "{{ openstack_gnocchi_pool }}"
-#  - "{{ openstack_cephfs_data_pool }}"
-#  - "{{ openstack_cephfs_metadata_pool }}"
-
-
-# The value for 'key' can be a pre-generated key,
-# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
-# By default, keys will be auto-generated.
-#
-#openstack_keys:
-#  - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-#  - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-#  - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-#  - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
-#  - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-
-
 #############
 # DASHBOARD #
 #############
 
@@ -546,64 +546,6 @@ docker_pull_retry: 3
 docker_pull_timeout: "300s"
 
-#############
-# OPENSTACK #
-#############
-openstack_config: false
-# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
-# `pg_num` and `pgp_num` keys will be ignored, even if specified.
-# eg:
-# openstack_glance_pool:
-#   name: "images"
-#   rule_name: "my_replicated_rule"
-#   application: "rbd"
-#   pg_autoscale_mode: false
-#   pg_num: 16
-#   pgp_num: 16
-#   target_size_ratio: 0.2
-openstack_glance_pool:
-  name: "images"
-  application: "rbd"
-openstack_cinder_pool:
-  name: "volumes"
-  application: "rbd"
-openstack_nova_pool:
-  name: "vms"
-  application: "rbd"
-openstack_cinder_backup_pool:
-  name: "backups"
-  application: "rbd"
-openstack_gnocchi_pool:
-  name: "metrics"
-  application: "rbd"
-openstack_cephfs_data_pool:
-  name: "manila_data"
-  application: "cephfs"
-openstack_cephfs_metadata_pool:
-  name: "manila_metadata"
-  application: "cephfs"
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
-  - "{{ openstack_nova_pool }}"
-  - "{{ openstack_cinder_backup_pool }}"
-  - "{{ openstack_gnocchi_pool }}"
-  - "{{ openstack_cephfs_data_pool }}"
-  - "{{ openstack_cephfs_metadata_pool }}"
-
-
-# The value for 'key' can be a pre-generated key,
-# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
-# By default, keys will be auto-generated.
-#
-openstack_keys:
-  - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-  - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-  - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-  - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
-  - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-
-
 #############
 # DASHBOARD #
 #############
 
@@ -99,13 +99,3 @@
   ansible.builtin.include_tasks: crush_rules.yml
   when: hostvars[groups[mon_group_name][0]]['crush_rule_config'] | default(crush_rule_config) | bool
   tags: wait_all_osds_up
-
-# Create the pools listed in openstack_pools
-- name: Include openstack_config.yml
-  ansible.builtin.include_tasks: openstack_config.yml
-  when:
-    - not add_osd | bool
-    - not rolling_update | default(False) | bool
-    - openstack_config | bool
-    - inventory_hostname == groups[osd_group_name] | last
-  tags: wait_all_osds_up
@@ -1,68 +0,0 @@
----
-- name: Pool related tasks
-  block:
-    - name: Create openstack pool(s)
-      ceph_pool:
-        name: "{{ item.name }}"
-        cluster: "{{ cluster }}"
-        pg_num: "{{ item.pg_num | default(omit) }}"
-        pgp_num: "{{ item.pgp_num | default(omit) }}"
-        size: "{{ item.size | default(omit) }}"
-        min_size: "{{ item.min_size | default(omit) }}"
-        pool_type: "{{ item.type | default('replicated') }}"
-        rule_name: "{{ item.rule_name | default(omit) }}"
-        erasure_profile: "{{ item.erasure_profile | default(omit) }}"
-        pg_autoscale_mode: "{{ item.pg_autoscale_mode | default(omit) }}"
-        target_size_ratio: "{{ item.target_size_ratio | default(omit) }}"
-        application: "{{ item.application | default(omit) }}"
-      with_items: "{{ openstack_pools }}"
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-      environment:
-        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
-        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-
-- name: Create openstack cephx key(s)
-  when:
-    - cephx | bool
-    - openstack_config | bool
-  block:
-    - name: Generate keys
-      ceph_key:
-        name: "{{ item.name }}"
-        caps: "{{ item.caps }}"
-        secret: "{{ item.key | default('') }}"
-        cluster: "{{ cluster }}"
-        mode: "{{ item.mode | default(ceph_keyring_permissions) }}"
-      environment:
-        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
-        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-      with_items: "{{ openstack_keys }}"
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-      no_log: "{{ no_log_on_ceph_key_tasks }}"
-
-    - name: Get keys from monitors
-      ceph_key:
-        name: "{{ item.name }}"
-        cluster: "{{ cluster }}"
-        output_format: plain
-        state: info
-      environment:
-        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
-        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-      register: _osp_keys
-      with_items: "{{ openstack_keys }}"
-      delegate_to: "{{ groups.get(mon_group_name)[0] }}"
-      no_log: "{{ no_log_on_ceph_key_tasks }}"
-
-    - name: Copy ceph key(s) if needed
-      ansible.builtin.copy:
-        dest: "/etc/ceph/{{ cluster }}.{{ item.0.item.name }}.keyring"
-        content: "{{ item.0.stdout + '\n' }}"
-        owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
-        group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
-        mode: "{{ item.0.item.mode | default(ceph_keyring_permissions) }}"
-      with_nested:
-        - "{{ _osp_keys.results }}"
-        - "{{ groups[mon_group_name] }}"
-      delegate_to: "{{ item.1 }}"
-      no_log: "{{ no_log_on_ceph_key_tasks }}"
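Alternatively, the deleted task file above can be copied into a site repository and driven by a thin wrapper play. The following is a hedged sketch only: it assumes the file was saved next to the playbook as openstack_config.yml, that ceph-ansible's ceph_pool and ceph_key modules are resolvable on the module path, and that an inventory group named mons exists; every variable value shown is illustrative.

---
# Hypothetical wrapper play; adjust names and values to your deployment.
- name: Re-create the dropped OpenStack provisioning as a custom playbook
  hosts: mons[0]
  become: true
  vars:
    cluster: ceph
    mon_group_name: mons
    containerized_deployment: false
    container_binary: ""
    cephx: true
    openstack_config: true
    ceph_keyring_permissions: "0600"
    no_log_on_ceph_key_tasks: true
    openstack_glance_pool:
      name: "images"
      application: "rbd"
    openstack_pools:
      - "{{ openstack_glance_pool }}"
    openstack_keys:
      - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}" }, mode: "0600" }
  tasks:
    - name: Run the preserved OpenStack task file
      ansible.builtin.include_tasks: openstack_config.yml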
@@ -3,7 +3,6 @@
   ansible.builtin.fail:
     msg: "You must set a target_size_ratio value on following pool: {{ item.name }}."
   with_items:
-    - "{{ openstack_pools | default([]) }}"
     - "{{ cephfs_pools | default([]) }}"
     - "{{ pools | default([]) }}"
   when:
@@ -11,7 +11,6 @@ public_network: "192.168.19.0/24"
 cluster_network: "192.168.20.0/24"
 rgw_override_bucket_index_max_shards: 16
 rgw_bucket_default_quota_max_objects: 1638400
-openstack_config: True
 dashboard_enabled: false
 ceph_conf_overrides:
   global:
@@ -4,7 +4,6 @@ ceph_origin: repository
 ceph_repository: community
 radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
-openstack_config: True
 dashboard_enabled: False
 public_network: "192.168.17.0/24"
 cluster_network: "192.168.18.0/24"
@@ -17,18 +17,6 @@ ceph_conf_overrides:
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
     mon_max_pg_per_osd: 300
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  size: 1
-  target_size_ratio: 0.2
-openstack_cinder_pool:
-  name: "volumes"
-  rule_name: "HDD"
-  size: 1
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
 docker_pull_timeout: 600s
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
@@ -11,20 +11,6 @@ ceph_conf_overrides:
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
     mon_max_pg_per_osd: 300
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  size: 1
-  application: rbd
-  target_size_ratio: 0.2
-openstack_cinder_pool:
-  name: "volumes"
-  rule_name: "HDD"
-  size: 1
-  application: rbd
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 mds_max_mds: 2
@@ -18,18 +18,6 @@ ceph_conf_overrides:
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
     mon_max_pg_per_osd: 300
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  size: 1
-  target_size_ratio: 0.2
-openstack_cinder_pool:
-  name: "volumes"
-  rule_name: "HDD"
-  size: 1
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
 docker_pull_timeout: 600s
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
@@ -12,20 +12,6 @@ ceph_conf_overrides:
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
     mon_max_pg_per_osd: 300
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  size: 1
-  application: rbd
-  target_size_ratio: 0.2
-openstack_cinder_pool:
-  name: "volumes"
-  rule_name: "HDD"
-  size: 1
-  application: rbd
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 mds_max_mds: 2
@@ -16,18 +16,6 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-openstack_config: False
-openstack_glance_pool:
-  name: "images"
-  rule_name: "HDD"
-  size: 1
-openstack_cinder_pool:
-  name: "volumes"
-  rule_name: "HDD"
-  size: 1
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 dashboard_admin_password: $sX!cD$rYU6qR^B!
@@ -11,7 +11,6 @@ public_network: "192.168.31.0/24"
 cluster_network: "192.168.32.0/24"
 rgw_override_bucket_index_max_shards: 16
 rgw_bucket_default_quota_max_objects: 1638400
-openstack_config: True
 dashboard_enabled: false
 ceph_conf_overrides:
   global:
@@ -4,7 +4,6 @@ ceph_origin: repository
 ceph_repository: community
 radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
-openstack_config: True
 dashboard_enabled: False
 public_network: "192.168.31.0/24"
 cluster_network: "192.168.32.0/24"
@@ -21,20 +21,6 @@ ceph_conf_overrides:
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  type: 3
-  size: 1
-  application: rbd
-  target_size_ratio: 0.2
-openstack_cinder_pool:
-  name: "volumes"
-  size: 1
-  application: rbd
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
 ceph_docker_registry: quay.io
 ceph_docker_image: ceph/daemon-base
 ceph_docker_image_tag: latest-main
@@ -16,17 +16,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  type: 3
-  size: 1
-  application: rbd
-  target_size_ratio: 0.2
-openstack_cinder_pool:
-  name: "volumes"
-  size: 1
-  application: rbd
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
@@ -15,18 +15,6 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  rule_name: "HDD"
-  size: 1
-openstack_cinder_pool:
-  name: "volumes"
-  rule_name: "HDD"
-  size: 1
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 dashboard_admin_password: $sX!cD$rYU6qR^B!
@@ -12,7 +12,6 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-openstack_config: False
 dashboard_enabled: False
 copy_admin_key: True
 ceph_docker_registry: quay.io
@@ -12,7 +12,6 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-openstack_config: False
 dashboard_enabled: False
 ceph_docker_registry: quay.io
 ceph_docker_image: ceph/daemon-base
@@ -12,7 +12,6 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-openstack_config: False
 dashboard_enabled: False
 ceph_docker_registry: quay.io
 ceph_docker_image: ceph/daemon-base
@@ -12,7 +12,6 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-openstack_config: False
 dashboard_enabled: False
 copy_admin_key: True
 ceph_docker_registry: quay.io
@@ -6,6 +6,5 @@ cluster_network: "192.168.72.0/24"
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 3
-openstack_config: False
 dashboard_enabled: False
 copy_admin_key: True
@@ -11,7 +11,6 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-openstack_config: False
 dashboard_enabled: False
 copy_admin_key: True
 ceph_docker_registry: quay.io
@@ -13,7 +13,6 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-openstack_config: False
 dashboard_enabled: False
 copy_admin_key: True
 ceph_docker_registry: quay.io
@@ -17,7 +17,6 @@ ceph_conf_overrides:
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
     mon_max_pg_per_osd: 300
-openstack_config: false
 docker_pull_timeout: 600s
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
@@ -11,7 +11,6 @@ ceph_conf_overrides:
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
     mon_max_pg_per_osd: 300
-openstack_config: false
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 mds_max_mds: 2