osd: add pg autoscaler support

This commit adds support for the pg autoscaler.

The pool definition structure now has two additional attributes,
`pg_autoscale_mode` and `target_size_ratio`, e.g.:

```
test:
  name: "test"
  pg_num: "{{ osd_pool_default_pg_num }}"
  pgp_num: "{{ osd_pool_default_pg_num }}"
  rule_name: "replicated_rule"
  application: "rbd"
  type: 1
  erasure_profile: ""
  expected_num_objects: ""
  size: "{{ osd_pool_default_size }}"
  min_size: "{{ osd_pool_default_min_size }}"
  pg_autoscale_mode: False
  target_size_ratio": 0.1
```

When `pg_autoscale_mode` is `True`, the user has to set a suitable value for
`target_size_ratio`.
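
For instance, a minimal sketch of the same `test` pool with the autoscaler
enabled (the `0.2` ratio is illustrative, not a recommended value):

```
test:
  name: "test"
  pg_num: "{{ osd_pool_default_pg_num }}"
  pgp_num: "{{ osd_pool_default_pg_num }}"
  rule_name: "replicated_rule"
  application: "rbd"
  type: 1
  erasure_profile: ""
  expected_num_objects: ""
  size: "{{ osd_pool_default_size }}"
  min_size: "{{ osd_pool_default_min_size }}"
  pg_autoscale_mode: True
  target_size_ratio: 0.2
```

In that case `pg_num` and `pgp_num` are ignored even if specified: the pool is
created with a small initial pg count and the playbook then runs
`osd pool set <pool> pg_autoscale_mode on` and
`osd pool set <pool> target_size_ratio <ratio>` against it.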

Given that it's a new feature, it's still disabled by default.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1782253

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 47adc2bb08)
pull/5133/head
Guillaume Abrioux 2020-02-28 16:03:15 +01:00
parent ae06d684b8
commit 98783a17b3
9 changed files with 212 additions and 15 deletions

View File

@@ -332,6 +332,22 @@ dummy:
##########
# CEPHFS #
##########
# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
# `pg_num` and `pgp_num` keys will be ignored, even if specified.
# eg:
# cephfs_data_pool:
# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
# pg_num: "{{ osd_pool_default_pg_num }}"
# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
# application: "cephfs"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
# target_size_ratio: 0.2
#cephfs: cephfs # name of the ceph filesystem
#cephfs_data_pool:
# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
@@ -344,6 +360,7 @@ dummy:
# application: "cephfs"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
#cephfs_metadata_pool:
# name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}"
# pg_num: "{{ osd_pool_default_pg_num }}"
@@ -355,6 +372,7 @@ dummy:
# application: "cephfs"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
#cephfs_pools:
# - "{{ cephfs_data_pool }}"
# - "{{ cephfs_metadata_pool }}"
@@ -600,6 +618,9 @@ dummy:
# OPENSTACK #
#############
#openstack_config: false
# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
# `pg_num` and `pgp_num` keys will be ignored, even if specified.
# eg:
# openstack_glance_pool:
# name: "images"
# pg_num: "{{ osd_pool_default_pg_num }}"
@@ -611,6 +632,20 @@ dummy:
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
# target_size_ratio: 0.2
#openstack_glance_pool:
# name: "images"
# pg_num: "{{ osd_pool_default_pg_num }}"
# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
#openstack_cinder_pool:
# name: "volumes"
# pg_num: "{{ osd_pool_default_pg_num }}"
@@ -622,6 +657,7 @@ dummy:
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
#openstack_nova_pool:
# name: "vms"
# pg_num: "{{ osd_pool_default_pg_num }}"
@@ -633,6 +669,7 @@ dummy:
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
#openstack_cinder_backup_pool:
# name: "backups"
# pg_num: "{{ osd_pool_default_pg_num }}"
@@ -644,6 +681,7 @@ dummy:
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
#openstack_gnocchi_pool:
# name: "metrics"
# pg_num: "{{ osd_pool_default_pg_num }}"
@@ -655,6 +693,7 @@ dummy:
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
#openstack_cephfs_data_pool:
# name: "manila_data"
# pg_num: "{{ osd_pool_default_pg_num }}"
@@ -666,6 +705,7 @@ dummy:
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
#openstack_cephfs_metadata_pool:
# name: "manila_metadata"
# pg_num: "{{ osd_pool_default_pg_num }}"
@@ -677,7 +717,7 @@ dummy:
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
#openstack_pools:
# - "{{ openstack_glance_pool }}"
# - "{{ openstack_cinder_pool }}"

View File

@@ -18,6 +18,9 @@ dummy:
#copy_admin_key: false
#user_config: false
# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
# `pg_num` and `pgp_num` keys will be ignored, even if specified.
# eg:
# test:
# name: "test"
# pg_num: "{{ osd_pool_default_pg_num }}"
@@ -29,6 +32,20 @@ dummy:
# expected_num_objects: ""
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
# target_size_ratio: 0.2
#test:
# name: "test"
# pg_num: "{{ osd_pool_default_pg_num }}"
# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# application: "rbd"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
#test2:
# name: "test2"
# pg_num: "{{ osd_pool_default_pg_num }}"
@@ -40,6 +57,7 @@ dummy:
# expected_num_objects: ""
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
#pools:
# - "{{ test }}"
# - "{{ test2 }}"

View File

@@ -332,6 +332,22 @@ ceph_iscsi_config_dev: false
##########
# CEPHFS #
##########
# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
# `pg_num` and `pgp_num` keys will be ignored, even if specified.
# eg:
# cephfs_data_pool:
# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
# pg_num: "{{ osd_pool_default_pg_num }}"
# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
# application: "cephfs"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
# target_size_ratio: 0.2
#cephfs: cephfs # name of the ceph filesystem
#cephfs_data_pool:
# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
@@ -344,6 +360,7 @@ ceph_iscsi_config_dev: false
# application: "cephfs"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
#cephfs_metadata_pool:
# name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}"
# pg_num: "{{ osd_pool_default_pg_num }}"
@@ -355,6 +372,7 @@ ceph_iscsi_config_dev: false
# application: "cephfs"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
#cephfs_pools:
# - "{{ cephfs_data_pool }}"
# - "{{ cephfs_metadata_pool }}"
@@ -600,6 +618,9 @@ ceph_docker_registry_auth: true
# OPENSTACK #
#############
#openstack_config: false
# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
# `pg_num` and `pgp_num` keys will be ignored, even if specified.
# eg:
# openstack_glance_pool:
# name: "images"
# pg_num: "{{ osd_pool_default_pg_num }}"
@@ -611,6 +632,20 @@ ceph_docker_registry_auth: true
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
# target_size_ratio: 0.2
#openstack_glance_pool:
# name: "images"
# pg_num: "{{ osd_pool_default_pg_num }}"
# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
#openstack_cinder_pool:
# name: "volumes"
# pg_num: "{{ osd_pool_default_pg_num }}"
@@ -622,6 +657,7 @@ ceph_docker_registry_auth: true
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
#openstack_nova_pool:
# name: "vms"
# pg_num: "{{ osd_pool_default_pg_num }}"
@@ -633,6 +669,7 @@ ceph_docker_registry_auth: true
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
#openstack_cinder_backup_pool:
# name: "backups"
# pg_num: "{{ osd_pool_default_pg_num }}"
@@ -644,6 +681,7 @@ ceph_docker_registry_auth: true
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
#openstack_gnocchi_pool:
# name: "metrics"
# pg_num: "{{ osd_pool_default_pg_num }}"
@@ -655,6 +693,7 @@ ceph_docker_registry_auth: true
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
#openstack_cephfs_data_pool:
# name: "manila_data"
# pg_num: "{{ osd_pool_default_pg_num }}"
@@ -666,6 +705,7 @@ ceph_docker_registry_auth: true
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
#openstack_cephfs_metadata_pool:
# name: "manila_metadata"
# pg_num: "{{ osd_pool_default_pg_num }}"
@@ -677,7 +717,7 @@ ceph_docker_registry_auth: true
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
#openstack_pools:
# - "{{ openstack_glance_pool }}"
# - "{{ openstack_cinder_pool }}"

View File

@@ -10,6 +10,22 @@
copy_admin_key: false
user_config: false
# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
# `pg_num` and `pgp_num` keys will be ignored, even if specified.
# eg:
# test:
# name: "test"
# pg_num: "{{ osd_pool_default_pg_num }}"
# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# application: "rbd"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
# target_size_ratio: 0.2
test:
name: "test"
pg_num: "{{ osd_pool_default_pg_num }}"
@@ -21,6 +37,7 @@ test:
expected_num_objects: ""
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
pg_autoscale_mode: False
test2:
name: "test2"
pg_num: "{{ osd_pool_default_pg_num }}"
@@ -32,6 +49,7 @@ test2:
expected_num_objects: ""
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
pg_autoscale_mode: False
pools:
- "{{ test }}"
- "{{ test2 }}"

View File

@@ -83,17 +83,16 @@
command: >
{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
osd pool create {{ item.0.name }}
{{ item.0.pg_num | default(osd_pool_default_pg_num) }}
{{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) }}
{{ item.0.pg_num | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else 16 }}
{{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else '' }}
{%- if item.0.type | default(1) | int == 1 or item.0.type | default('replicated') == 'replicated' %}
replicated
{{ item.0.rule_name | default(osd_pool_default_crush_rule) }}
{{ item.0.expected_num_objects | default(0) }}
{%- else %}
erasure
{{ item.0.erasure_profile }}
{{ item.0.rule_name | default('erasure-code') }}
{%- endif %}
{{ item.0.expected_num_objects | default(0) }}
with_together:
- "{{ pools }}"
- "{{ created_pools.results }}"
@@ -103,6 +102,17 @@
- pools | length > 0
- item.1.rc != 0
- name: set the target ratio on pool(s)
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} target_size_ratio {{ item.target_size_ratio }}"
with_items: "{{ pools | unique }}"
delegate_to: "{{ delegated_node }}"
when: item.pg_autoscale_mode | default(False) | bool
- name: set pg_autoscale_mode value on pool(s)
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode {{ item.pg_autoscale_mode | default(False) | ternary('on', 'warn') }}"
delegate_to: "{{ delegated_node }}"
with_items: "{{ pools | unique }}"
- name: customize pool size
command: >
{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}

View File

@@ -324,6 +324,22 @@ mon_host_v2:
##########
# CEPHFS #
##########
# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
# `pg_num` and `pgp_num` keys will be ignored, even if specified.
# eg:
# cephfs_data_pool:
# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
# pg_num: "{{ osd_pool_default_pg_num }}"
# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
# application: "cephfs"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
# target_size_ratio: 0.2
cephfs: cephfs # name of the ceph filesystem
cephfs_data_pool:
name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
@@ -336,6 +352,7 @@ cephfs_data_pool:
application: "cephfs"
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
pg_autoscale_mode: False
cephfs_metadata_pool:
name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}"
pg_num: "{{ osd_pool_default_pg_num }}"
@@ -347,6 +364,7 @@ cephfs_metadata_pool:
application: "cephfs"
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
pg_autoscale_mode: False
cephfs_pools:
- "{{ cephfs_data_pool }}"
- "{{ cephfs_metadata_pool }}"
@@ -592,6 +610,22 @@ docker_pull_timeout: "300s"
# OPENSTACK #
#############
openstack_config: false
# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
# `pg_num` and `pgp_num` keys will be ignored, even if specified.
# eg:
# openstack_glance_pool:
# name: "images"
# pg_num: "{{ osd_pool_default_pg_num }}"
# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
# pg_autoscale_mode: False
# target_size_ratio: 0.2
openstack_glance_pool:
name: "images"
pg_num: "{{ osd_pool_default_pg_num }}"
@@ -603,6 +637,7 @@ openstack_glance_pool:
application: "rbd"
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
pg_autoscale_mode: False
openstack_cinder_pool:
name: "volumes"
pg_num: "{{ osd_pool_default_pg_num }}"
@@ -614,6 +649,7 @@ openstack_cinder_pool:
application: "rbd"
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
pg_autoscale_mode: False
openstack_nova_pool:
name: "vms"
pg_num: "{{ osd_pool_default_pg_num }}"
@@ -625,6 +661,7 @@ openstack_nova_pool:
application: "rbd"
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
pg_autoscale_mode: False
openstack_cinder_backup_pool:
name: "backups"
pg_num: "{{ osd_pool_default_pg_num }}"
@@ -636,6 +673,7 @@ openstack_cinder_backup_pool:
application: "rbd"
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
pg_autoscale_mode: False
openstack_gnocchi_pool:
name: "metrics"
pg_num: "{{ osd_pool_default_pg_num }}"
@@ -647,6 +685,7 @@ openstack_gnocchi_pool:
application: "rbd"
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
pg_autoscale_mode: False
openstack_cephfs_data_pool:
name: "manila_data"
pg_num: "{{ osd_pool_default_pg_num }}"
@@ -658,6 +697,7 @@ openstack_cephfs_data_pool:
application: "rbd"
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
pg_autoscale_mode: False
openstack_cephfs_metadata_pool:
name: "manila_metadata"
pg_num: "{{ osd_pool_default_pg_num }}"
@@ -669,7 +709,7 @@ openstack_cephfs_metadata_pool:
application: "rbd"
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
pg_autoscale_mode: False
openstack_pools:
- "{{ openstack_glance_pool }}"
- "{{ openstack_cinder_pool }}"

View File

@@ -25,21 +25,31 @@
command: >
{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
osd pool create {{ item.name }}
{{ item.pg_num | default(osd_pool_default_pg_num) }}
{{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) }}
{{ item.pg_num | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else 16 }}
{{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else '' }}
{%- if item.type | default(1) | int == 1 or item.type | default('replicated') == 'replicated' %}
replicated
{{ item.rule_name | default(osd_pool_default_crush_rule) }}
{{ item.expected_num_objects | default(0) }}
{%- else %}
erasure
{{ item.erasure_profile }}
{{ item.rule_name | default('erasure-code') }}
{%- endif %}
{{ item.expected_num_objects | default(0) }}
changed_when: false
with_items:
- "{{ cephfs_pools }}"
- name: set the target ratio on pool(s)
command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} target_size_ratio {{ item.target_size_ratio }}"
with_items: "{{ cephfs_pools | unique }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: item.pg_autoscale_mode | default(False) | bool
- name: set pg_autoscale_mode value on pool(s)
command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode {{ item.pg_autoscale_mode | default(False) | ternary('on', 'warn') }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
with_items: "{{ cephfs_pools | unique }}"
- name: customize pool size
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}"
with_items: "{{ cephfs_pools | unique }}"

View File

@@ -15,17 +15,16 @@
command: >
{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
osd pool create {{ item.0.name }}
{{ item.0.pg_num | default(osd_pool_default_pg_num) }}
{{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) }}
{{ item.0.pg_num | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else 16 }}
{{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else '' }}
{%- if item.0.type | default(1) | int == 1 or item.0.type | default('replicated') == 'replicated' %}
replicated
{{ item.0.rule_name | default(osd_pool_default_crush_rule) }}
{{ item.0.expected_num_objects | default(0) }}
{%- else %}
erasure
{{ item.0.erasure_profile }}
{{ item.0.rule_name | default('erasure-code') }}
{%- endif %}
{{ item.0.expected_num_objects | default(0) }}
with_together:
- "{{ openstack_pools | unique }}"
- "{{ created_pools.results }}"
@@ -33,6 +32,17 @@
delegate_to: "{{ groups[mon_group_name][0] }}"
when: item.1.get('rc', 0) != 0
- name: set the target ratio on pool(s)
command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} target_size_ratio {{ item.target_size_ratio }}"
with_items: "{{ openstack_pools | unique }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: item.pg_autoscale_mode | default(False) | bool
- name: set pg_autoscale_mode value on pool(s)
command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode {{ item.pg_autoscale_mode | default(False) | ternary('on', 'warn') }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
with_items: "{{ openstack_pools | unique }}"
- name: customize pool size
command: >
{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}

View File

@@ -0,0 +1,11 @@
---
- name: fail if target_size_ratio is not set when pg_autoscale_mode is True
fail:
msg: "You must set a target_size_ratio value on following pool: {{ item.name }}."
with_items:
- "{{ openstack_pools | default([]) }}"
- "{{ cephfs_pools | default([]) }}"
- "{{ pools | default([]) }}"
when:
- item.pg_autoscale_mode | default(False) | bool
- item.target_size_ratio is undefined