From 98783a17b32d5f746c3d1ac04986242eb01ae318 Mon Sep 17 00:00:00 2001
From: Guillaume Abrioux
Date: Fri, 28 Feb 2020 16:03:15 +0100
Subject: [PATCH] osd: add pg autoscaler support

This commit adds pg autoscaler support.

The pool definition structure now has two additional attributes,
`pg_autoscale_mode` and `target_size_ratio`, e.g.:

```
test:
  name: "test"
  pg_num: "{{ osd_pool_default_pg_num }}"
  pgp_num: "{{ osd_pool_default_pg_num }}"
  rule_name: "replicated_rule"
  application: "rbd"
  type: 1
  erasure_profile: ""
  expected_num_objects: ""
  size: "{{ osd_pool_default_size }}"
  min_size: "{{ osd_pool_default_min_size }}"
  pg_autoscale_mode: False
  target_size_ratio: 0.1
```

When `pg_autoscale_mode` is `True`, the user must set a sensible value for
`target_size_ratio`.

Given that it's a new feature, it's still disabled by default.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1782253
Signed-off-by: Guillaume Abrioux
(cherry picked from commit 47adc2bb08b18845c34d90bd0dafb43298e6bee5)
---
 group_vars/all.yml.sample | 42 ++++++++++++++++++-
 group_vars/clients.yml.sample | 18 ++++++++
 group_vars/rhcs.yml.sample | 42 ++++++++++++++++++-
 roles/ceph-client/defaults/main.yml | 18 ++++++++
 roles/ceph-client/tasks/create_users_keys.yml | 18 ++++++--
 roles/ceph-defaults/defaults/main.yml | 42 ++++++++++++++++++-
 .../ceph-mds/tasks/create_mds_filesystems.yml | 18 ++++++--
 roles/ceph-osd/tasks/openstack_config.yml | 18 ++++++--
 roles/ceph-validate/tasks/check_pools.yml | 11 +++++
 9 files changed, 212 insertions(+), 15 deletions(-)
 create mode 100644 roles/ceph-validate/tasks/check_pools.yml

diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample
index 867c90363..272c04273 100644
--- a/group_vars/all.yml.sample
+++ b/group_vars/all.yml.sample
@@ -332,6 +332,22 @@ dummy:
 ##########
 # CEPHFS #
 ##########
+# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
+# `pg_num` and `pgp_num` keys will be ignored, even if specified.
+# eg:
+# cephfs_data_pool:
+#   name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
+#   pg_num: "{{ osd_pool_default_pg_num }}"
+#   pgp_num: "{{ osd_pool_default_pg_num }}"
+#   rule_name: "replicated_rule"
+#   type: 1
+#   erasure_profile: ""
+#   expected_num_objects: ""
+#   application: "cephfs"
+#   size: "{{ osd_pool_default_size }}"
+#   min_size: "{{ osd_pool_default_min_size }}"
+#   pg_autoscale_mode: False
+#   target_size_ratio: 0.2
 #cephfs: cephfs # name of the ceph filesystem
 #cephfs_data_pool:
 #  name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
@@ -344,6 +360,7 @@ dummy:
 #  application: "cephfs"
 #  size: "{{ osd_pool_default_size }}"
 #  min_size: "{{ osd_pool_default_min_size }}"
+#  pg_autoscale_mode: False
 #cephfs_metadata_pool:
 #  name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}"
 #  pg_num: "{{ osd_pool_default_pg_num }}"
@@ -355,6 +372,7 @@ dummy:
 #  application: "cephfs"
 #  size: "{{ osd_pool_default_size }}"
 #  min_size: "{{ osd_pool_default_min_size }}"
+#  pg_autoscale_mode: False
 #cephfs_pools:
 #  - "{{ cephfs_data_pool }}"
 #  - "{{ cephfs_metadata_pool }}"
@@ -600,6 +618,22 @@ dummy:
 # OPENSTACK #
 #############
 #openstack_config: false
+# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
+# `pg_num` and `pgp_num` keys will be ignored, even if specified.
+# eg: +# openstack_glance_pool: +# name: "images" +# pg_num: "{{ osd_pool_default_pg_num }}" +# pgp_num: "{{ osd_pool_default_pg_num }}" +# rule_name: "replicated_rule" +# type: 1 +# erasure_profile: "" +# expected_num_objects: "" +# application: "rbd" +# size: "{{ osd_pool_default_size }}" +# min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False +# target_size_ratio: 0.2 #openstack_glance_pool: # name: "images" # pg_num: "{{ osd_pool_default_pg_num }}" @@ -611,6 +645,7 @@ dummy: # application: "rbd" # size: "{{ osd_pool_default_size }}" # min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False #openstack_cinder_pool: # name: "volumes" # pg_num: "{{ osd_pool_default_pg_num }}" @@ -622,6 +657,7 @@ dummy: # application: "rbd" # size: "{{ osd_pool_default_size }}" # min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False #openstack_nova_pool: # name: "vms" # pg_num: "{{ osd_pool_default_pg_num }}" @@ -633,6 +669,7 @@ dummy: # application: "rbd" # size: "{{ osd_pool_default_size }}" # min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False #openstack_cinder_backup_pool: # name: "backups" # pg_num: "{{ osd_pool_default_pg_num }}" @@ -644,6 +681,7 @@ dummy: # application: "rbd" # size: "{{ osd_pool_default_size }}" # min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False #openstack_gnocchi_pool: # name: "metrics" # pg_num: "{{ osd_pool_default_pg_num }}" @@ -655,6 +693,7 @@ dummy: # application: "rbd" # size: "{{ osd_pool_default_size }}" # min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False #openstack_cephfs_data_pool: # name: "manila_data" # pg_num: "{{ osd_pool_default_pg_num }}" @@ -666,6 +705,7 @@ dummy: # application: "rbd" # size: "{{ osd_pool_default_size }}" # min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False #openstack_cephfs_metadata_pool: # name: "manila_metadata" # pg_num: "{{ osd_pool_default_pg_num }}" @@ -677,7 +717,7 @@ dummy: # application: "rbd" # size: "{{ osd_pool_default_size }}" # min_size: "{{ osd_pool_default_min_size }}" - +# pg_autoscale_mode: False #openstack_pools: # - "{{ openstack_glance_pool }}" # - "{{ openstack_cinder_pool }}" diff --git a/group_vars/clients.yml.sample b/group_vars/clients.yml.sample index cd9c7abbc..0f6b29312 100644 --- a/group_vars/clients.yml.sample +++ b/group_vars/clients.yml.sample @@ -18,6 +18,22 @@ dummy: #copy_admin_key: false #user_config: false +# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value +# `pg_num` and `pgp_num` keys will be ignored, even if specified. 
+# eg: +# test: +# name: "test" +# pg_num: "{{ osd_pool_default_pg_num }}" +# pgp_num: "{{ osd_pool_default_pg_num }}" +# rule_name: "replicated_rule" +# application: "rbd" +# type: 1 +# erasure_profile: "" +# expected_num_objects: "" +# size: "{{ osd_pool_default_size }}" +# min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False +# target_size_ratio: 0.2 #test: # name: "test" # pg_num: "{{ osd_pool_default_pg_num }}" @@ -29,6 +45,7 @@ dummy: # expected_num_objects: "" # size: "{{ osd_pool_default_size }}" # min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False #test2: # name: "test2" # pg_num: "{{ osd_pool_default_pg_num }}" @@ -40,6 +57,7 @@ dummy: # expected_num_objects: "" # size: "{{ osd_pool_default_size }}" # min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False #pools: # - "{{ test }}" # - "{{ test2 }}" diff --git a/group_vars/rhcs.yml.sample b/group_vars/rhcs.yml.sample index 4a1c225c7..d4d5ef5ed 100644 --- a/group_vars/rhcs.yml.sample +++ b/group_vars/rhcs.yml.sample @@ -332,6 +332,22 @@ ceph_iscsi_config_dev: false ########## # CEPHFS # ########## +# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value +# `pg_num` and `pgp_num` keys will be ignored, even if specified. +# eg: +# cephfs_data_pool: +# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}" +# pg_num: "{{ osd_pool_default_pg_num }}" +# pgp_num: "{{ osd_pool_default_pg_num }}" +# rule_name: "replicated_rule" +# type: 1 +# erasure_profile: "" +# expected_num_objects: "" +# application: "cephfs" +# size: "{{ osd_pool_default_size }}" +# min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False +# target_size_ratio: 0.2 #cephfs: cephfs # name of the ceph filesystem #cephfs_data_pool: # name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}" @@ -344,6 +360,7 @@ ceph_iscsi_config_dev: false # application: "cephfs" # size: "{{ osd_pool_default_size }}" # min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False #cephfs_metadata_pool: # name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}" # pg_num: "{{ osd_pool_default_pg_num }}" @@ -355,6 +372,7 @@ ceph_iscsi_config_dev: false # application: "cephfs" # size: "{{ osd_pool_default_size }}" # min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False #cephfs_pools: # - "{{ cephfs_data_pool }}" # - "{{ cephfs_metadata_pool }}" @@ -600,6 +618,22 @@ ceph_docker_registry_auth: true # OPENSTACK # ############# #openstack_config: false +# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value +# `pg_num` and `pgp_num` keys will be ignored, even if specified. 
+# eg: +# openstack_glance_pool: +# name: "images" +# pg_num: "{{ osd_pool_default_pg_num }}" +# pgp_num: "{{ osd_pool_default_pg_num }}" +# rule_name: "replicated_rule" +# type: 1 +# erasure_profile: "" +# expected_num_objects: "" +# application: "rbd" +# size: "{{ osd_pool_default_size }}" +# min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False +# target_size_ratio: 0.2 #openstack_glance_pool: # name: "images" # pg_num: "{{ osd_pool_default_pg_num }}" @@ -611,6 +645,7 @@ ceph_docker_registry_auth: true # application: "rbd" # size: "{{ osd_pool_default_size }}" # min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False #openstack_cinder_pool: # name: "volumes" # pg_num: "{{ osd_pool_default_pg_num }}" @@ -622,6 +657,7 @@ ceph_docker_registry_auth: true # application: "rbd" # size: "{{ osd_pool_default_size }}" # min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False #openstack_nova_pool: # name: "vms" # pg_num: "{{ osd_pool_default_pg_num }}" @@ -633,6 +669,7 @@ ceph_docker_registry_auth: true # application: "rbd" # size: "{{ osd_pool_default_size }}" # min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False #openstack_cinder_backup_pool: # name: "backups" # pg_num: "{{ osd_pool_default_pg_num }}" @@ -644,6 +681,7 @@ ceph_docker_registry_auth: true # application: "rbd" # size: "{{ osd_pool_default_size }}" # min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False #openstack_gnocchi_pool: # name: "metrics" # pg_num: "{{ osd_pool_default_pg_num }}" @@ -655,6 +693,7 @@ ceph_docker_registry_auth: true # application: "rbd" # size: "{{ osd_pool_default_size }}" # min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False #openstack_cephfs_data_pool: # name: "manila_data" # pg_num: "{{ osd_pool_default_pg_num }}" @@ -666,6 +705,7 @@ ceph_docker_registry_auth: true # application: "rbd" # size: "{{ osd_pool_default_size }}" # min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False #openstack_cephfs_metadata_pool: # name: "manila_metadata" # pg_num: "{{ osd_pool_default_pg_num }}" @@ -677,7 +717,7 @@ ceph_docker_registry_auth: true # application: "rbd" # size: "{{ osd_pool_default_size }}" # min_size: "{{ osd_pool_default_min_size }}" - +# pg_autoscale_mode: False #openstack_pools: # - "{{ openstack_glance_pool }}" # - "{{ openstack_cinder_pool }}" diff --git a/roles/ceph-client/defaults/main.yml b/roles/ceph-client/defaults/main.yml index ed18fb620..318b9fc1b 100644 --- a/roles/ceph-client/defaults/main.yml +++ b/roles/ceph-client/defaults/main.yml @@ -10,6 +10,22 @@ copy_admin_key: false user_config: false +# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value +# `pg_num` and `pgp_num` keys will be ignored, even if specified. 
+# eg: +# test: +# name: "test" +# pg_num: "{{ osd_pool_default_pg_num }}" +# pgp_num: "{{ osd_pool_default_pg_num }}" +# rule_name: "replicated_rule" +# application: "rbd" +# type: 1 +# erasure_profile: "" +# expected_num_objects: "" +# size: "{{ osd_pool_default_size }}" +# min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False +# target_size_ratio: 0.2 test: name: "test" pg_num: "{{ osd_pool_default_pg_num }}" @@ -21,6 +37,7 @@ test: expected_num_objects: "" size: "{{ osd_pool_default_size }}" min_size: "{{ osd_pool_default_min_size }}" + pg_autoscale_mode: False test2: name: "test2" pg_num: "{{ osd_pool_default_pg_num }}" @@ -32,6 +49,7 @@ test2: expected_num_objects: "" size: "{{ osd_pool_default_size }}" min_size: "{{ osd_pool_default_min_size }}" + pg_autoscale_mode: False pools: - "{{ test }}" - "{{ test2 }}" diff --git a/roles/ceph-client/tasks/create_users_keys.yml b/roles/ceph-client/tasks/create_users_keys.yml index df44b505c..04942f178 100644 --- a/roles/ceph-client/tasks/create_users_keys.yml +++ b/roles/ceph-client/tasks/create_users_keys.yml @@ -83,17 +83,16 @@ command: > {{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.0.name }} - {{ item.0.pg_num | default(osd_pool_default_pg_num) }} - {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) }} + {{ item.0.pg_num | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else 16 }} + {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else '' }} {%- if item.0.type | default(1) | int == 1 or item.0.type | default('replicated') == 'replicated' %} replicated {{ item.0.rule_name | default(osd_pool_default_crush_rule) }} + {{ item.0.expected_num_objects | default(0) }} {%- else %} erasure {{ item.0.erasure_profile }} - {{ item.0.rule_name | default('erasure-code') }} {%- endif %} - {{ item.0.expected_num_objects | default(0) }} with_together: - "{{ pools }}" - "{{ created_pools.results }}" @@ -103,6 +102,17 @@ - pools | length > 0 - item.1.rc != 0 + - name: set the target ratio on pool(s) + command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} target_size_ratio {{ item.target_size_ratio }}" + with_items: "{{ pools | unique }}" + delegate_to: "{{ delegated_node }}" + when: item.pg_autoscale_mode | default(False) | bool + + - name: set pg_autoscale_mode value on pool(s) + command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode {{ item.pg_autoscale_mode | default(False) | ternary('on', 'warn') }}" + delegate_to: "{{ delegated_node }}" + with_items: "{{ pools | unique }}" + - name: customize pool size command: > {{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml index b0c0c7cfc..2fd6f9937 100644 --- a/roles/ceph-defaults/defaults/main.yml +++ b/roles/ceph-defaults/defaults/main.yml @@ -324,6 +324,22 @@ mon_host_v2: ########## # CEPHFS # ########## +# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value +# `pg_num` and `pgp_num` keys will be ignored, even if specified. 
+# eg: +# cephfs_data_pool: +# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}" +# pg_num: "{{ osd_pool_default_pg_num }}" +# pgp_num: "{{ osd_pool_default_pg_num }}" +# rule_name: "replicated_rule" +# type: 1 +# erasure_profile: "" +# expected_num_objects: "" +# application: "cephfs" +# size: "{{ osd_pool_default_size }}" +# min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False +# target_size_ratio: 0.2 cephfs: cephfs # name of the ceph filesystem cephfs_data_pool: name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}" @@ -336,6 +352,7 @@ cephfs_data_pool: application: "cephfs" size: "{{ osd_pool_default_size }}" min_size: "{{ osd_pool_default_min_size }}" + pg_autoscale_mode: False cephfs_metadata_pool: name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}" pg_num: "{{ osd_pool_default_pg_num }}" @@ -347,6 +364,7 @@ cephfs_metadata_pool: application: "cephfs" size: "{{ osd_pool_default_size }}" min_size: "{{ osd_pool_default_min_size }}" + pg_autoscale_mode: False cephfs_pools: - "{{ cephfs_data_pool }}" - "{{ cephfs_metadata_pool }}" @@ -592,6 +610,22 @@ docker_pull_timeout: "300s" # OPENSTACK # ############# openstack_config: false +# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value +# `pg_num` and `pgp_num` keys will be ignored, even if specified. +# eg: +# openstack_glance_pool: +# name: "images" +# pg_num: "{{ osd_pool_default_pg_num }}" +# pgp_num: "{{ osd_pool_default_pg_num }}" +# rule_name: "replicated_rule" +# type: 1 +# erasure_profile: "" +# expected_num_objects: "" +# application: "rbd" +# size: "{{ osd_pool_default_size }}" +# min_size: "{{ osd_pool_default_min_size }}" +# pg_autoscale_mode: False +# target_size_ratio: 0.2 openstack_glance_pool: name: "images" pg_num: "{{ osd_pool_default_pg_num }}" @@ -603,6 +637,7 @@ openstack_glance_pool: application: "rbd" size: "{{ osd_pool_default_size }}" min_size: "{{ osd_pool_default_min_size }}" + pg_autoscale_mode: False openstack_cinder_pool: name: "volumes" pg_num: "{{ osd_pool_default_pg_num }}" @@ -614,6 +649,7 @@ openstack_cinder_pool: application: "rbd" size: "{{ osd_pool_default_size }}" min_size: "{{ osd_pool_default_min_size }}" + pg_autoscale_mode: False openstack_nova_pool: name: "vms" pg_num: "{{ osd_pool_default_pg_num }}" @@ -625,6 +661,7 @@ openstack_nova_pool: application: "rbd" size: "{{ osd_pool_default_size }}" min_size: "{{ osd_pool_default_min_size }}" + pg_autoscale_mode: False openstack_cinder_backup_pool: name: "backups" pg_num: "{{ osd_pool_default_pg_num }}" @@ -636,6 +673,7 @@ openstack_cinder_backup_pool: application: "rbd" size: "{{ osd_pool_default_size }}" min_size: "{{ osd_pool_default_min_size }}" + pg_autoscale_mode: False openstack_gnocchi_pool: name: "metrics" pg_num: "{{ osd_pool_default_pg_num }}" @@ -647,6 +685,7 @@ openstack_gnocchi_pool: application: "rbd" size: "{{ osd_pool_default_size }}" min_size: "{{ osd_pool_default_min_size }}" + pg_autoscale_mode: False openstack_cephfs_data_pool: name: "manila_data" pg_num: "{{ osd_pool_default_pg_num }}" @@ -658,6 +697,7 @@ openstack_cephfs_data_pool: application: "rbd" size: "{{ osd_pool_default_size }}" min_size: "{{ osd_pool_default_min_size }}" + pg_autoscale_mode: False openstack_cephfs_metadata_pool: name: "manila_metadata" pg_num: "{{ osd_pool_default_pg_num }}" @@ -669,7 +709,7 @@ openstack_cephfs_metadata_pool: application: "rbd" size: "{{ osd_pool_default_size }}" min_size: "{{ osd_pool_default_min_size 
}}" - + pg_autoscale_mode: False openstack_pools: - "{{ openstack_glance_pool }}" - "{{ openstack_cinder_pool }}" diff --git a/roles/ceph-mds/tasks/create_mds_filesystems.yml b/roles/ceph-mds/tasks/create_mds_filesystems.yml index 60aad2f55..e571bd6ed 100644 --- a/roles/ceph-mds/tasks/create_mds_filesystems.yml +++ b/roles/ceph-mds/tasks/create_mds_filesystems.yml @@ -25,21 +25,31 @@ command: > {{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} - {{ item.pg_num | default(osd_pool_default_pg_num) }} - {{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) }} + {{ item.pg_num | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else 16 }} + {{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else '' }} {%- if item.type | default(1) | int == 1 or item.type | default('replicated') == 'replicated' %} replicated {{ item.rule_name | default(osd_pool_default_crush_rule) }} + {{ item.expected_num_objects | default(0) }} {%- else %} erasure {{ item.erasure_profile }} - {{ item.rule_name | default('erasure-code') }} {%- endif %} - {{ item.expected_num_objects | default(0) }} changed_when: false with_items: - "{{ cephfs_pools }}" + - name: set the target ratio on pool(s) + command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} target_size_ratio {{ item.target_size_ratio }}" + with_items: "{{ cephfs_pools | unique }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + when: item.pg_autoscale_mode | default(False) | bool + + - name: set pg_autoscale_mode value on pool(s) + command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode {{ item.pg_autoscale_mode | default(False) | ternary('on', 'warn') }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + with_items: "{{ cephfs_pools | unique }}" + - name: customize pool size command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}" with_items: "{{ cephfs_pools | unique }}" diff --git a/roles/ceph-osd/tasks/openstack_config.yml b/roles/ceph-osd/tasks/openstack_config.yml index 9f351a574..2dcfbd6e1 100644 --- a/roles/ceph-osd/tasks/openstack_config.yml +++ b/roles/ceph-osd/tasks/openstack_config.yml @@ -15,17 +15,16 @@ command: > {{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.0.name }} - {{ item.0.pg_num | default(osd_pool_default_pg_num) }} - {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) }} + {{ item.0.pg_num | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else 16 }} + {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else '' }} {%- if item.0.type | default(1) | int == 1 or item.0.type | default('replicated') == 'replicated' %} replicated {{ item.0.rule_name | default(osd_pool_default_crush_rule) }} + {{ item.0.expected_num_objects | default(0) }} {%- else %} erasure {{ item.0.erasure_profile }} - {{ item.0.rule_name | default('erasure-code') }} {%- endif %} - {{ item.0.expected_num_objects | default(0) }} with_together: - "{{ openstack_pools | 
     unique }}"
     - "{{ created_pools.results }}"
@@ -33,6 +32,17 @@
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when: item.1.get('rc', 0) != 0
+  - name: set the target ratio on pool(s)
+    command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} target_size_ratio {{ item.target_size_ratio }}"
+    with_items: "{{ openstack_pools | unique }}"
+    delegate_to: "{{ groups[mon_group_name][0] }}"
+    when: item.pg_autoscale_mode | default(False) | bool
+
+  - name: set pg_autoscale_mode value on pool(s)
+    command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode {{ item.pg_autoscale_mode | default(False) | ternary('on', 'warn') }}"
+    delegate_to: "{{ groups[mon_group_name][0] }}"
+    with_items: "{{ openstack_pools | unique }}"
+
   - name: customize pool size
     command: >
       {{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
diff --git a/roles/ceph-validate/tasks/check_pools.yml b/roles/ceph-validate/tasks/check_pools.yml
new file mode 100644
index 000000000..d4441fda0
--- /dev/null
+++ b/roles/ceph-validate/tasks/check_pools.yml
@@ -0,0 +1,11 @@
+---
+- name: fail if target_size_ratio is not set when pg_autoscale_mode is True
+  fail:
+    msg: "You must set a target_size_ratio value on the following pool: {{ item.name }}."
+  with_items:
+    - "{{ openstack_pools | default([]) }}"
+    - "{{ cephfs_pools | default([]) }}"
+    - "{{ pools | default([]) }}"
+  when:
+    - item.pg_autoscale_mode | default(False) | bool
+    - item.target_size_ratio is undefined
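
As a quick illustration of the new knobs, here is a minimal sketch of how an operator could enable the autoscaler on a single pool once this change is applied. It only reuses the pool structure and defaults defined above; the pool shown and the 0.2 ratio are illustrative values, not part of the patch.

```
# group_vars override (illustrative values)
openstack_config: true
openstack_glance_pool:
  name: "images"
  rule_name: "replicated_rule"
  type: 1
  erasure_profile: ""
  expected_num_objects: ""
  application: "rbd"
  size: "{{ osd_pool_default_size }}"
  min_size: "{{ osd_pool_default_min_size }}"
  pg_autoscale_mode: True     # pg_num/pgp_num are ignored; the pool is created with 16 PGs
  target_size_ratio: 0.2      # required when pg_autoscale_mode is True (enforced by check_pools.yml)
```

With such a definition the playbook runs `ceph osd pool set images target_size_ratio 0.2` and `ceph osd pool set images pg_autoscale_mode on` right after pool creation; when `pg_autoscale_mode` is `False`, the mode is set to `warn` instead.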