mds: support enabling pg autoscaler on rerun

This commit adds support for enabling the pg autoscaler on the CephFS pools
when ceph-ansible is rerun.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1836431

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
Guillaume Abrioux 2020-09-28 17:31:08 +02:00
parent 7ffd3baa95
commit f9a6f775e9
1 changed file with 53 additions and 56 deletions
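
The intent of the change is that pool creation stays skipped once the CephFS pools exist, while the pg autoscaler related "osd pool set" calls are applied on every run, so a rerun can still turn the autoscaler on. A minimal standalone sketch of that rerun-safe pattern (illustrative only: it uses the plain ceph CLI, a hypothetical "mons" inventory group and a hypothetical cephfs_data pool instead of the role's {{ ceph_run_cmd }} wrapper and cephfs_pools list):

# Rerun-safe sketch (illustrative, not the role file changed by this commit).
# Assumes an inventory group named "mons" and a hypothetical "cephfs_data" pool.
- hosts: mons[0]
  gather_facts: false
  tasks:
    - name: list existing pools
      command: ceph osd pool ls
      register: pool_ls
      changed_when: false

    - name: create the pool only if it does not exist yet
      command: ceph osd pool create cephfs_data 16
      when: "'cephfs_data' not in pool_ls.stdout_lines"

    # Deliberately kept outside the creation guard, so a rerun still applies them.
    - name: enable the pg autoscaler on the pool
      command: ceph osd pool set cephfs_data pg_autoscale_mode on
      changed_when: false

    - name: set the target size ratio used by the autoscaler
      command: ceph osd pool set cephfs_data target_size_ratio 0.2
      changed_when: false

In the role itself the mode is driven per pool by pg_autoscale_mode {{ item.pg_autoscale_mode | default(False) | ternary('on', 'warn') }}, i.e. pools fall back to "warn" unless autoscaling is explicitly requested for them.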


@@ -18,68 +18,65 @@
fs_pools_created: True
when: osd_pool_ls.stdout_lines | intersect(cephfs_pool_names) | length > 0
- name: deploy filesystem pools
- import_role:
name: ceph-facts
tasks_from: get_def_crush_rule_name.yml
- name: create filesystem pools
command: >
{{ ceph_run_cmd }} --cluster {{ cluster }}
osd pool create {{ item.name }}
{{ item.pg_num | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else 16 }}
{{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else '' }}
{%- if item.type | default(1) | int == 1 or item.type | default('replicated') == 'replicated' %}
replicated
{{ item.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}
{{ item.expected_num_objects | default(0) }}
{%- else %}
erasure
{{ item.erasure_profile }}
{%- endif %}
changed_when: false
with_items: "{{ cephfs_pools }}"
when: fs_pools_created is not defined
block:
- import_role:
name: ceph-facts
tasks_from: get_def_crush_rule_name.yml
- name: create filesystem pools
command: >
{{ ceph_run_cmd }} --cluster {{ cluster }}
osd pool create {{ item.name }}
{{ item.pg_num | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else 16 }}
{{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else '' }}
{%- if item.type | default(1) | int == 1 or item.type | default('replicated') == 'replicated' %}
replicated
{{ item.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}
{{ item.expected_num_objects | default(0) }}
{%- else %}
erasure
{{ item.erasure_profile }}
{%- endif %}
changed_when: false
with_items:
- "{{ cephfs_pools }}"
- name: set the target ratio on pool(s)
command: "{{ ceph_run_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} target_size_ratio {{ item.target_size_ratio }}"
with_items: "{{ cephfs_pools | unique }}"
when:
- item.pg_autoscale_mode | default(False) | bool
- item.target_size_ratio is defined
- name: set the target ratio on pool(s)
command: "{{ ceph_run_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} target_size_ratio {{ item.target_size_ratio }}"
with_items: "{{ cephfs_pools | unique }}"
when:
- item.pg_autoscale_mode | default(False) | bool
- item.target_size_ratio is defined
- name: set pg_autoscale_mode value on pool(s)
command: "{{ ceph_run_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode {{ item.pg_autoscale_mode | default(False) | ternary('on', 'warn') }}"
with_items: "{{ cephfs_pools | unique }}"
- name: set pg_autoscale_mode value on pool(s)
command: "{{ ceph_run_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode {{ item.pg_autoscale_mode | default(False) | ternary('on', 'warn') }}"
with_items: "{{ cephfs_pools | unique }}"
- name: customize pool size
command: "{{ ceph_run_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}"
with_items: "{{ cephfs_pools | unique }}"
changed_when: false
when:
- item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
- item.type | default(1) | int != 3
- item.type | default('replicated') != 'erasure'
- name: customize pool size
command: "{{ ceph_run_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}"
with_items: "{{ cephfs_pools | unique }}"
changed_when: false
when:
- item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
- item.type | default(1) | int != 3
- item.type | default('replicated') != 'erasure'
- name: customize pool min_size
command: "{{ ceph_run_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}"
with_items: "{{ cephfs_pools | unique }}"
changed_when: false
when:
- (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
- item.type | default(1) | int != 3
- item.type | default('replicated') != 'erasure'
- name: customize pool min_size
command: "{{ ceph_run_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}"
with_items: "{{ cephfs_pools | unique }}"
changed_when: false
when:
- (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
- item.type | default(1) | int != 3
- item.type | default('replicated') != 'erasure'
- name: customize pool crush_rule
command: "{{ ceph_run_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} crush_rule {{ item.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}"
with_items: "{{ cephfs_pools | unique }}"
changed_when: false
when:
- item.rule_name | default(ceph_osd_pool_default_crush_rule_name)
- item.type | default(1) | int != 3
- item.type | default('replicated') != 'erasure'
- name: customize pool crush_rule
command: "{{ ceph_run_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} crush_rule {{ item.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}"
with_items: "{{ cephfs_pools | unique }}"
changed_when: false
when:
- item.rule_name | default(ceph_osd_pool_default_crush_rule_name)
- item.type | default(1) | int != 3
- item.type | default('replicated') != 'erasure'
- name: check and create ceph filesystem
delegate_to: "{{ groups[mon_group_name][0] }}"
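
After a rerun of the playbook, the result can be spot-checked directly against the cluster. A hypothetical verification sketch, again with the plain ceph CLI and the default CephFS pool names (cephfs_data, cephfs_metadata), both assumptions and not taken from this commit:

# Hypothetical verification sketch, not part of ceph-ansible.
- hosts: mons[0]
  gather_facts: false
  tasks:
    - name: read back the autoscaler mode of the CephFS pools
      command: "ceph osd pool get {{ item }} pg_autoscale_mode"
      loop:
        - cephfs_data
        - cephfs_metadata
      register: autoscale_mode
      changed_when: false

    - name: print the result
      debug:
        msg: "{{ autoscale_mode.results | map(attribute='stdout') | list }}"

ceph osd pool autoscale-status also gives a cluster-wide view of the autoscaler, including the target ratio set above.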