common: introduce ceph_pool module calls

This commit calls the `ceph_pool` module to create Ceph pools
everywhere it is needed in the playbook.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit af9f6684f2)
pull/5389/head
Guillaume Abrioux 2020-04-28 18:08:59 +02:00
parent 9303f15c5b
commit 4453028862
4 changed files with 64 additions and 232 deletions
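Across the four files below the pattern is the same: the per-pool `command` tasks (pool creation, target_size_ratio, pg_autoscale_mode, size, min_size, application assignment) are collapsed into a single `ceph_pool` task per pool list. A minimal sketch of that task shape, using only parameters and environment variables that appear in this diff (the pool name and values are illustrative placeholders, not taken from the playbook):

- name: create ceph pool(s)
  ceph_pool:
    name: mypool                 # placeholder pool name
    state: present
    cluster: "{{ cluster }}"
    size: 3                      # optional; the playbook passes omit when a pool does not define it
    application: rbd             # optional; replaces the separate 'osd pool application enable' task
  environment:
    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
  delegate_to: "{{ groups[mon_group_name][0] }}"

The two environment variables let the module run the ceph CLI through the container runtime when containerized_deployment is enabled, instead of prefixing each command with a container exec string.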


@@ -73,83 +73,27 @@
name: ceph-facts
tasks_from: get_def_crush_rule_name.yml
- name: list existing pool(s)
command: >
{{ ceph_admin_command }} --cluster {{ cluster }}
osd pool get {{ item.name }} size
with_items: "{{ pools }}"
register: created_pools
failed_when: false
delegate_to: "{{ delegated_node }}"
- name: create ceph pool(s)
command: >
{{ ceph_admin_command | default('') }} --cluster {{ cluster }}
osd pool create {{ item.0.name }}
{{ item.0.pg_num | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else 16 }}
{{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else '' }}
{%- if item.0.type | default(1) | int == 1 or item.0.type | default('replicated') == 'replicated' %}
replicated
{{ item.0.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}
{{ item.0.expected_num_objects | default(0) }}
{%- else %}
erasure
{{ item.0.erasure_profile }}
{%- endif %}
with_together:
- "{{ pools }}"
- "{{ created_pools.results }}"
ceph_pool:
name: "{{ item.name }}"
state: present
cluster: "{{ cluster }}"
pg_num: "{{ item.pg_num | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else 16 }}"
pgp_num: "{{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else omit }}"
size: "{{ item.size | default(omit) }}"
min_size: "{{ item.min_size | default(omit) }}"
pool_type: "{{ item.type | default('replicated') }}"
rule_name: "{{ item.rule_name | default(omit) }}"
erasure_profile: "{{ item.erasure_profile | default(omit) }}"
pg_autoscale_mode: "{{ item.pg_autoscale_mode | default(omit) }}"
target_size_ratio: "{{ item.target_size_ratio | default(omit) }}"
application: "{{ item.application | default(omit) }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_items: "{{ pools }}"
changed_when: false
delegate_to: "{{ delegated_node }}"
when:
- pools | length > 0
- item.1.rc != 0
- name: set the target ratio on pool(s)
command: "{{ ceph_admin_command | default('') }} --cluster {{ cluster }} osd pool set {{ item.name }} target_size_ratio {{ item.target_size_ratio }}"
with_items: "{{ pools | unique }}"
delegate_to: "{{ delegated_node }}"
when:
- item.pg_autoscale_mode | default(False) | bool
- item.target_size_ratio is defined
- name: set pg_autoscale_mode value on pool(s)
command: "{{ ceph_admin_command | default('') }} --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode {{ item.pg_autoscale_mode | default(False) | ternary('on', 'warn') }}"
delegate_to: "{{ delegated_node }}"
with_items: "{{ pools | unique }}"
- name: customize pool size
command: >
{{ ceph_admin_command | default('') }} --cluster {{ cluster }}
osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }} {{ '--yes-i-really-mean-it' if item.size | default(osd_pool_default_size) | int == 1 else '' }}
with_items: "{{ pools | unique }}"
delegate_to: "{{ delegated_node }}"
changed_when: false
when:
- pools | length > 0
- item.type | default(1) | int != 3
- item.type | default('replicated') != 'erasure'
- item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
- name: customize pool min_size
command: >
{{ ceph_admin_command | default('') }} --cluster {{ cluster }}
osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}
with_items: "{{ pools | unique }}"
delegate_to: "{{ delegated_node }}"
changed_when: false
when:
- pools | length > 0
- (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
- item.type | default(1) | int != 3
- item.type | default('replicated') != 'erasure'
- name: assign application to pool(s)
command: "{{ ceph_admin_command | default('') }} --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
with_items: "{{ pools | unique }}"
changed_when: false
delegate_to: "{{ delegated_node }}"
when: item.application is defined
- name: get client cephx keys
copy:


@@ -52,27 +52,14 @@
delegate_to: "{{ groups[mon_group_name][0] }}"
when: containerized_deployment | bool
- name: check if a rbd pool exists
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool ls --format json"
changed_when: false
register: iscsi_pool_exists
delegate_to: "{{ groups[mon_group_name][0] }}"
- name: iscsi pool related tasks
when: "iscsi_pool_name not in (iscsi_pool_exists.stdout | from_json)"
block:
- name: create a iscsi pool if it doesn't exist
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create {{ iscsi_pool_name }} {{ osd_pool_default_pg_num }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
- name: assign application rbd to iscsi pool
command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool application enable {{ iscsi_pool_name }} rbd"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: True
- name: customize pool size
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ iscsi_pool_name }} size {{ iscsi_pool_size | default(osd_pool_default_size) }} {{ '--yes-i-really-mean-it' if iscsi_pool_size | default(osd_pool_default_size) | int == 1 else '' }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when: iscsi_pool_size | default(osd_pool_default_size) != ceph_osd_pool_default_size
- name: create iscsi pool
ceph_pool:
name: "{{ iscsi_pool_name }}"
state: present
cluster: "{{ cluster }}"
pg_num: "{{ osd_pool_default_pg_num }}"
size: "{{ iscsi_pool_size | default(osd_pool_default_size) }}"
application: "rbd"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"


@@ -2,22 +2,6 @@
- name: check and deploy filesystem pools
delegate_to: "{{ groups[mon_group_name][0] }}"
block:
- name: check if filesystem pool already exists
block:
- name: compile a list of pool names
set_fact:
cephfs_pool_names: "{{ cephfs_pools | map(attribute='name') | list }}"
- name: get and store list of filesystem pools
command: "{{ ceph_run_cmd }} --cluster {{ cluster }} osd pool ls"
changed_when: false
register: osd_pool_ls
- name: look whether pools to be created are present in the output
set_fact:
fs_pools_created: True
when: osd_pool_ls.stdout_lines | intersect(cephfs_pool_names) | length > 0
- name: deploy filesystem pools
when: fs_pools_created is not defined
block:
@@ -26,52 +10,23 @@
tasks_from: get_def_crush_rule_name.yml
- name: create filesystem pools
command: >
{{ ceph_run_cmd }} --cluster {{ cluster }}
osd pool create {{ item.name }}
{{ item.pg_num | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else 16 }}
{{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else '' }}
{%- if item.type | default(1) | int == 1 or item.type | default('replicated') == 'replicated' %}
replicated
{{ item.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}
{{ item.expected_num_objects | default(0) }}
{%- else %}
erasure
{{ item.erasure_profile }}
{%- endif %}
changed_when: false
with_items:
- "{{ cephfs_pools }}"
- name: set the target ratio on pool(s)
command: "{{ ceph_run_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} target_size_ratio {{ item.target_size_ratio }}"
with_items: "{{ cephfs_pools | unique }}"
when:
- item.pg_autoscale_mode | default(False) | bool
- item.target_size_ratio is defined
- name: set pg_autoscale_mode value on pool(s)
command: "{{ ceph_run_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode {{ item.pg_autoscale_mode | default(False) | ternary('on', 'warn') }}"
with_items: "{{ cephfs_pools | unique }}"
- name: customize pool size
command: "{{ ceph_run_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }} {{ '--yes-i-really-mean-it' if item.size | default(osd_pool_default_size) | int == 1 else '' }}"
with_items: "{{ cephfs_pools | unique }}"
changed_when: false
when:
- item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
- item.type | default(1) | int != 3
- item.type | default('replicated') != 'erasure'
- name: customize pool min_size
command: "{{ ceph_run_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}"
with_items: "{{ cephfs_pools | unique }}"
changed_when: false
when:
- (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
- item.type | default(1) | int != 3
- item.type | default('replicated') != 'erasure'
ceph_pool:
name: "{{ item.name }}"
state: present
cluster: "{{ cluster }}"
pg_num: "{{ item.pg_num | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else 16 }}"
pgp_num: "{{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else omit }}"
size: "{{ item.size | default(omit) }}"
min_size: "{{ item.min_size | default(omit) }}"
pool_type: "{{ item.type | default('replicated') }}"
rule_name: "{{ item.rule_name | default(omit) }}"
erasure_profile: "{{ item.erasure_profile | default(omit) }}"
pg_autoscale_mode: "{{ item.pg_autoscale_mode | default(omit) }}"
target_size_ratio: "{{ item.target_size_ratio | default(omit) }}"
with_items: "{{ cephfs_pools }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- name: check and create ceph filesystem
delegate_to: "{{ groups[mon_group_name][0] }}"


@@ -1,80 +1,26 @@
---
- name: pool related tasks
block:
- name: list existing pool(s)
command: >
{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
osd pool get {{ item.name }} size
with_items: "{{ openstack_pools | unique }}"
register: created_pools
delegate_to: "{{ groups[mon_group_name][0] }}"
failed_when: created_pools.rc in [1, 125]
changed_when: false
- name: create openstack pool(s)
command: >
{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
osd pool create {{ item.0.name }}
{{ item.0.pg_num | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else 16 }}
{{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else '' }}
{%- if item.0.type | default(1) | int == 1 or item.0.type | default('replicated') == 'replicated' %}
replicated
{{ item.0.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}
{{ item.0.expected_num_objects | default(0) }}
{%- else %}
erasure
{{ item.0.erasure_profile }}
{%- endif %}
with_together:
- "{{ openstack_pools | unique }}"
- "{{ created_pools.results }}"
changed_when: false
ceph_pool:
name: "{{ item.name }}"
state: present
cluster: "{{ cluster }}"
pg_num: "{{ item.pg_num | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else 16 }}"
pgp_num: "{{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else omit }}"
size: "{{ item.size | default(omit) }}"
min_size: "{{ item.min_size | default(omit) }}"
pool_type: "{{ item.type | default('replicated') }}"
rule_name: "{{ item.rule_name | default(omit) }}"
erasure_profile: "{{ item.erasure_profile | default(omit) }}"
pg_autoscale_mode: "{{ item.pg_autoscale_mode | default(omit) }}"
target_size_ratio: "{{ item.target_size_ratio | default(omit) }}"
application: "{{ item.application | default(omit) }}"
with_items: "{{ openstack_pools }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: item.1.get('rc', 0) != 0
- name: set the target ratio on pool(s)
command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} target_size_ratio {{ item.target_size_ratio }}"
with_items: "{{ openstack_pools | unique }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- item.pg_autoscale_mode | default(False) | bool
- item.target_size_ratio is defined
- name: set pg_autoscale_mode value on pool(s)
command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode {{ item.pg_autoscale_mode | default(False) | ternary('on', 'warn') }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
with_items: "{{ openstack_pools | unique }}"
- name: customize pool size
command: >
{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }} {{ '--yes-i-really-mean-it' if item.size | default(osd_pool_default_size) | int == 1 else '' }}
with_items: "{{ openstack_pools | unique }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when:
- item.size | default(osd_pool_default_size)
- item.type | default(1) | int != 3
- item.type | default('replicated') != 'erasure'
- name: customize pool min_size
command: >
{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}
with_items: "{{ openstack_pools | unique }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when:
- (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
- item.type | default(1) | int != 3
- item.type | default('replicated') != 'erasure'
- name: assign application to pool(s)
command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
with_items: "{{ openstack_pools | unique }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when: item.application is defined
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- name: create openstack cephx key(s)
block: