mds: fix --limit run against mds nodes

When the playbook is run with --limit against the mds nodes only, the
container_exec_cmd fact the filesystem creation tasks rely on is never
set (it is defined on the monitor hosts), so those tasks either hit an
undefined variable through the hostvars lookup or fall back to a bare
'ceph' call that does not exist on containerized deployments. Set a
ceph_run_cmd fact that builds the full ceph command line, container
wrapper included, and use it in these tasks instead.
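
A minimal sketch of the failing pattern and its replacement (the tasks
below are hypothetical, for illustration only):

    # Before: container_exec_cmd is only set on the monitor hosts, so a
    # run such as `ansible-playbook site.yml --limit mdss` either hits an
    # undefined variable (hostvars lookup) or falls back to a bare 'ceph'
    # binary that is absent on containerized hosts.
    - name: example ceph call (old pattern)
      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs ls"
      changed_when: false

    # After: ceph_run_cmd is set by the facts tasks on every host in the
    # play, so the same call works whatever --limit selects.
    - name: example ceph call (new pattern)
      command: "{{ ceph_run_cmd }} --cluster {{ cluster }} fs ls"
      changed_when: false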

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 378405e328)

@@ -370,6 +370,10 @@
   set_fact:
     use_new_ceph_iscsi: "{{ (gateway_ip_list == '0.0.0.0' and gateway_iqn | length == 0 and client_connections | length == 0 and rbd_devices | length == 0) | bool | ternary(true, false) }}"

+- name: set_fact ceph_run_cmd
+  set_fact:
+    ceph_run_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /var/log/ceph/:/var/log/ceph/:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else 'ceph' }}"
+
 - name: set_fact ceph_admin_command
   set_fact:
-    ceph_admin_command: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /var/log/ceph/:/var/log/ceph/:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else 'ceph' }} -n client.admin -k /etc/ceph/{{ cluster }}.client.admin.keyring"
+    ceph_admin_command: "{{ ceph_run_cmd }} -n client.admin -k /etc/ceph/{{ cluster }}.client.admin.keyring"

@@ -9,7 +9,7 @@
     cephfs_pool_names: "{{ cephfs_pools | map(attribute='name') | list }}"

 - name: get and store list of filesystem pools
-  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool ls"
+  command: "{{ ceph_run_cmd }} --cluster {{ cluster }} osd pool ls"
   changed_when: false
   register: osd_pool_ls
@@ -27,7 +27,7 @@
 - name: create filesystem pools
   command: >
-    {{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+    {{ ceph_run_cmd }} --cluster {{ cluster }}
     osd pool create {{ item.name }}
     {{ item.pg_num | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else 16 }}
     {{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else '' }}
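
The pg_num/pgp_num ternaries above pick the explicit pool values when the
autoscaler is off, and pass a fixed 16 (pg_num) and nothing (pgp_num) when
it is on. A cephfs_pools definition exercising both branches might look
like this (names and numbers are illustrative):

    cephfs_pools:
      - name: cephfs_data
        pg_num: 64                # autoscaler off: explicit value is used
      - name: cephfs_metadata
        pg_autoscale_mode: true   # autoscaler on: task passes 16 and lets
        target_size_ratio: 0.2    # the target ratio task size the pool
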
@@ -44,18 +44,16 @@
     - "{{ cephfs_pools }}"

 - name: set the target ratio on pool(s)
-  command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} target_size_ratio {{ item.target_size_ratio }}"
+  command: "{{ ceph_run_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} target_size_ratio {{ item.target_size_ratio }}"
   with_items: "{{ cephfs_pools | unique }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when: item.pg_autoscale_mode | default(False) | bool

 - name: set pg_autoscale_mode value on pool(s)
-  command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode {{ item.pg_autoscale_mode | default(False) | ternary('on', 'warn') }}"
-  delegate_to: "{{ groups[mon_group_name][0] }}"
+  command: "{{ ceph_run_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode {{ item.pg_autoscale_mode | default(False) | ternary('on', 'warn') }}"
   with_items: "{{ cephfs_pools | unique }}"

 - name: customize pool size
-  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}"
+  command: "{{ ceph_run_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}"
   with_items: "{{ cephfs_pools | unique }}"
   changed_when: false
   when:
@@ -64,7 +62,7 @@
     - item.type | default('replicated') != 'erasure'

 - name: customize pool min_size
-  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}"
+  command: "{{ ceph_run_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}"
   with_items: "{{ cephfs_pools | unique }}"
   changed_when: false
   when:
@@ -73,7 +71,7 @@
     - item.type | default('replicated') != 'erasure'

 - name: assign application to cephfs pools
-  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
+  command: "{{ ceph_run_cmd }} --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
   with_items:
     - "{{ cephfs_data_pool }}"
     - "{{ cephfs_metadata_pool }}"
@@ -83,18 +81,18 @@
   delegate_to: "{{ groups[mon_group_name][0] }}"
   block:
     - name: check if ceph filesystem already exists
-      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs get {{ cephfs }}"
+      command: "{{ ceph_run_cmd }} --cluster {{ cluster }} fs get {{ cephfs }}"
       register: check_existing_cephfs
       changed_when: false
       failed_when: false

     - name: create ceph filesystem
-      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata_pool.name }} {{ cephfs_data_pool.name }}"
+      command: "{{ ceph_run_cmd }} --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata_pool.name }} {{ cephfs_data_pool.name }}"
       changed_when: false
       when: check_existing_cephfs.rc != 0

 - name: set max_mds
-  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs set {{ cephfs }} max_mds {{ mds_max_mds }}"
+  command: "{{ ceph_run_cmd }} --cluster {{ cluster }} fs set {{ cephfs }} max_mds {{ mds_max_mds }}"
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when: