rgw/rbdmirror: use service dump instead of ceph -s

The ceph status command returns a lot of information that then gets
stored in variables and/or facts, consuming resources for nothing.
When checking the rgw/rbdmirror service status we only use the
servicemap structure from the ceph status output.
To optimize this, we can use the ceph service dump command instead,
which contains the same information we need.
This command returns less data and is slightly faster than the ceph
status command.

$ ceph status -f json | wc -c
2001
$ ceph service dump -f json | wc -c
1105
$ time ceph status -f json > /dev/null

real	0m0.557s
user	0m0.516s
sys	0m0.040s
$ time ceph service dump -f json > /dev/null

real	0m0.454s
user	0m0.434s
sys	0m0.020s
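
The only structural difference that matters here is that ceph service dump
exposes the services tree at the top level, while ceph status nests the same
tree under a servicemap key. A quick way to compare the two, assuming jq is
available on the monitor node (rbd-mirror is just an example service):

$ ceph status -f json | jq '.servicemap.services."rbd-mirror".daemons | keys'
$ ceph service dump -f json | jq '.services."rbd-mirror".daemons | keys'

Both commands list the same daemon entries (plus the 'summary' key), which is
why the playbook changes below only drop the leading ['servicemap'] or
.servicemap from the JSON lookups.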

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
(cherry picked from commit 3f9081931f)
pull/6008/head
Dimitri Savineau 2020-10-26 17:49:47 -04:00 committed by Dimitri Savineau
parent 69b51b5f19
commit bcd2797d11
3 changed files with 20 additions and 21 deletions

@@ -67,7 +67,7 @@
        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
    - name: exit playbook, if can not connect to the cluster
-      command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} -s -f json"
+      command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
      register: ceph_health
      until: ceph_health is succeeded
      retries: 5
@@ -80,14 +80,14 @@
    - name: set_fact rbdmirror_gids
      set_fact:
        rbdmirror_gids: "{{ rbdmirror_gids | default([]) + [ item ] }}"
-      with_items: "{{ (ceph_health.stdout | from_json)['servicemap']['services']['rbd-mirror']['daemons'].keys() | list }}"
+      with_items: "{{ (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'].keys() | list }}"
      when: item != 'summary'
    - name: set_fact rbdmirror_to_kill_gid
      set_fact:
-        rbdmirror_to_kill_gid: "{{ (ceph_health.stdout | from_json)['servicemap']['services']['rbd-mirror']['daemons'][item]['gid'] }}"
+        rbdmirror_to_kill_gid: "{{ (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'][item]['gid'] }}"
      with_items: "{{ rbdmirror_gids }}"
-      when: (ceph_health.stdout | from_json)['servicemap']['services']['rbd-mirror']['daemons'][item]['metadata']['id'] == rbdmirror_to_kill_hostname
+      when: (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'][item]['metadata']['id'] == rbdmirror_to_kill_hostname
  tasks:
    - name: stop rbdmirror service
@ -106,14 +106,14 @@
post_tasks: post_tasks:
- name: get servicemap details - name: get servicemap details
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} -s -f json" command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
register: ceph_health register: ceph_health
failed_when: failed_when:
- "'rbd-mirror' in (ceph_health.stdout | from_json)['servicemap']['services'].keys() | list" - "'rbd-mirror' in (ceph_health.stdout | from_json)['services'].keys() | list"
- rbdmirror_to_kill_gid in (ceph_health.stdout | from_json)['servicemap']['services']['rbd-mirror']['daemons'].keys() | list - rbdmirror_to_kill_gid in (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'].keys() | list
until: until:
- "'rbd-mirror' in (ceph_health.stdout | from_json)['servicemap']['services'].keys() | list" - "'rbd-mirror' in (ceph_health.stdout | from_json)['services'].keys() | list"
- rbdmirror_to_kill_gid not in (ceph_health.stdout | from_json)['servicemap']['services']['rbd-mirror']['daemons'].keys() | list - rbdmirror_to_kill_gid not in (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'].keys() | list
when: rbdmirror_to_kill_gid is defined when: rbdmirror_to_kill_gid is defined
retries: 12 retries: 12
delay: 10 delay: 10

@@ -76,12 +76,12 @@
      delay: 2
    - name: get rgw instances
-      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
+      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
      register: rgw_instances
    - name: exit playbook, if the rgw_to_kill doesn't exist
-      when: rgw_to_kill not in (rgw_instances.stdout | from_json).servicemap.services.rgw.daemons.keys() | list
+      when: rgw_to_kill not in (rgw_instances.stdout | from_json).services.rgw.daemons.keys() | list
      fail:
        msg: >
          It seems that the rgw instance given is not part of the ceph cluster. Please
@@ -111,14 +111,14 @@
      delay: 2
    - name: exit if rgw_to_kill is reported in ceph status
-      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
+      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
      register: ceph_status
      failed_when:
-        - (ceph_status.stdout | from_json).servicemap.services.rgw is defined
-        - rgw_to_kill in (ceph_status.stdout | from_json).servicemap.services.rgw.daemons.keys() | list
+        - (ceph_status.stdout | from_json).services.rgw is defined
+        - rgw_to_kill in (ceph_status.stdout | from_json).services.rgw.daemons.keys() | list
      until:
-        - (ceph_status.stdout | from_json).servicemap.services.rgw is defined
-        - rgw_to_kill not in (ceph_status.stdout | from_json).servicemap.services.rgw.daemons.keys() | list
+        - (ceph_status.stdout | from_json).services.rgw is defined
+        - rgw_to_kill not in (ceph_status.stdout | from_json).services.rgw.daemons.keys() | list
      retries: 3
      delay: 3

@@ -245,7 +245,7 @@
        or inventory_hostname in groups.get(nfs_group_name, [])
  block:
    - name: get ceph current status
-      command: "{{ timeout_command }} {{ _container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
+      command: "{{ timeout_command }} {{ _container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
      changed_when: false
      failed_when: false
      check_mode: no
@@ -262,16 +262,15 @@
    - name: set_fact rgw_hostname
      set_fact:
        rgw_hostname: "{% set _value = ansible_hostname -%}
-        {% for key in (ceph_current_status['servicemap']['services']['rgw']['daemons'] | list) -%}
+        {% for key in (ceph_current_status['services']['rgw']['daemons'] | list) -%}
        {% if key == ansible_fqdn -%}
        {% set _value = key -%}
        {% endif -%}
        {% endfor -%}
        {{ _value }}"
      when:
-        - ceph_current_status['servicemap'] is defined
-        - ceph_current_status['servicemap']['services'] is defined
-        - ceph_current_status['servicemap']['services']['rgw'] is defined
+        - ceph_current_status['services'] is defined
+        - ceph_current_status['services']['rgw'] is defined
    - name: set_fact osd_pool_default_pg_num
      set_fact: