infra: use dedicated variables for balancer status

The balancer status is registered during the cephadm-adopt, rolling_update
and switch2container playbooks. But it is also used in the ceph-handler role
which is included in those playbooks too.
Even if the ceph-handler tasks are skipped for rolling_update and
switch2container, the balancer_status variable is overwritten with the
skipped task result.

play1:
  register: balancer_status
play2:
  register: balancer_status <-- skipped
play3:
  when: (balancer_status.stdout | from_json)['active'] | bool

This leads to issues like:

The conditional check '(balancer_status.stdout | from_json)['active'] | bool'
failed. The error was: Unexpected templating type error occurred on
({% if (balancer_status.stdout | from_json)['active'] | bool %} True
{% else %} False {% endif %}): expected string or buffer.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1982054

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
pull/6795/head
Dimitri Savineau 2021-08-03 11:58:49 -04:00 committed by Guillaume Abrioux
parent 9b5d97adb9
commit 386661699b
3 changed files with 9 additions and 9 deletions

View File

@ -453,7 +453,7 @@
- name: get balancer module status
command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json"
register: balancer_status
register: balancer_status_adopt
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
@ -470,7 +470,7 @@
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when: (balancer_status.stdout | from_json)['active'] | bool
when: (balancer_status_adopt.stdout | from_json)['active'] | bool
- name: disable pg autoscale on pools
ceph_pool:
@ -633,7 +633,7 @@
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when: (balancer_status.stdout | from_json)['active'] | bool
when: (balancer_status_adopt.stdout | from_json)['active'] | bool
- name: redeploy mds daemons
hosts: "{{ mds_group_name|default('mdss') }}"

View File

@ -418,7 +418,7 @@
- name: get balancer module status
command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json"
register: balancer_status
register: balancer_status_update
changed_when: false
check_mode: false
@ -430,7 +430,7 @@
- name: disable balancer
command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off"
changed_when: false
when: (balancer_status.stdout | from_json)['active'] | bool
when: (balancer_status_update.stdout | from_json)['active'] | bool
- name: disable pg autoscale on pools
ceph_pool:
@ -575,7 +575,7 @@
- name: re-enable balancer
command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on"
changed_when: false
when: (balancer_status.stdout | from_json)['active'] | bool
when: (balancer_status_update.stdout | from_json)['active'] | bool
- name: upgrade ceph mdss cluster, deactivate all rank > 0
hosts: "{{ mon_group_name | default('mons') }}[0]"

View File

@ -223,7 +223,7 @@
- name: get balancer module status
command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json"
register: balancer_status
register: balancer_status_switch
changed_when: false
check_mode: false
@ -235,7 +235,7 @@
- name: disable balancer
command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off"
changed_when: false
when: (balancer_status.stdout | from_json)['active'] | bool
when: (balancer_status_switch.stdout | from_json)['active'] | bool
- name: disable pg autoscale on pools
ceph_pool:
@ -429,7 +429,7 @@
- name: re-enable balancer
command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on"
changed_when: false
when: (balancer_status.stdout | from_json)['active'] | bool
when: (balancer_status_switch.stdout | from_json)['active'] | bool
- name: switching from non-containerized to containerized ceph mds