rolling_update: fix pre and post osd upgrade play

when using --limit osds, the plays before and after the osd upgrade are
skipped because we use `hosts: "{{ mon_group_name | default('mons') }}[0]"`.
using `hosts: "{{ osd_group_name | default('osds') }}"` with
`delegate_to` to the first monitor addresses this issue.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit fc9f87c45f)
pull/6972/head
Guillaume Abrioux 2021-10-25 13:43:25 +02:00
parent dc1a4c29ea
commit 3dd96da652
1 changed files with 83 additions and 75 deletions

View File

@ -405,7 +405,7 @@
- name: set osd flags
hosts: "{{ mon_group_name | default('mons') }}[0]"
hosts: "{{ osd_group_name | default('osds') }}"
tags: osds
become: True
gather_facts: false
@ -416,46 +416,50 @@
name: ceph-facts
tasks_from: container_binary.yml
- name: get pool list
command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json"
register: pool_list
- name: set osd flags, disable autoscaler and balancer
run_once: true
changed_when: false
check_mode: false
delegate_to: "{{ groups[mon_group_name][0] }}"
block:
- name: get pool list
command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json"
register: pool_list
changed_when: false
check_mode: false
- name: get balancer module status
command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json"
register: balancer_status_update
changed_when: false
check_mode: false
- name: get balancer module status
command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json"
register: balancer_status_update
run_once: true
changed_when: false
check_mode: false
- name: set_fact pools_pgautoscaler_mode
set_fact:
pools_pgautoscaler_mode: "{{ pools_pgautoscaler_mode | default([]) | union([{'name': item.pool_name, 'mode': item.pg_autoscale_mode}]) }}"
with_items: "{{ pool_list.stdout | default('{}') | from_json }}"
- name: set_fact pools_pgautoscaler_mode
set_fact:
pools_pgautoscaler_mode: "{{ pools_pgautoscaler_mode | default([]) | union([{'name': item.pool_name, 'mode': item.pg_autoscale_mode}]) }}"
with_items: "{{ pool_list.stdout | default('{}') | from_json }}"
- name: disable balancer
command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off"
changed_when: false
when: (balancer_status_update.stdout | from_json)['active'] | bool
- name: disable balancer
command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off"
changed_when: false
when: (balancer_status_update.stdout | from_json)['active'] | bool
- name: disable pg autoscale on pools
command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode off"
with_items: "{{ pools_pgautoscaler_mode }}"
when:
- pools_pgautoscaler_mode is defined
- item.mode == 'on'
- name: disable pg autoscale on pools
command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode off"
with_items: "{{ pools_pgautoscaler_mode }}"
when:
- pools_pgautoscaler_mode is defined
- item.mode == 'on'
- name: set osd flags
ceph_osd_flag:
name: "{{ item }}"
cluster: "{{ cluster }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_items:
- noout
- nodeep-scrub
- name: set osd flags
ceph_osd_flag:
name: "{{ item }}"
cluster: "{{ cluster }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_items:
- noout
- nodeep-scrub
- name: upgrade ceph osds cluster
vars:
@ -543,7 +547,7 @@
- name: complete osd upgrade
hosts: "{{ mon_group_name|default('mons') }}[0]"
hosts: "{{ osd_group_name | default('osds') }}"
tags: osds
become: True
gather_facts: false
@ -554,51 +558,55 @@
name: ceph-facts
tasks_from: container_binary.yml
- name: re-enable pg autoscale on pools
command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode on"
with_items: "{{ pools_pgautoscaler_mode }}"
when:
- pools_pgautoscaler_mode is defined
- item.mode == 'on'
- name: unset osd flags, re-enable pg autoscaler and balancer
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
block:
- name: re-enable pg autoscale on pools
command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode on"
with_items: "{{ pools_pgautoscaler_mode }}"
when:
- pools_pgautoscaler_mode is defined
- item.mode == 'on'
- name: unset osd flags
ceph_osd_flag:
name: "{{ item }}"
cluster: "{{ cluster }}"
state: absent
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_items:
- noout
- nodeep-scrub
- name: unset osd flags
ceph_osd_flag:
name: "{{ item }}"
cluster: "{{ cluster }}"
state: absent
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_items:
- noout
- nodeep-scrub
- name: re-enable balancer
command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on"
changed_when: false
when: (balancer_status_update.stdout | from_json)['active'] | bool
- name: re-enable balancer
command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on"
changed_when: false
when: (balancer_status_update.stdout | from_json)['active'] | bool
- name: set_fact container_exec_cmd_osd
set_fact:
container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- name: set_fact container_exec_cmd_osd
set_fact:
container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
- name: get osd versions
command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
register: ceph_versions
changed_when: false
- name: get osd versions
command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
register: ceph_versions
changed_when: false
- name: set_fact ceph_versions_osd
set_fact:
ceph_versions_osd: "{{ (ceph_versions.stdout|from_json).osd }}"
- name: set_fact ceph_versions_osd
set_fact:
ceph_versions_osd: "{{ (ceph_versions.stdout|from_json).osd }}"
# length == 1 means there is a single osds versions entry
# thus all the osds are running the same version
- name: complete osds upgrade
command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} osd require-osd-release nautilus"
when:
- (ceph_versions.get('stdout', '{}')|from_json).get('osd', {}) | length == 1
- ceph_versions_osd | string is search("ceph version 14")
# length == 1 means there is a single osds versions entry
# thus all the osds are running the same version
- name: complete osds upgrade
command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} osd require-osd-release nautilus"
when:
- (ceph_versions.get('stdout', '{}')|from_json).get('osd', {}) | length == 1
- ceph_versions_osd | string is search("ceph version 14")
- name: upgrade ceph mdss cluster, deactivate all rank > 0