mirror of https://github.com/ceph/ceph-ansible.git
update: skip mds deactivation when no mds in inventory
Let's skip this part of the code if there's no mds node in the
inventory.
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 5ec906c3af)
pull/4683/head  v3.2.34
parent f3fc97caa0
commit 4b667b2f37
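Note on the approach: the skip relies on Ansible's block/when pattern, where the when set on the wrapping task is inherited by every task inside the block, so the whole deactivation sequence is disabled in one place when the mds group has at most one host. A minimal sketch of that pattern follows, assuming a throwaway localhost play; the sketch.yml name, the debug placeholder and the hard-coded mdss group name are illustrative only and not part of this change.

# sketch.yml - hypothetical illustration of the guard this commit adds
- hosts: localhost
  gather_facts: false
  vars:
    mds_group_name: mdss          # same default group name the playbook uses
  tasks:
    - name: deactivate all mds rank > 0
      # the condition is applied to each task in the block: with fewer than
      # two mds hosts in the inventory the whole block is skipped and no
      # 'ceph mds deactivate' call is ever attempted
      when: groups.get(mds_group_name, []) | length > 1
      block:
        - name: placeholder for the real deactivation tasks
          debug:
            msg: "more than one mds in inventory, deactivation would run here"

When the block is skipped, mds_active_name is never set; that is why the diff below also falls back to groups.get(mds_group_name)[0] in the "create active_mdss group" task and to active_mdss | default([]) in the follow-up play.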
@@ -465,99 +465,104 @@
   hosts: "{{ groups[mon_group_name|default('mons')][0] }}"
   become: true
   roles:
-    - ceph-defaults
-    - ceph-facts
+    - role: ceph-defaults
+    - role: ceph-facts
+      when: groups.get(mds_group_name, []) | length > 1

   post_tasks:
-    - name: get mds cluster status
-      command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} fs get {{ cephfs }} -f json"
-      changed_when: false
-      register: _cephfs_status
+    - name: deactivate all mds rank > 0
+      when: groups.get(mds_group_name, []) | length > 1
+      block:
+        - name: get mds cluster status
+          command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} fs get {{ cephfs }} -f json"
+          changed_when: false
+          register: _cephfs_status

-    - name: get all mds names
-      command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} mds dump -f json"
-      changed_when: false
-      register: _all_mds_name
+        - name: get all mds names
+          command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} mds dump -f json"
+          changed_when: false
+          register: _all_mds_name

-    - name: set_fact all_mds_name
-      set_fact:
-        all_mds_name: "{{ all_mds_name | default([]) + [(_all_mds_name.stdout | from_json)['info'][item.key]['name'] ] }}"
-      with_dict: "{{ ((_all_mds_name.stdout | from_json).info) }}"
+        - name: set_fact all_mds_name
+          set_fact:
+            all_mds_name: "{{ all_mds_name | default([]) + [(_all_mds_name.stdout | from_json)['info'][item.key]['name'] ] }}"
+          with_dict: "{{ ((_all_mds_name.stdout | from_json).info) }}"

-    - name: set max_mds 1 on ceph fs
-      command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} fs set {{ cephfs }} max_mds 1"
-      changed_when: false
-      register: _max_mds_result
+        - name: set max_mds 1 on ceph fs
+          command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} fs set {{ cephfs }} max_mds 1"
+          changed_when: false
+          register: _max_mds_result

-    - name: deactivate all non-zero ranks
-      shell: |
-        #!/bin/bash
-        {{ docker_exec_cmd }} ceph --cluster {{ cluster }} mds deactivate {{ cephfs }}:{{ item }}
-        {{ docker_exec_cmd }} ceph --cluster {{ cluster }} fs get {{ cephfs }} -f json
-      register: deactivate_status
-      retries: 10
-      delay: 1
-      failed_when: false
-      until: item not in (deactivate_status.stdout | from_json).mdsmap.in
-      with_items: "{{ (_cephfs_status.stdout | from_json).mdsmap.in | difference([0]) | sort(reverse=True) }}"
+        - name: deactivate all non-zero ranks
+          shell: |
+            #!/bin/bash
+            {{ docker_exec_cmd }} ceph --cluster {{ cluster }} mds deactivate {{ cephfs }}:{{ item }}
+            {{ docker_exec_cmd }} ceph --cluster {{ cluster }} fs get {{ cephfs }} -f json
+          register: deactivate_status
+          retries: 10
+          delay: 1
+          failed_when: false
+          until: item not in (deactivate_status.stdout | from_json).mdsmap.in
+          with_items: "{{ (_cephfs_status.stdout | from_json).mdsmap.in | difference([0]) | sort(reverse=True) }}"

-    - name: get name of remaining active mds
-      command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} mds dump -f json"
-      changed_when: false
-      register: _mds_active_name
+        - name: get name of remaining active mds
+          command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} mds dump -f json"
+          changed_when: false
+          register: _mds_active_name

-    - name: set_fact mds_active_name
-      set_fact:
-        mds_active_name: "{{ (_mds_active_name.stdout | from_json)['info'][item.key]['name'] }}"
-      with_dict: "{{ (_mds_active_name.stdout | from_json).info }}"
+        - name: set_fact mds_active_name
+          set_fact:
+            mds_active_name: "{{ (_mds_active_name.stdout | from_json)['info'][item.key]['name'] }}"
+          with_dict: "{{ (_mds_active_name.stdout | from_json).info }}"

-    - name: create standby_mdss group
-      add_host:
-        name: "{{ item }}"
-        groups: standby_mdss
-        ansible_host: "{{ hostvars[item]['ansible_host'] | default(omit) }}"
-        ansible_port: "{{ hostvars[item]['ansible_port'] | default(omit) }}"
-      with_items: "{{ groups[mds_group_name] | difference(mds_active_name) }}"
+        - name: create standby_mdss group
+          add_host:
+            name: "{{ item }}"
+            groups: standby_mdss
+            ansible_host: "{{ hostvars[item]['ansible_host'] | default(omit) }}"
+            ansible_port: "{{ hostvars[item]['ansible_port'] | default(omit) }}"
+          with_items: "{{ groups[mds_group_name] | difference(mds_active_name) }}"

-    - name: stop standby ceph mds
-      systemd:
-        name: "ceph-mds@{{ hostvars[item]['ansible_hostname'] }}"
-        state: stopped
-        enabled: no
-      delegate_to: "{{ item }}"
-      with_items: "{{ groups['standby_mdss'] | default([]) }}"
-      when: groups['standby_mdss'] | default([]) | length > 0
+        - name: stop standby ceph mds
+          systemd:
+            name: "ceph-mds@{{ hostvars[item]['ansible_hostname'] }}"
+            state: stopped
+            enabled: no
+          delegate_to: "{{ item }}"
+          with_items: "{{ groups['standby_mdss'] | default([]) }}"
+          when: groups['standby_mdss'] | default([]) | length > 0

-    # dedicated task for masking systemd unit
-    # somehow, having a single task doesn't work in containerized context
-    - name: mask stop standby ceph mds
-      systemd:
-        name: "ceph-mds@{{ hostvars[item]['ansible_hostname'] }}"
-        masked: yes
-      delegate_to: "{{ item }}"
-      with_items: "{{ groups['standby_mdss'] | default([]) }}"
-      when: groups['standby_mdss'] | default([]) | length > 0
+        # dedicated task for masking systemd unit
+        # somehow, having a single task doesn't work in containerized context
+        - name: mask stop standby ceph mds
+          systemd:
+            name: "ceph-mds@{{ hostvars[item]['ansible_hostname'] }}"
+            masked: yes
+          delegate_to: "{{ item }}"
+          with_items: "{{ groups['standby_mdss'] | default([]) }}"
+          when: groups['standby_mdss'] | default([]) | length > 0

-    - name: wait until all standbys mds are stopped
-      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json"
-      changed_when: false
-      register: wait_standbys_down
-      retries: 300
-      delay: 5
-      until: (wait_standbys_down.stdout | from_json).standbys | length == 0
+        - name: wait until all standbys mds are stopped
+          command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json"
+          changed_when: false
+          register: wait_standbys_down
+          retries: 300
+          delay: 5
+          until: (wait_standbys_down.stdout | from_json).standbys | length == 0

     - name: create active_mdss group
       add_host:
-        name: "{{ mds_active_name }}"
+        name: "{{ mds_active_name if mds_active_name is defined else groups.get(mds_group_name)[0] }}"
         groups: active_mdss
-        ansible_host: "{{ hostvars[mds_active_name]['ansible_host'] | default(omit) }}"
-        ansible_port: "{{ hostvars[mds_active_name]['ansible_port'] | default(omit) }}"
+        ansible_host: "{{ hostvars[mds_active_name if mds_active_name is defined else groups.get(mds_group_name)[0]]['ansible_host'] | default(omit) }}"
+        ansible_port: "{{ hostvars[mds_active_name if mds_active_name is defined else groups.get(mds_group_name)[0]]['ansible_port'] | default(omit) }}"


 - name: upgrade active mds
   vars:
     upgrade_ceph_packages: True
-  hosts: active_mdss
+  hosts: active_mdss | default([])
   become: true
   pre_tasks:
     - name: prevent restart from the packaging