mirror of https://github.com/ceph/ceph-ansible.git
shrink: don't use localhost node
The ceph-facts role is running on localhost, so if this node is using a
different OS/release than the ceph node we can have a mismatch between
the docker/podman container binary.
This commit also reduces the scope of the ceph-facts role because we only
need the container_binary tasks.
Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
(cherry picked from commit 08ac2e3034)
pull/5121/head
parent
e037e99bd2
commit
92b671bcbe
|
@ -24,7 +24,7 @@
|
|||
tasks_from: container_binary
|
||||
|
||||
- name: perform checks, remove mds and print cluster health
|
||||
hosts: localhost
|
||||
hosts: "{{ groups[mon_group_name][0] }}"
|
||||
become: true
|
||||
vars_prompt:
|
||||
- name: ireallymeanit
|
||||
|
@ -61,14 +61,13 @@
|
|||
|
||||
- name: set_fact container_exec_cmd for mon0
|
||||
set_fact:
|
||||
container_exec_cmd: "{{ hostvars[groups[mon_group_name][0]]['container_binary'] }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
|
||||
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: exit playbook, if can not connect to the cluster
|
||||
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
|
||||
register: ceph_health
|
||||
until: ceph_health is succeeded
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
retries: 5
|
||||
delay: 2
|
||||
|
||||
|
@ -82,12 +81,10 @@
|
|||
- name: exit mds if it the deployment is containerized
|
||||
when: containerized_deployment | bool
|
||||
command: "{{ container_exec_cmd | default('') }} ceph tell mds.{{ mds_to_kill }} exit"
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
|
||||
- name: get ceph status
|
||||
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
|
||||
register: ceph_status
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
|
||||
- name: set_fact current_max_mds
|
||||
set_fact:
|
||||
|
@ -123,7 +120,6 @@
|
|||
- name: get new ceph status
|
||||
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
|
||||
register: ceph_status
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
|
||||
- name: get active mds nodes list
|
||||
set_fact:
|
||||
|
@ -133,7 +129,6 @@
|
|||
- name: get ceph fs dump status
|
||||
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json"
|
||||
register: ceph_fs_status
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
|
||||
- name: create a list of standby mdss
|
||||
set_fact:
|
||||
|
@ -148,7 +143,6 @@
|
|||
|
||||
- name: delete the filesystem when killing last mds
|
||||
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs rm --yes-i-really-mean-it {{ cephfs }}"
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
when:
|
||||
- (ceph_status.stdout | from_json)['fsmap']['up'] | int == 0
|
||||
- (ceph_status.stdout | from_json)['fsmap']['up:standby'] | int == 0
|
||||
|
@ -162,4 +156,3 @@
|
|||
post_tasks:
|
||||
- name: show ceph health
|
||||
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
|
|
|
@ -21,7 +21,7 @@
|
|||
msg: gather facts on all Ceph hosts for following reference
|
||||
|
||||
- name: confirm if user really meant to remove manager from the ceph cluster
|
||||
hosts: localhost
|
||||
hosts: "{{ groups[mon_group_name][0] }}"
|
||||
become: true
|
||||
vars_prompt:
|
||||
- name: ireallymeanit
|
||||
|
@ -34,17 +34,17 @@
|
|||
|
||||
- import_role:
|
||||
name: ceph-facts
|
||||
tasks_from: container_binary
|
||||
|
||||
- name: set_fact container_exec_cmd
|
||||
when: containerized_deployment | bool
|
||||
set_fact:
|
||||
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
|
||||
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
|
||||
|
||||
- name: exit playbook, if can not connect to the cluster
|
||||
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
|
||||
register: ceph_health
|
||||
until: ceph_health is succeeded
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
retries: 5
|
||||
delay: 2
|
||||
|
||||
|
@ -53,7 +53,6 @@
|
|||
- name: save mgr dump output
|
||||
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{cluster}} mgr dump"
|
||||
register: mgr_dump
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
|
||||
- name: get a list of names of standby mgrs
|
||||
set_fact:
|
||||
|
@ -120,7 +119,6 @@
|
|||
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json | grep {{ mgr_to_kill }}"
|
||||
register: mgr_in_ceph_status
|
||||
failed_when: mgr_in_ceph_status.rc == 0
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
retries: 3
|
||||
delay: 5
|
||||
|
||||
|
@ -133,4 +131,3 @@
|
|||
post_tasks:
|
||||
- name: show ceph health
|
||||
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
- debug: msg="gather facts on all Ceph hosts for following reference"
|
||||
|
||||
- name: confirm whether user really meant to remove monitor from the ceph cluster
|
||||
hosts: localhost
|
||||
hosts: "{{ groups[mon_group_name][0] }}"
|
||||
become: true
|
||||
vars_prompt:
|
||||
- name: ireallymeanit
|
||||
|
@ -65,6 +65,7 @@
|
|||
|
||||
- import_role:
|
||||
name: ceph-facts
|
||||
tasks_from: container_binary
|
||||
|
||||
tasks:
|
||||
- name: pick a monitor different than the one we want to remove
|
||||
|
|
|
@ -60,12 +60,12 @@
|
|||
|
||||
- import_role:
|
||||
name: ceph-facts
|
||||
tasks_from: container_binary.yml
|
||||
tasks_from: container_binary
|
||||
|
||||
post_tasks:
|
||||
- name: set_fact container_exec_cmd build docker exec command (containerized)
|
||||
set_fact:
|
||||
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
|
||||
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: set_fact container_run_cmd
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
|
||||
- name: confirm whether user really meant to remove rbd mirror from the ceph
|
||||
cluster
|
||||
hosts: localhost
|
||||
hosts: "{{ groups[mon_group_name][0] }}"
|
||||
become: true
|
||||
vars_prompt:
|
||||
- name: ireallymeanit
|
||||
|
@ -35,6 +35,7 @@
|
|||
|
||||
- import_role:
|
||||
name: ceph-facts
|
||||
tasks_from: container_binary
|
||||
|
||||
- name: exit playbook, if no rbdmirror was given
|
||||
fail:
|
||||
|
@ -63,13 +64,12 @@
|
|||
- name: set_fact container_exec_cmd for mon0
|
||||
when: containerized_deployment | bool
|
||||
set_fact:
|
||||
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
|
||||
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
|
||||
|
||||
- name: exit playbook, if can not connect to the cluster
|
||||
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} -s -f json"
|
||||
register: ceph_health
|
||||
until: ceph_health is succeeded
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
retries: 5
|
||||
delay: 2
|
||||
|
||||
|
@ -108,7 +108,6 @@
|
|||
- name: get servicemap details
|
||||
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} -s -f json"
|
||||
register: ceph_health
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
|
||||
- name: set_fact rbdmirror_gids
|
||||
set_fact:
|
||||
|
@ -120,7 +119,6 @@
|
|||
|
||||
- name: show ceph health
|
||||
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
|
||||
- name: check presence of "{{ rbdmirror_to_kill_hostname }}"
|
||||
fail:
|
||||
|
|
Loading…
Reference in New Issue