introduce new role ceph-facts

Sometimes we play the whole `ceph-defaults` role just to access the
default values of some variables. This means we also play the
`facts.yml` part of the role when it isn't desired. Splitting this role
will speed up the playbook.

Closes: #3282

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 0eb56e36f8)
Guillaume Abrioux 2018-12-10 15:46:32 +01:00 committed by Sébastien Han
parent c3bb76b8e9
commit 416b503476
14 changed files with 284 additions and 199 deletions
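In effect, fact gathering moves out of `ceph-defaults` into the new `ceph-facts` role, so plays that only need default variables stay cheap and plays that need facts add the role explicitly. A minimal sketch of the resulting pattern (the host group and flags here are illustrative, not taken from this commit):

- hosts: mons
  gather_facts: false
  become: true
  roles:
    - ceph-defaults   # variables only; facts.yml no longer runs here
    - ceph-facts      # ceph-specific fact gathering, now opt-in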

@@ -44,6 +44,7 @@
roles:
- ceph-defaults
- ceph-validate
- ceph-facts
- hosts: osds
gather_facts: False
@@ -67,6 +68,7 @@
- role: ceph-infra
- role: ceph-docker-common
when: containerized_deployment | bool
- role: ceph-facts
- role: ceph-common
when: not containerized_deployment | bool
- role: ceph-config

@@ -308,214 +308,213 @@
gather_facts: true
become: true
tasks:
# This is a trick so we can access 'ceph-defaults' defaults variables in 'ceph-facts'
roles:
- ceph-defaults
- import_role:
name: ceph-defaults
private: false
post_tasks:
- name: gather monitors facts
setup:
delegate_to: "{{ item }}"
delegate_facts: True
with_items: "{{ groups.get(mon_group_name | default('mons')) }}"
- import_role:
name: ceph-facts
private: false
- name: get all the running osds
shell: |
systemctl list-units | grep 'loaded[[:space:]]\+active' | grep -oE "ceph-osd@([0-9]{1,2}|[a-z]+).service"
register: osd_units
ignore_errors: true
- name: disable ceph osd service
service:
name: "{{ item }}"
state: stopped
enabled: no
with_items: "{{ osd_units.stdout_lines }}"
- name: remove osd mountpoint tree
file:
path: /var/lib/ceph/osd/
state: absent
register: remove_osd_mountpoints
ignore_errors: true
- name: for ceph-disk based deployment
block:
- name: get prepare container
command: "docker ps -a -q --filter='name=ceph-osd-prepare'"
register: prepare_containers
ignore_errors: true
- name: remove ceph osd prepare container
command: "docker rm -f {{ item }}"
with_items: "{{ prepare_containers.stdout_lines }}"
ignore_errors: true
# NOTE(leseb): hope someone will find a more elegant way one day...
- name: see if encrypted partitions are present
shell: |
blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2
register: encrypted_ceph_partuuid
- name: get ceph data partitions
command: |
blkid -o device -t PARTLABEL="ceph data"
failed_when: false
register: ceph_data_partition_to_erase_path
- name: get ceph lockbox partitions
command: |
blkid -o device -t PARTLABEL="ceph lockbox"
failed_when: false
register: ceph_lockbox_partition_to_erase_path
- name: get ceph block partitions
command: |
blkid -o device -t PARTLABEL="ceph block"
failed_when: false
register: ceph_block_partition_to_erase_path
- name: get ceph journal partitions
command: |
blkid -o device -t PARTLABEL="ceph journal"
failed_when: false
register: ceph_journal_partition_to_erase_path
- name: get ceph db partitions
command: |
blkid -o device -t PARTLABEL="ceph block.db"
failed_when: false
register: ceph_db_partition_to_erase_path
- name: get ceph wal partitions
command: |
blkid -o device -t PARTLABEL="ceph block.wal"
failed_when: false
register: ceph_wal_partition_to_erase_path
- name: set_fact combined_devices_list
set_fact:
combined_devices_list: "{{ ceph_data_partition_to_erase_path.get('stdout_lines', []) +
ceph_lockbox_partition_to_erase_path.get('stdout_lines', []) +
ceph_block_partition_to_erase_path.get('stdout_lines', []) +
ceph_journal_partition_to_erase_path.get('stdout_lines', []) +
ceph_db_partition_to_erase_path.get('stdout_lines', []) +
ceph_wal_partition_to_erase_path.get('stdout_lines', []) }}"
- name: resolve parent device
command: lsblk --nodeps -no pkname "{{ item }}"
register: tmp_resolved_parent_device
with_items:
- "{{ combined_devices_list }}"
- name: set_fact resolved_parent_device
set_fact:
resolved_parent_device: "{{ tmp_resolved_parent_device.results | map(attribute='stdout') | list | unique }}"
- name: zap ceph osd disks
shell: |
docker run --rm \
--privileged=true \
--name ceph-osd-zap-{{ ansible_hostname }}-{{ item }} \
-v /dev/:/dev/ \
-e OSD_DEVICE=/dev/{{ item }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
zap_device
with_items:
- "{{ resolved_parent_device }}"
- name: wait until the zap containers die
shell: |
docker ps | grep -sq ceph-osd-zap-{{ ansible_hostname }}
register: zap_alive
failed_when: false
until: zap_alive.rc != 0
retries: 5
delay: 10
- name: remove ceph osd zap disk container
docker_container:
image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
name: "ceph-osd-zap-{{ ansible_hostname }}-{{ item }}"
state: absent
with_items:
- "{{ resolved_parent_device }}"
- name: remove ceph osd service
file:
path: /etc/systemd/system/ceph-osd@.service
state: absent
when:
- osd_scenario != "lvm"
- name: for ceph-volume based deployments
block:
- name: zap and destroy osds created by ceph-volume with lvm_volumes
ceph_volume:
data: "{{ item.data }}"
data_vg: "{{ item.data_vg|default(omit) }}"
journal: "{{ item.journal|default(omit) }}"
journal_vg: "{{ item.journal_vg|default(omit) }}"
db: "{{ item.db|default(omit) }}"
db_vg: "{{ item.db_vg|default(omit) }}"
wal: "{{ item.wal|default(omit) }}"
wal_vg: "{{ item.wal_vg|default(omit) }}"
action: "zap"
environment:
CEPH_VOLUME_DEBUG: 1
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
CEPH_CONTAINER_BINARY: "docker"
with_items: "{{ lvm_volumes }}"
- name: zap and destroy osds created by ceph-volume with devices
ceph_volume:
data: "{{ item }}"
action: "zap"
environment:
CEPH_VOLUME_DEBUG: 1
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
CEPH_CONTAINER_BINARY: "docker"
with_items: "{{ devices | default([]) }}"
when:
- osd_scenario == "lvm"
- name: remove ceph osd image
docker_image:
state: absent
repository: "{{ ceph_docker_registry }}"
name: "{{ ceph_docker_image }}"
tag: "{{ ceph_docker_image_tag }}"
force: yes
tags:
remove_img
ignore_errors: true
- name: include vars from group_vars/osds.yml
include_vars:
file: "{{ item }}"
with_first_found:
- files:
- "{{ playbook_dir }}/group_vars/osds"
- "{{ playbook_dir }}/group_vars/osds.yml"
skip: true
- name: find all osd_disk_prepare logs
find:
paths: "{{ ceph_osd_docker_run_script_path | default('/usr/share') }}"
pattern: "ceph-osd-prepare-*.log"
register: osd_disk_prepare_logs
- name: ensure all osd_disk_prepare logs are removed
file:
path: "{{ item.path }}"
state: absent
with_items:
- "{{ osd_disk_prepare_logs.files }}"
- name: purge ceph mon cluster
@@ -592,12 +591,11 @@
gather_facts: true
become: true
tasks:
- import_role:
name: ceph-defaults
private: false
# This is a trick so we can access 'ceph-defaults' defaults variables in 'ceph-facts'
roles:
- ceph-defaults
post_tasks:
- import_role:
name: ceph-facts
private: false
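
Both purge plays above converge on the same task-level pattern; a condensed sketch for reference (the host group is illustrative; per the diff, `private: false` is intended to keep the imported role's variables visible to the rest of the play):

- hosts: osds
  gather_facts: true
  become: true
  tasks:
    - import_role:
        name: ceph-defaults
        private: false
    - import_role:
        name: ceph-facts
        private: false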

@@ -121,6 +121,7 @@
roles:
- ceph-defaults
- ceph-facts
- ceph-handler
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
@@ -299,6 +300,7 @@
roles:
- ceph-defaults
- ceph-facts
- ceph-handler
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
@@ -361,6 +363,7 @@
roles:
- ceph-defaults
- ceph-facts
- ceph-handler
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
@@ -446,6 +449,7 @@
roles:
- ceph-defaults
- ceph-facts
tasks:
- name: set_fact docker_exec_cmd_osd
@@ -502,6 +506,7 @@
roles:
- ceph-defaults
- ceph-facts
- ceph-handler
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
@@ -549,6 +554,7 @@
roles:
- ceph-defaults
- ceph-facts
- ceph-handler
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
@@ -604,6 +610,7 @@
roles:
- ceph-defaults
- ceph-facts
- ceph-handler
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
@@ -655,6 +662,7 @@
roles:
- ceph-defaults
- ceph-facts
- ceph-handler
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
@@ -709,6 +717,7 @@
roles:
- ceph-defaults
- ceph-facts
- ceph-handler
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
@@ -737,6 +746,7 @@
roles:
- ceph-defaults
- ceph-facts
- ceph-handler
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }

@@ -72,6 +72,7 @@
roles:
- ceph-defaults
- ceph-facts
post_tasks:
- name: pick a monitor different than the one we want to remove

@@ -57,6 +57,7 @@
roles:
- ceph-defaults
- ceph-facts
post_tasks:

@@ -118,6 +118,7 @@
roles:
- ceph-defaults
- ceph-facts
- ceph-handler
- ceph-docker-common
- ceph-mon
@@ -176,6 +177,7 @@
roles:
- ceph-defaults
- ceph-facts
- ceph-handler
- ceph-docker-common
- ceph-mgr
@@ -283,6 +285,7 @@
roles:
- ceph-defaults
- ceph-facts
- ceph-handler
- ceph-docker-common
- ceph-osd
@@ -345,6 +348,7 @@
roles:
- ceph-defaults
- ceph-facts
- ceph-handler
- ceph-docker-common
- ceph-mds
@@ -388,6 +392,7 @@
roles:
- ceph-defaults
- ceph-facts
- ceph-handler
- ceph-docker-common
- ceph-rgw
@@ -431,6 +436,7 @@
roles:
- ceph-defaults
- ceph-facts
- ceph-handler
- ceph-docker-common
- ceph-rbd-mirror
@@ -478,6 +484,7 @@
roles:
- ceph-defaults
- ceph-facts
- ceph-handler
- ceph-docker-common
- ceph-nfs

@@ -1,3 +1 @@
---
- name: include facts.yml
include_tasks: facts.yml

@@ -0,0 +1,3 @@
# Ansible role: ceph-facts
Documentation is available at http://docs.ceph.com/ceph-ansible/.

@@ -0,0 +1 @@
---

@@ -0,0 +1,17 @@
---
galaxy_info:
company: Red Hat
author: Guillaume Abrioux
description: Set some facts for ceph to be deployed
license: Apache
min_ansible_version: 2.7
platforms:
- name: Ubuntu
versions:
- xenial
- name: EL
versions:
- 7
galaxy_tags:
- system
dependencies: []

@@ -0,0 +1,3 @@
---
- name: include facts.yml
include_tasks: facts.yml

@@ -53,6 +53,8 @@
roles:
- role: ceph-defaults
tags: [with_pkg, fetch_container_image]
- role: ceph-facts
tags: [with_pkg, fetch_container_image]
- role: ceph-validate
- role: ceph-infra
- role: ceph-handler
@@ -89,6 +91,8 @@
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
- role: ceph-facts
tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-docker-common
- role: ceph-config
@@ -122,6 +126,8 @@
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
- role: ceph-facts
tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-docker-common
- role: ceph-config
@@ -151,6 +157,8 @@
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
- role: ceph-facts
tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-docker-common
- role: ceph-config
@@ -180,6 +188,8 @@
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
- role: ceph-facts
tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-docker-common
- role: ceph-config
@@ -209,6 +219,8 @@
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
- role: ceph-facts
tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-docker-common
- role: ceph-config
@@ -238,6 +250,8 @@
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
- role: ceph-facts
tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-docker-common
- role: ceph-config
@@ -267,6 +281,8 @@
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
- role: ceph-facts
tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-docker-common
- role: ceph-config
@@ -296,6 +312,8 @@
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
- role: ceph-facts
tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-docker-common
- role: ceph-config
@@ -329,6 +347,8 @@
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
- role: ceph-facts
tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-docker-common
when:
@@ -361,6 +381,7 @@
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
roles:
- { role: ceph-defaults, tags: ['ceph_update_config'] }
- { role: ceph-facts, tags: ['ceph_update_config'] }
- role: ceph-handler
- ceph-docker-common
- { role: ceph-config, tags: ['ceph_update_config'] }
@@ -378,18 +399,20 @@
any_errors_fatal: true
gather_facts: false
become: True
roles:
- ceph-defaults
tasks:
- name: get ceph status from the first monitor
command: docker exec ceph-mon-{{ hostvars[groups['mons'][0]]['ansible_hostname'] }} ceph --cluster {{ cluster | default ('ceph') }} -s
command: docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s
register: ceph_status
changed_when: false
delegate_to: "{{ groups['mons'][0] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
ignore_errors: true # we skip the error if mon_group_name is different than 'mons'
- name: "show ceph status for cluster {{ cluster | default ('ceph') }}"
- name: "show ceph status for cluster {{ cluster }}"
debug:
msg: "{{ ceph_status.stdout_lines }}"
delegate_to: "{{ groups['mons'][0] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when: not ceph_status.failed

@@ -92,6 +92,8 @@
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
- role: ceph-facts
tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-common
- role: ceph-config
@@ -120,6 +122,8 @@
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
- role: ceph-facts
tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-common
- role: ceph-config
@@ -148,6 +152,8 @@
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
- role: ceph-facts
tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-common
- role: ceph-config
@@ -176,6 +182,8 @@
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
- role: ceph-facts
tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-common
- role: ceph-config
@@ -204,6 +212,8 @@
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
- role: ceph-facts
tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-common
- role: ceph-config
@@ -232,6 +242,8 @@
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
- role: ceph-facts
tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-common
- role: ceph-config
@@ -260,6 +272,8 @@
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
- role: ceph-facts
tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-common
- role: ceph-config
@@ -320,6 +334,8 @@
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
- role: ceph-facts
tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-common
- role: ceph-config
@@ -348,6 +364,8 @@
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
- role: ceph-facts
tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-common
- role: ceph-config
@@ -378,6 +396,8 @@
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
- role: ceph-facts
tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-common
- role: ceph-config
@@ -396,18 +416,19 @@
gather_facts: false
become: True
any_errors_fatal: true
tasks:
roles:
- role: ceph-defaults
post_tasks:
- name: get ceph status from the first monitor
command: ceph --cluster {{ cluster | default ('ceph') }} -s
command: ceph --cluster {{ cluster }} -s
register: ceph_status
changed_when: false
delegate_to: "{{ groups['mons'][0] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
ignore_errors: true # we skip the error if mon_group_name is different than 'mons'
- name: "show ceph status for cluster {{ cluster | default ('ceph') }}"
- name: "show ceph status for cluster {{ cluster }}"
debug:
msg: "{{ ceph_status.stdout_lines }}"
delegate_to: "{{ groups['mons'][0] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when: not ceph_status.failed