From 8d03b40e5dd3cef66a58bb5eecbaa35bfdd8b4be Mon Sep 17 00:00:00 2001 From: Ben England Date: Wed, 6 Apr 2016 15:58:17 -0400 Subject: [PATCH 01/10] more robust, simpler, idempotent --- purge-cluster.yml | 58 +++++++++++++++++++++++++++++++---------------- 1 file changed, 39 insertions(+), 19 deletions(-) diff --git a/purge-cluster.yml b/purge-cluster.yml index f9ecdcf09..7fef66f9a 100644 --- a/purge-cluster.yml +++ b/purge-cluster.yml @@ -49,8 +49,8 @@ # This can cause problem with qemu-kvm purge_all_packages: true -# When set to true and raw _multi_journal is used then journal disk are also zapped - zap_journal_disks: true +# When set to true and raw _multi_journal is used then block devices are also zapped + zap_block_devs: true ceph_packages: - ceph @@ -107,7 +107,8 @@ shell: "if [ -d /usr/lib/systemd ] ; then find /usr/lib/systemd/system -name 'ceph*' | wc -l ; else echo 0 ; fi" register: systemd_unit_files -# Infernalis +# after Hammer release + - name: stop ceph.target with systemd service: name: ceph.target @@ -156,34 +157,33 @@ systemd_unit_files.stdout != "0" and rgw_group_name in group_names -# before infernalis +# before infernalis release, using sysvinit scripts +# we use this test so we do not have to know which RPM contains the boot script +# or where it is placed. + - name: stop ceph osds - command: service ceph stop osd + shell: "service ceph status osd ; if [ $? == 0 ] ; then service ceph stop osd ; else echo ; fi" when: ansible_os_family == 'RedHat' and - osd_group_name in group_names and - systemd_unit_files.stdout == "0" + osd_group_name in group_names - name: stop ceph mons - command: service ceph stop mon + shell: "service ceph status mon ; if [ $? == 0 ] ; then service ceph stop mon ; else echo ; fi" when: ansible_os_family == 'RedHat' and - mon_group_name in group_names and - systemd_unit_files.stdout == "0" + mon_group_name in group_names - name: stop ceph mdss - command: service ceph stop mds + shell: "service ceph status mds ; if [ $? == 0 ] ; then service ceph stop mds ; else echo ; fi" when: ansible_os_family == 'RedHat' and - mds_group_name in group_names and - systemd_unit_files.stdout == "0" + mds_group_name in group_names - name: stop ceph rgws - command: service ceph-radosgw stop + shell: "service ceph-radosgw status ; if [ $? 
== 0 ] ; then service ceph-radosgw stop ; else echo ; fi" when: ansible_os_family == 'RedHat' and - rgw_group_name in group_names and - systemd_unit_files.stdout == "0" + rgw_group_name in group_names # Ubuntu 14.04 - name: stop ceph osds on ubuntu @@ -223,6 +223,16 @@ register: check_for_running_ceph failed_when: check_for_running_ceph.rc == 0 + - name: see if ceph-disk-created data partitions are present + shell: "ls /dev/disk/by-partlabel | grep -q 'ceph data'" + failed_when: false + register: ceph_data_partlabels + + - name: see if ceph-disk-created journal partitions are present + shell: "ls /dev/disk/by-partlabel | grep -q 'ceph journal'" + failed_when: false + register: ceph_journal_partlabels + - name: get osd data mount points shell: "(grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'" register: mounted_osd @@ -263,19 +273,29 @@ osd_group_name in group_names and remove_osd_mountpoints.rc != 0 + - name: see if ceph-disk is installed + shell: "which ceph-disk" + failed_when: false + register: ceph_disk_present + - name: zap osd disks shell: ceph-disk zap "{{ item }}" with_items: devices when: - osd_group_name in group_names + osd_group_name in group_names and + ceph_disk_present.rc == 0 and + ceph_data_partlabels.rc == 0 and + zap_block_devs - name: zap journal devices shell: ceph-disk zap "{{ item }}" with_items: "{{ raw_journal_devices|default([])|unique }}" when: osd_group_name in group_names and - raw_multi_journal and - zap_journal_disks + ceph_disk_present.rc == 0 and + ceph_journal_partlabels.rc == 0 and + zap_block_devs and + raw_multi_journal - name: purge ceph packages with yum yum: From 1efe62252c3c6543f3c05a76fe925c6c3def77e8 Mon Sep 17 00:00:00 2001 From: "Chris St. Pierre" Date: Thu, 7 Apr 2016 09:11:50 -0500 Subject: [PATCH 02/10] Deduplicate RBD client directory creation Instead of creating the RBD client socket path three different places in three different ways, this creates it once. Ceph on OpenStack users have the option to customize the permissions of the RBD client directories. Fixes #687 --- group_vars/all.sample | 36 +++++++++++- roles/ceph-common/defaults/main.yml | 36 +++++++++++- .../tasks/installs/install_on_debian.yml | 12 ---- .../tasks/installs/install_on_redhat.yml | 12 ---- roles/ceph-common/tasks/main.yml | 58 +++++++++++++++---- 5 files changed, 112 insertions(+), 42 deletions(-) diff --git a/group_vars/all.sample b/group_vars/all.sample index 785a8986b..c80db381e 100644 --- a/group_vars/all.sample +++ b/group_vars/all.sample @@ -182,11 +182,41 @@ dummy: #rbd_cache: "true" #rbd_cache_writethrough_until_flush: "true" #rbd_concurrent_management_ops: 20 + #rbd_client_directories: false # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions -#rbd_client_directory_user: qemu -#rbd_client_directory_group: libvirtd -#rbd_client_log_file: /var/log/rbd-clients/qemu-guest-$pid.log # must be writable by QEMU and allowed by SELinux or AppArmor + +# Permissions for the rbd_client_log_path and +# rbd_client_admin_socket_path. Depending on your use case for Ceph +# you may want to change these values. The default, which is used if +# any of the variables are unset or set to a false value (like `null` +# or `false`) is to automatically determine what is appropriate for +# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770 +# for infernalis releases, and root:root and 1777 for pre-infernalis +# releases. 
+# +# For other use cases, including running Ceph with OpenStack, you'll +# want to set these differently: +# +# For OpenStack on RHEL, you'll want: +# rbd_client_directory_owner: "qemu" +# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt) +# rbd_client_directory_mode: "0755" +# +# For OpenStack on Ubuntu or Debian, set: +# rbd_client_directory_owner: "libvirt-qemu" +# rbd_client_directory_group: "kvm" +# rbd_client_directory_mode: "0755" +# +# If you set rbd_client_directory_mode, you must use a string (e.g., +# 'rbd_client_directory_mode: "0755"', *not* +# 'rbd_client_directory_mode: 0755', or Ansible will complain: mode +# must be in octal or symbolic form +#rbd_client_directory_owner: null +#rbd_client_directory_group: null +#rbd_client_directory_mode: null + #rbd_client_log_path: /var/log/rbd-clients/ +#rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor #rbd_client_admin_socket_path: /var/run/ceph/rbd-clients # must be writable by QEMU and allowed by SELinux or AppArmor #rbd_default_features: 3 #rbd_default_map_options: rw diff --git a/roles/ceph-common/defaults/main.yml b/roles/ceph-common/defaults/main.yml index 8ae560b49..ffe427d32 100644 --- a/roles/ceph-common/defaults/main.yml +++ b/roles/ceph-common/defaults/main.yml @@ -174,11 +174,41 @@ debug_mds_level: 20 rbd_cache: "true" rbd_cache_writethrough_until_flush: "true" rbd_concurrent_management_ops: 20 + rbd_client_directories: false # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions -rbd_client_directory_user: qemu -rbd_client_directory_group: libvirtd -rbd_client_log_file: /var/log/rbd-clients/qemu-guest-$pid.log # must be writable by QEMU and allowed by SELinux or AppArmor + +# Permissions for the rbd_client_log_path and +# rbd_client_admin_socket_path. Depending on your use case for Ceph +# you may want to change these values. The default, which is used if +# any of the variables are unset or set to a false value (like `null` +# or `false`) is to automatically determine what is appropriate for +# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770 +# for infernalis releases, and root:root and 1777 for pre-infernalis +# releases. 
+# +# For other use cases, including running Ceph with OpenStack, you'll +# want to set these differently: +# +# For OpenStack on RHEL, you'll want: +# rbd_client_directory_owner: "qemu" +# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt) +# rbd_client_directory_mode: "0755" +# +# For OpenStack on Ubuntu or Debian, set: +# rbd_client_directory_owner: "libvirt-qemu" +# rbd_client_directory_group: "kvm" +# rbd_client_directory_mode: "0755" +# +# If you set rbd_client_directory_mode, you must use a string (e.g., +# 'rbd_client_directory_mode: "0755"', *not* +# 'rbd_client_directory_mode: 0755', or Ansible will complain: mode +# must be in octal or symbolic form +rbd_client_directory_owner: null +rbd_client_directory_group: null +rbd_client_directory_mode: null + rbd_client_log_path: /var/log/rbd-clients/ +rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor rbd_client_admin_socket_path: /var/run/ceph/rbd-clients # must be writable by QEMU and allowed by SELinux or AppArmor rbd_default_features: 3 rbd_default_map_options: rw diff --git a/roles/ceph-common/tasks/installs/install_on_debian.yml b/roles/ceph-common/tasks/installs/install_on_debian.yml index 705e295a2..64e950ede 100644 --- a/roles/ceph-common/tasks/installs/install_on_debian.yml +++ b/roles/ceph-common/tasks/installs/install_on_debian.yml @@ -45,15 +45,3 @@ default_release: "{{ ansible_distribution_release }}{{ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else ''}}" when: mds_group_name in group_names - -- name: configure rbd clients directories - file: - path: "{{ item }}" - state: directory - owner: libvirt-qemu - group: kvm - mode: 0755 - with_items: - - rbd_client_log_path - - rbd_client_admin_socket_path - when: rbd_client_directories diff --git a/roles/ceph-common/tasks/installs/install_on_redhat.yml b/roles/ceph-common/tasks/installs/install_on_redhat.yml index 5a346e75d..b098cf951 100644 --- a/roles/ceph-common/tasks/installs/install_on_redhat.yml +++ b/roles/ceph-common/tasks/installs/install_on_redhat.yml @@ -143,15 +143,3 @@ when: rgw_group_name in group_names and ansible_pkg_mgr == "dnf" - -- name: configure rbd clients directories - file: - path: "{{ item }}" - state: directory - owner: "{{ rbd_client_directory_user }}" - group: "{{ rbd_client_directory_group }}" - mode: 0755 - with_items: - - rbd_client_log_path - - rbd_client_admin_socket_path - when: rbd_client_directories diff --git a/roles/ceph-common/tasks/main.yml b/roles/ceph-common/tasks/main.yml index fe91e03b6..bf99f4fe5 100644 --- a/roles/ceph-common/tasks/main.yml +++ b/roles/ceph-common/tasks/main.yml @@ -120,16 +120,46 @@ when: not is_ceph_infernalis - set_fact: - rbd_client_dir_owner: root - rbd_client_dir_group: root - rbd_client_dir_mode: "1777" - when: not is_ceph_infernalis + rbd_client_directory_owner: root + when: + not is_ceph_infernalis and + (rbd_client_directory_owner is not defined or + not rbd_client_directory_owner) - set_fact: - rbd_client_dir_owner: ceph - rbd_client_dir_group: ceph - rbd_client_dir_mode: "0770" - when: is_ceph_infernalis + rbd_client_directory_owner: ceph + when: + is_ceph_infernalis and + (rbd_client_directory_owner is not defined or + not rbd_client_directory_owner) + +- set_fact: + rbd_client_directory_group: root + when: + not is_ceph_infernalis and + (rbd_client_directory_group is not defined or + not rbd_client_directory_group) + +- set_fact: + rbd_client_directory_group: 
ceph + when: + is_ceph_infernalis and + (rbd_client_directory_group is not defined or + not rbd_client_directory_group) + +- set_fact: + rbd_client_directory_mode: "1777" + when: + not is_ceph_infernalis and + (rbd_client_directory_mode is not defined or + not rbd_client_directory_mode) + +- set_fact: + rbd_client_directory_mode: "0770" + when: + is_ceph_infernalis and + (rbd_client_directory_mode is not defined or + not rbd_client_directory_mode) - name: check for a ceph socket shell: "stat /var/run/ceph/*.asok > /dev/null 2>&1" @@ -202,11 +232,15 @@ - name: create rbd client directory file: - path: "{{ rbd_client_admin_socket_path }}" + path: "{{ item }}" state: directory - owner: "{{ rbd_client_dir_owner }}" - group: "{{ rbd_client_dir_group }}" - mode: "{{ rbd_client_dir_mode }}" + owner: "{{ rbd_client_directory_owner }}" + group: "{{ rbd_client_directory_group }}" + mode: "{{ rbd_client_directory_mode }}" + with_items: + - rbd_client_admin_socket_path + - rbd_client_log_path + when: rbd_client_directories - name: configure cluster name lineinfile: From 441d83fe77bd0351d93efe1e9472014c05648d76 Mon Sep 17 00:00:00 2001 From: Alfredo Deza Date: Mon, 11 Apr 2016 10:28:03 -0400 Subject: [PATCH 03/10] if ceph-disk fails to activate an OSD then bubble up the error Signed-off-by: Alfredo Deza --- roles/ceph-osd/tasks/activate_osds.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/roles/ceph-osd/tasks/activate_osds.yml b/roles/ceph-osd/tasks/activate_osds.yml index 0ee388b5c..47e488cfc 100644 --- a/roles/ceph-osd/tasks/activate_osds.yml +++ b/roles/ceph-osd/tasks/activate_osds.yml @@ -20,11 +20,19 @@ - devices changed_when: false failed_when: false + register: activate_osd_disk when: not item.0.get("skipped") and item.0.get("rc", 0) != 0 and not osd_auto_discovery +- name: fail if ceph-disk cannot create an OSD + fail: + msg: "ceph-disk failed to create an OSD" + when: + " 'ceph-disk: Error: ceph osd create failed' in item.get('stderr', '') " + with_items: "{{activate_osd_disk.results}}" + # NOTE (leseb): this task is for partitions because we don't explicitly use a partition. 
- name: activate osd(s) when device is a partition command: "ceph-disk activate {{ item.1 }}" From 3d671cef229c2b4f12cce6395fbc6badfd1c3ef1 Mon Sep 17 00:00:00 2001 From: Alfredo Deza Date: Mon, 11 Apr 2016 15:48:29 -0400 Subject: [PATCH 04/10] remove agent role, no packages for it upstream yet Signed-off-by: Alfredo Deza --- test.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/test.yml b/test.yml index f5feddb40..fda731c1f 100644 --- a/test.yml +++ b/test.yml @@ -8,6 +8,5 @@ - ceph-mds - ceph-rgw - ceph-fetch-keys - - ceph-agent - ceph-common-coreos - ceph-rbd-mirror From 8de1c32bb632a86f1ebec16ced00366dbacc1d90 Mon Sep 17 00:00:00 2001 From: Ben England Date: Mon, 11 Apr 2016 16:53:45 -0400 Subject: [PATCH 05/10] this is how ceph-disk partition really looks --- purge-cluster.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/purge-cluster.yml b/purge-cluster.yml index c2682455c..496afec28 100644 --- a/purge-cluster.yml +++ b/purge-cluster.yml @@ -241,12 +241,12 @@ failed_when: check_for_running_ceph.rc == 0 - name: see if ceph-disk-created data partitions are present - shell: "ls /dev/disk/by-partlabel | grep -q 'ceph data'" + shell: "ls /dev/disk/by-partlabel | grep -q 'ceph\\\\x20data'" failed_when: false register: ceph_data_partlabels - name: see if ceph-disk-created journal partitions are present - shell: "ls /dev/disk/by-partlabel | grep -q 'ceph journal'" + shell: "ls /dev/disk/by-partlabel | grep -q 'ceph\\\\x20journal'" failed_when: false register: ceph_journal_partlabels From 68460cf3845064ba9ce1275704e73e8862f20986 Mon Sep 17 00:00:00 2001 From: John Fulton Date: Mon, 11 Apr 2016 22:25:53 -0400 Subject: [PATCH 06/10] Balance tags on <host> <host didn't have a closing tag. This is a tiny change to add the missing '>'. This will let people quickly update this file with sed. For example: `sed s/\<host\>/192.168.1.41/g -i cluster-maintenance.yml`.
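To illustrate (a hypothetical substitution; 192.168.1.41 is just the example address from the sed command above, and any address reachable from the Ansible host would do), the task would then read:

    - name: Wait for the server to come up
      local_action: >
        wait_for host=192.168.1.41 port=22 delay=10 timeout=3600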
--- cluster-maintenance.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster-maintenance.yml b/cluster-maintenance.yml index 3b7d2a9c6..c559ed62f 100644 --- a/cluster-maintenance.yml +++ b/cluster-maintenance.yml @@ -27,7 +27,7 @@ - name: Wait for the server to come up local_action: > - wait_for host=<host port=22 delay=10 timeout=3600 + wait_for host=<host> port=22 delay=10 timeout=3600 From 7424ad6d683f232838d22243f6988637676e5658 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Tue, 12 Apr 2016 12:01:53 +0200 Subject: [PATCH 07/10] docker: do not hardcode image and user name MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Sébastien Han --- roles/ceph-mds/tasks/docker/dirs_permissions.yml | 2 +- roles/ceph-mon/tasks/docker/dirs_permissions.yml | 2 +- roles/ceph-osd/tasks/docker/dirs_permissions.yml | 2 +- roles/ceph-restapi/tasks/docker/dirs_permissions.yml | 2 +- roles/ceph-rgw/tasks/docker/dirs_permissions.yml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/roles/ceph-mds/tasks/docker/dirs_permissions.yml b/roles/ceph-mds/tasks/docker/dirs_permissions.yml index ba5818294..258a17967 100644 --- a/roles/ceph-mds/tasks/docker/dirs_permissions.yml +++ b/roles/ceph-mds/tasks/docker/dirs_permissions.yml @@ -7,7 +7,7 @@ # NOTE (leseb): we can not use docker inspect with 'format filed' because of # https://github.com/ansible/ansible/issues/10156 - name: inspect ceph version - shell: docker inspect docker.io/ceph/daemon | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq + shell: docker inspect docker.io/"{{ ceph_mds_docker_username }}"/"{{ ceph_mds_docker_imagename }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq changed_when: false failed_when: false run_once: true diff --git a/roles/ceph-mon/tasks/docker/dirs_permissions.yml b/roles/ceph-mon/tasks/docker/dirs_permissions.yml index a33a38edf..99f1e9a4f 100644 --- a/roles/ceph-mon/tasks/docker/dirs_permissions.yml +++ b/roles/ceph-mon/tasks/docker/dirs_permissions.yml @@ -7,7 +7,7 @@ # NOTE (leseb): we can not use docker inspect with 'format filed' because of # https://github.com/ansible/ansible/issues/10156 - name: inspect ceph version - shell: docker inspect docker.io/ceph/daemon | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq + shell: docker inspect docker.io/"{{ ceph_mon_docker_username }}"/"{{ ceph_mon_docker_imagename }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq changed_when: false failed_when: false run_once: true diff --git a/roles/ceph-osd/tasks/docker/dirs_permissions.yml b/roles/ceph-osd/tasks/docker/dirs_permissions.yml index d0c49cf9c..785f06898 100644 --- a/roles/ceph-osd/tasks/docker/dirs_permissions.yml +++ b/roles/ceph-osd/tasks/docker/dirs_permissions.yml @@ -7,7 +7,7 @@ # NOTE (leseb): we can not use docker inspect with 'format filed' because of # https://github.com/ansible/ansible/issues/10156 - name: inspect ceph version - shell: docker inspect docker.io/ceph/daemon | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq + shell: docker inspect docker.io/"{{ ceph_osd_docker_username }}"/"{{ ceph_osd_docker_imagename }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq changed_when: false failed_when: false run_once: true diff --git a/roles/ceph-restapi/tasks/docker/dirs_permissions.yml b/roles/ceph-restapi/tasks/docker/dirs_permissions.yml index 99dbe4094..aba95709f 100644 ---
a/roles/ceph-restapi/tasks/docker/dirs_permissions.yml +++ b/roles/ceph-restapi/tasks/docker/dirs_permissions.yml @@ -1,6 +1,6 @@ --- - name: inspect ceph version - shell: "docker inspect --format '{{ index (index .Config.Env) 3 }}' docker.io/{{ ceph_mon_docker_username }}/{{ ceph_mon_docker_imagename }} | cut -d '=' -f '2'" + shell: docker inspect docker.io/"{{ ceph_restapi_docker_username }}"/"{{ ceph_restapi_docker_imagename }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq changed_when: false failed_when: false run_once: true diff --git a/roles/ceph-rgw/tasks/docker/dirs_permissions.yml b/roles/ceph-rgw/tasks/docker/dirs_permissions.yml index d4046e616..8d53d815a 100644 --- a/roles/ceph-rgw/tasks/docker/dirs_permissions.yml +++ b/roles/ceph-rgw/tasks/docker/dirs_permissions.yml @@ -7,7 +7,7 @@ # NOTE (leseb): we can not use docker inspect with 'format filed' because of # https://github.com/ansible/ansible/issues/10156 - name: inspect ceph version - shell: docker inspect docker.io/ceph/daemon | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq + shell: docker inspect docker.io/"{{ ceph_rgw_docker_username }}"/"{{ ceph_rgw_docker_imagename }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq changed_when: false failed_when: false run_once: true From 8df3029d47faa7906f768beb569acdf8e3b756e5 Mon Sep 17 00:00:00 2001 From: Alfredo Deza Date: Tue, 12 Apr 2016 07:17:35 -0400 Subject: [PATCH 08/10] remove coreos from default testing as well Signed-off-by: Alfredo Deza --- test.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/test.yml b/test.yml index fda731c1f..fb5914de4 100644 --- a/test.yml +++ b/test.yml @@ -8,5 +8,4 @@ - ceph-mds - ceph-rgw - ceph-fetch-keys - - ceph-common-coreos - ceph-rbd-mirror From be752b595c2e3c3e4cda90a04edcb69856b1689d Mon Sep 17 00:00:00 2001 From: Alfredo Deza Date: Tue, 12 Apr 2016 07:38:13 -0400 Subject: [PATCH 09/10] remove rbd-mirror as well from testing. No packages are available yet Signed-off-by: Alfredo Deza --- test.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/test.yml b/test.yml index fb5914de4..ed6b5d5fd 100644 --- a/test.yml +++ b/test.yml @@ -8,4 +8,3 @@ - ceph-mds - ceph-rgw - ceph-fetch-keys - - ceph-rbd-mirror From 4d70b3621ffcd49184d6ae492bc21cf6b2244d58 Mon Sep 17 00:00:00 2001 From: Ben England Date: Tue, 12 Apr 2016 09:49:44 -0400 Subject: [PATCH 10/10] devices var is defined in context of osds role --- purge-cluster.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/purge-cluster.yml b/purge-cluster.yml index 496afec28..ee68d5c71 100644 --- a/purge-cluster.yml +++ b/purge-cluster.yml @@ -96,6 +96,7 @@ fail: msg: "OSD automatic discovery was detected, purge cluster does not support this scenario. If you want to purge the cluster, manually provide the list of devices in group_vars/osds using the devices variable." when: + osd_group_name in group_names and devices is not defined and osd_auto_discovery
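For context, a sketch of the resulting task in purge-cluster.yml once this hunk is applied (the fail message is shortened here):

    - fail:
        msg: "OSD automatic discovery was detected, purge cluster does not support this scenario. ..."
      when:
        osd_group_name in group_names and
        devices is not defined and
        osd_auto_discovery

Checking osd_group_name in group_names first keeps non-OSD hosts from ever evaluating the devices variable, which is only defined in group_vars for the osds role.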