From 0476b24af1e5fe6ddf0123144ae48eca3c60c73f Mon Sep 17 00:00:00 2001
From: Andrew Schoen
Date: Tue, 7 Feb 2017 11:57:38 -0600
Subject: [PATCH 1/3] purge-cluster: do not use ceph-detect-init

We cannot always ensure that ceph-detect-init will be present on the
system.

See: https://bugzilla.redhat.com/show_bug.cgi?id=1418980

Signed-off-by: Andrew Schoen
---
 infrastructure-playbooks/purge-cluster.yml | 44 +++++++++++-----------
 1 file changed, 21 insertions(+), 23 deletions(-)

diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml
index e63cf3ab2..083fda996 100644
--- a/infrastructure-playbooks/purge-cluster.yml
+++ b/infrastructure-playbooks/purge-cluster.yml
@@ -31,7 +31,7 @@
        invoking the playbook"
     when: ireallymeanit != 'yes'

-- name: gather facts and check init system
+- name: gather facts on all hosts

   vars:
     mon_group_name: mons
@@ -54,9 +54,7 @@
   become: true

   tasks:
-    - name: detect init system
-      command: ceph-detect-init
-      register: init_system
+    - debug: msg="gather facts on all Ceph hosts for following reference"

 - name: purge ceph mds cluster
@@ -83,16 +81,16 @@
         name: ceph-mds@{{ ansible_hostname }}
         state: stopped
         enabled: no
-      when: init_system.stdout == 'systemd'
+      when: ansible_service_mgr == 'systemd'

     - name: stop ceph mdss
       shell: "service ceph status mds ; if [ $? == 0 ] ; then service ceph stop mds ; else echo ; fi"
-      when: init_system.stdout == 'sysvinit'
+      when: ansible_service_mgr == 'sysvinit'

     - name: stop ceph mdss on ubuntu
       command: initctl stop ceph-mds cluster={{ cluster }} id={{ ansible_hostname }}
       failed_when: false
-      when: init_system.stdout == 'upstart'
+      when: ansible_service_mgr == 'upstart'

 - name: purge ceph rgw cluster
@@ -120,16 +118,16 @@
         name: ceph-radosgw@rgw.{{ ansible_hostname }}
         state: stopped
         enabled: no
-      when: init_system.stdout == 'systemd'
+      when: ansible_service_mgr == 'systemd'

     - name: stop ceph rgws
       shell: "service ceph-radosgw status ; if [ $? == 0 ] ; then service ceph-radosgw stop ; else echo ; fi"
-      when: init_system.stdout == 'sysvinit'
+      when: ansible_service_mgr == 'sysvinit'

     - name: stop ceph rgws on ubuntu
       command: initctl stop radosgw cluster={{ cluster }} id={{ ansible_hostname }}
       failed_when: false
-      when: init_system.stdout == 'upstart'
+      when: ansible_service_mgr == 'upstart'

 - name: purge ceph rbd-mirror cluster
@@ -156,12 +154,12 @@
       service:
         name: ceph-rbd-mirror@admin.service
         state: stopped
-      when: init_system.stdout == 'systemd'
+      when: ansible_service_mgr == 'systemd'

     - name: stop ceph rbd mirror on ubuntu
       command: initctl stop ceph-rbd-mirror cluster={{ cluster }} id=admin
       failed_when: false
-      when: init_system.stdout == 'upstart'
+      when: ansible_service_mgr == 'upstart'

 - name: purge ceph nfs cluster
@@ -188,16 +186,16 @@
       service:
         name: nfs-ganesha
         state: stopped
-      when: init_system.stdout == 'systemd'
+      when: ansible_service_mgr == 'systemd'

     - name: stop ceph nfss
       shell: "service nfs-ganesha status ; if [ $? == 0 ] ; then service nfs-ganesha stop ; else echo ; fi"
-      when: init_system.stdout == 'sysvinit'
+      when: ansible_service_mgr == 'sysvinit'

     - name: stop ceph nfss on ubuntu
       command: initctl stop nfs-ganesha
       failed_when: false
-      when: init_system.stdout == 'upstart'
+      when: ansible_service_mgr == 'upstart'

 - name: purge ceph osd cluster
@@ -252,7 +250,7 @@
         state: stopped
         enabled: no
       with_items: "{{ osd_ids.stdout_lines }}"
-      when: init_system.stdout == 'systemd'
+      when: ansible_service_mgr == 'systemd'

     # before infernalis release, using sysvinit scripts
     # we use this test so we do not have to know which RPM contains the boot script
@@ -260,7 +258,7 @@
     - name: stop ceph osds
       shell: "service ceph status osd ; if [ $? == 0 ] ; then service ceph stop osd ; else echo ; fi"
-      when: init_system.stdout == 'sysvinit'
+      when: ansible_service_mgr == 'sysvinit'

     - name: stop ceph osds on ubuntu
       shell: |
@@ -268,7 +266,7 @@
         initctl stop ceph-osd cluster={{ cluster }} id=$id
         done
       failed_when: false
-      when: init_system.stdout == 'upstart'
+      when: ansible_service_mgr == 'upstart'
       with_items: "{{ osd_ids.stdout_lines }}"

     - name: see if ceph-disk-created data partitions are present
@@ -412,16 +410,16 @@
         name: ceph-mon@{{ ansible_hostname }}
         state: stopped
         enabled: no
-      when: init_system.stdout == 'systemd'
+      when: ansible_service_mgr == 'systemd'

     - name: stop ceph mons
       shell: "service ceph status mon ; if [ $? == 0 ] ; then service ceph stop mon ; else echo ; fi"
-      when: init_system.stdout == 'sysvinit'
+      when: ansible_service_mgr == 'sysvinit'

     - name: stop ceph mons on ubuntu
       command: initctl stop ceph-mon cluster={{ cluster }} id={{ ansible_hostname }}
       failed_when: false
-      when: init_system.stdout == 'upstart'
+      when: ansible_service_mgr == 'upstart'

     - name: remove monitor store and bootstrap keys
       file:
@@ -555,11 +553,11 @@
     - name: remove from sysv
       shell: "update-rc.d -f ceph remove"
-      when: init_system.stdout == 'sysvinit'
+      when: ansible_service_mgr == 'sysvinit'

     - name: remove upstart and sysv files
       shell: "find /etc -name '*ceph*' -delete"
-      when: init_system.stdout == 'upstart'
+      when: ansible_service_mgr == 'upstart'

     - name: remove upstart and apt logs and cache
       shell: "find /var -name '*ceph*' -delete"
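
A note on the mechanism the patch above relies on: ansible_service_mgr is a
fact collected automatically by Ansible's setup module when fact gathering is
enabled, so any play can branch on the host's init system without shelling out
to ceph-detect-init. A minimal sketch of the pattern, assuming a standard
inventory; the play and the ceph-mon service name here are illustrative, not
taken from the playbook:

    - name: demonstrate branching on the detected service manager
      hosts: all
      become: true
      tasks:
        # ansible_service_mgr comes from fact gathering; no extra
        # package is needed on the target host
        - name: report the init system ansible detected
          debug:
            msg: "this host runs {{ ansible_service_mgr }}"

        # the same conditional pattern the patch uses throughout
        - name: stop a ceph monitor via systemd
          service:
            name: ceph-mon@{{ ansible_hostname }}
            state: stopped
          when: ansible_service_mgr == 'systemd'
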
From adf6aee6430b981fa371724367b2d1e2b80482b7 Mon Sep 17 00:00:00 2001
From: Andrew Schoen
Date: Tue, 7 Feb 2017 14:38:02 -0600
Subject: [PATCH 2/3] purge-cluster: remove all include tasks

Including variables from role defaults or files in a group_vars
directory relative to the playbook is a bad practice. We don't want to
do this because including these defaults at the task level overrides
values that would be set in a group_vars directory relative to the
inventory file, which is the correct usage if you wish to override
those default values.

Signed-off-by: Andrew Schoen
---
 infrastructure-playbooks/purge-cluster.yml | 54 ----------------------
 1 file changed, 54 deletions(-)

diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml
index 083fda996..64f564685 100644
--- a/infrastructure-playbooks/purge-cluster.yml
+++ b/infrastructure-playbooks/purge-cluster.yml
@@ -69,12 +69,6 @@
   become: true

   tasks:
-    - include_vars: roles/ceph-common/defaults/main.yml
-    - include_vars: roles/ceph-mds/defaults/main.yml
-    - include_vars: group_vars/all.yml
-      ignore_errors: true
-    - include_vars: group_vars/{{ mds_group_name }}.yml
-      ignore_errors: true

     - name: stop ceph mdss with systemd
       service:
@@ -106,12 +100,6 @@
   become: true

   tasks:
-    - include_vars: roles/ceph-common/defaults/main.yml
-    - include_vars: roles/ceph-rgw/defaults/main.yml
-    - include_vars: group_vars/all.yml
-      ignore_errors: true
-    - include_vars: group_vars/{{ rgw_group_name }}.yml
-      ignore_errors: true

     - name: stop ceph rgws with systemd
       service:
@@ -143,12 +131,6 @@
   become: true

   tasks:
-    - include_vars: roles/ceph-common/defaults/main.yml
-    - include_vars: roles/ceph-rbd-mirror/defaults/main.yml
-    - include_vars: group_vars/all.yml
-      ignore_errors: true
-    - include_vars: group_vars/{{ rbdmirror_group_name }}.yml
-      ignore_errors: true

     - name: stop ceph rbd mirror with systemd
       service:
@@ -175,12 +157,6 @@
   become: true

   tasks:
-    - include_vars: roles/ceph-common/defaults/main.yml
-    - include_vars: roles/ceph-nfs/defaults/main.yml
-    - include_vars: group_vars/all.yml
-      ignore_errors: true
-    - include_vars: group_vars/{{ nfs_group_name }}.yml
-      ignore_errors: true

     - name: stop ceph nfss with systemd
       service:
@@ -227,10 +203,6 @@
       state: absent

   tasks:
-    - include_vars: group_vars/all.yml
-      ignore_errors: true
-    - include_vars: group_vars/{{ osd_group_name }}.yml
-      ignore_errors: true

     - name: check for a device list
       fail:
@@ -395,15 +367,6 @@
   become: true

   tasks:
-    - include_vars: roles/ceph-common/defaults/main.yml
-    - include_vars: roles/ceph-mon/defaults/main.yml
-    - include_vars: roles/ceph-restapi/defaults/main.yml
-    - include_vars: group_vars/all.yml
-      ignore_errors: true
-    - include_vars: group_vars/{{ mon_group_name }}.yml
-      ignore_errors: true
-    - include_vars: group_vars/{{ restapi_group_name }}.yml
-      ignore_errors: true

     - name: stop ceph mons with systemd
       service:
@@ -607,23 +570,6 @@
   gather_facts: false

   tasks:
-    - include_vars: roles/ceph-common/defaults/main.yml
-    - include_vars: group_vars/all.yml
-      ignore_errors: true
-    - include_vars: group_vars/{{ mds_group_name }}.yml
-      ignore_errors: true
-    - include_vars: group_vars/{{ rgw_group_name }}.yml
-      ignore_errors: true
-    - include_vars: group_vars/{{ rbdmirror_group_name }}.yml
-      ignore_errors: true
-    - include_vars: group_vars/{{ nfs_group_name }}.yml
-      ignore_errors: true
-    - include_vars: group_vars/{{ osd_group_name }}.yml
-      ignore_errors: true
-    - include_vars: group_vars/{{ mon_group_name }}.yml
-      ignore_errors: true
-    - include_vars: group_vars/{{ restapi_group_name }}.yml
-      ignore_errors: true

     - name: purge fetch directory for localhost
       file:
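
The variable precedence behind the patch above: role defaults are the
lowest-priority source in Ansible, group_vars kept beside the inventory rank
above them, and variables loaded with include_vars at the task level rank
above both, which is why the removed tasks silently clobbered operator
overrides. A sketch of the supported override layout, assuming an inventory
file named hosts; the directory names and values are illustrative:

    # layout next to the inventory file, not next to the playbook:
    # .
    # |-- hosts            (inventory: [mons], [osds], ...)
    # `-- group_vars/
    #     |-- all.yml
    #     `-- mons.yml
    #
    # group_vars/all.yml -- with the include_vars tasks gone, values
    # set here take effect again when the purge playbook runs:
    cluster: ceph
    fetch_directory: /tmp/ceph-fetch
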
From 865b4500dce733dd3f636f5d7675bb685c6be070 Mon Sep 17 00:00:00 2001
From: Andrew Schoen
Date: Tue, 7 Feb 2017 14:42:42 -0600
Subject: [PATCH 3/3] purge-cluster: set a default value for fetch_directory
 if not defined

Signed-off-by: Andrew Schoen
---
 infrastructure-playbooks/purge-cluster.yml | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml
index 64f564685..d26880539 100644
--- a/infrastructure-playbooks/purge-cluster.yml
+++ b/infrastructure-playbooks/purge-cluster.yml
@@ -571,6 +571,11 @@
   tasks:

+    - name: set fetch_directory value if not set
+      set_fact:
+        fetch_directory: "fetch/"
+      when: fetch_directory is not defined
+
     - name: purge fetch directory for localhost
       file:
         path: "{{ fetch_directory }}"
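
The set_fact added above only runs when fetch_directory is not already
defined, which makes it behave like a default: facts set with set_fact
normally take high precedence, and the when guard is what keeps
user-supplied values intact. A hedged alternative sketch using Jinja2's
default filter at the point of use (state: absent is assumed from the
surrounding task, which is truncated here):

    # default() supplies the fallback only where the variable is used
    - name: purge fetch directory for localhost
      file:
        path: "{{ fetch_directory | default('fetch/') }}"
        state: absent

The set_fact approach has the advantage that the default is established once
and every later reference to fetch_directory sees it, while the filter would
have to be repeated at each use.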