switch: disable all ceph units

Prior to this commit we were only disabling the ceph-osd units, but
forgot ceph.target, which controls everything and will restart the
ceph-osd units at each reboot.
Now that everything gets disabled, there won't be any conflict between
the old non-container units and the new container units.

Signed-off-by: Sébastien Han <seb@redhat.com>
(cherry picked from commit cd56dad9fa)
pull/3384/head
Sébastien Han 2018-11-16 16:15:24 +01:00
parent 8d0379b4d9
commit 57ac7b94c0
1 changed file with 12 additions and 2 deletions

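For context, a rough manual equivalent of what the play below now automates on a single OSD host could look like the following sketch; the grep pattern is the one from the diff, and ceph-osd@0.service is only a placeholder instance name.

# Sketch of the per-host steps the playbook performs (illustrative only).
# List the still-active non-container ceph units, including ceph.target.
systemctl list-units | grep "loaded active" \
  | grep -Eo 'ceph-osd@[0-9]+.service|ceph-disk@dev-[a-z]{3,4}[0-9]{1}.service|ceph-volume|ceph\.target'
# Stop and disable them so ceph.target cannot bring the old units back at boot.
systemctl stop ceph-osd@0.service ceph.target
systemctl disable ceph-osd@0.service ceph.target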

@@ -199,7 +199,7 @@
   pre_tasks:
     - name: collect running osds and ceph-disk unit(s)
       shell: |
-        systemctl list-units | grep "loaded active" | grep -Eo 'ceph-osd@[0-9]+.service|ceph-disk@dev-[a-z]{3,4}[0-9]{1}.service'
+        systemctl list-units | grep "loaded active" | grep -Eo 'ceph-osd@[0-9]+.service|ceph-disk@dev-[a-z]{3,4}[0-9]{1}.service|ceph-volume|ceph\.target'
       register: running_osds
       changed_when: false
       failed_when: false
@@ -220,7 +220,7 @@
         - not collect_devices.get("skipped")
         - collect_devices != []

-    - name: stop/disable/mask non-containerized ceph osd(s) and ceph-disk units (if any)
+    - name: stop/disable non-containerized ceph osd(s), ceph-disk units (if any) and ceph-volume units (if any)
       systemd:
         name: "{{ item }}"
         state: stopped
@@ -228,6 +228,16 @@
       with_items: "{{ running_osds.stdout_lines | default([])}}"
       when: running_osds != []

+    - name: remove old ceph-osd systemd units
+      file:
+        path: "{{ item }}"
+        state: absent
+      with_items:
+        - /usr/lib/systemd/system/ceph-osd.target
+        - /usr/lib/systemd/system/ceph-osd@.service
+        - /usr/lib/systemd/system/ceph-volume@.service
+        - /etc/systemd/system/ceph.target.wants
+
     - set_fact:
         ceph_uid: 64045
       when: ceph_docker_image_tag | string is match("latest") or ceph_docker_image_tag | string is search("ubuntu")
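After the play runs, a quick check like the following (hypothetical, not part of the commit) can confirm on an OSD host that nothing is left for ceph.target to restart at boot; the paths are the ones removed above, and ceph-osd@0.service is a placeholder.

# Expect "disabled" or a not-found error for the old units, and no wants directory.
systemctl is-enabled ceph.target ceph-osd@0.service
ls /etc/systemd/system/ceph.target.wants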