diff --git a/roles/ceph-common/defaults/main.yml b/roles/ceph-common/defaults/main.yml
index 5160fe6ee..48d2e9b95 100644
--- a/roles/ceph-common/defaults/main.yml
+++ b/roles/ceph-common/defaults/main.yml
@@ -80,7 +80,7 @@ rbd_concurrent_management_ops: 20
 rbd_client_directories: false # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions, this WON'T work if libvirt and kvm are installed
 rbd_client_log_file: /var/log/rbd-clients/qemu-guest-$pid.log # must be writable by QEMU and allowed by SELinux or AppArmor
 rbd_client_log_path: /var/log/rbd-clients/
-rbd_client_admin_socket_path: /var/run/ceph/rbd-clients/ # must be writable by QEMU and allowed by SELinux or AppArmor
+rbd_client_admin_socket_path: /var/run/ceph/rbd-clients # must be writable by QEMU and allowed by SELinux or AppArmor
 
 ## Monitor options
 #
diff --git a/roles/ceph-common/handlers/main.yml b/roles/ceph-common/handlers/main.yml
index 220846663..0fd2f023a 100644
--- a/roles/ceph-common/handlers/main.yml
+++ b/roles/ceph-common/handlers/main.yml
@@ -6,12 +6,21 @@
   command: service ceph restart mon
   when:
     socket.rc == 0 and
+    ansible_distribution != 'Ubuntu' and
+    mon_group_name in group_names
+
+- name: restart ceph mons on ubuntu
+  command: restart ceph-mon-all
+  when:
+    socket.rc == 0 and
+    ansible_distribution == 'Ubuntu' and
     mon_group_name in group_names
 
 - name: restart ceph osds
   command: service ceph restart osd
   when:
     socket.rc == 0 and
+    ansible_distribution != 'Ubuntu' and
     osd_group_name in group_names
 
 - name: restart ceph osds on ubuntu
@@ -21,14 +30,37 @@
     ansible_distribution == 'Ubuntu' and
     osd_group_name in group_names
 
+- name: restart ceph mdss on ubuntu
+  command: restart ceph-mds-all
+  when:
+    socket.rc == 0 and
+    ansible_distribution == 'Ubuntu' and
+    mds_group_name in group_names
+
 - name: restart ceph mdss
   command: service ceph restart mds
   when:
     socket.rc == 0 and
+    ansible_distribution != 'Ubuntu' and
     mds_group_name in group_names
 
+- name: restart ceph rgws on ubuntu
+  command: restart ceph-all
+  when:
+    socketrgw.rc == 0 and
+    ansible_distribution == 'Ubuntu' and
+    rgw_group_name in group_names
+
 - name: restart ceph rgws
   command: /etc/init.d/radosgw restart
   when:
-    socket.rc == 0 and
+    socketrgw.rc == 0 and
+    ansible_distribution != 'Ubuntu' and
+    rgw_group_name in group_names
+
+- name: restart ceph rgws on red hat
+  command: /etc/init.d/ceph-radosgw restart
+  when:
+    socketrgw.rc == 0 and
+    ansible_os_family == 'RedHat' and
     rgw_group_name in group_names
diff --git a/roles/ceph-common/tasks/main.yml b/roles/ceph-common/tasks/main.yml
index bfac9d0b6..e1eb44bfb 100644
--- a/roles/ceph-common/tasks/main.yml
+++ b/roles/ceph-common/tasks/main.yml
@@ -18,6 +18,12 @@
   ignore_errors: true
   register: socket
 
+- name: check for a rados gateway socket
+  shell: "stat {{ rbd_client_admin_socket_path }}*.asok > /dev/null 2>&1"
+  changed_when: false
+  ignore_errors: true
+  register: socketrgw
+
 - name: generate cluster UUID
   shell: >
     uuidgen | tee fetch/ceph_cluster_uuid.conf
@@ -46,10 +52,14 @@
     mode=0644
   notify:
     - restart ceph mons
+    - restart ceph mons on ubuntu
     - restart ceph osds
     - restart ceph osds on ubuntu
     - restart ceph mdss
+    - restart ceph mdss on ubuntu
     - restart ceph rgws
+    - restart ceph rgws on ubuntu
+    - restart ceph rgws on red hat
 
 - name: create rbd client directory
   file: >