From 30ce663c22a12b3e28bc1059030a8befdc193d31 Mon Sep 17 00:00:00 2001 From: Alfredo Deza Date: Mon, 25 Jul 2016 10:17:14 -0400 Subject: [PATCH 01/27] purge-cluster: remove -q flag from grep to prevent broken pipes Signed-off-by: Alfredo Deza Resolves: rhbz#1339576 --- purge-cluster.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/purge-cluster.yml b/purge-cluster.yml index 37333b344..27bce4661 100644 --- a/purge-cluster.yml +++ b/purge-cluster.yml @@ -261,7 +261,7 @@ rbdmirror_group_name in group_names - name: check for anything running ceph - shell: "ps awux | grep -v grep | grep -q -- ceph-" + shell: "ps awux | grep -- [c]eph-" register: check_for_running_ceph failed_when: check_for_running_ceph.rc == 0 From 8c67689d082f03bf3a48829ac7f0a0e1fa4e805f Mon Sep 17 00:00:00 2001 From: Ivan Font Date: Thu, 28 Jul 2016 07:42:19 -0700 Subject: [PATCH 02/27] Add option to enable ntp This fixes #845 for containerized deployments. We now also mount the /etc/localtime volume in the containers in order to synchronize the host timezone with the container timezone. 
Signed-off-by: Ivan Font --- .../tasks/checks/check_ntp_atomic.yml | 7 ++++ roles/ceph-common/tasks/misc/ntp_atomic.yml | 11 +++++++ roles/ceph-mds/tasks/docker/main.yml | 18 ++++++++++ roles/ceph-mds/tasks/docker/pre_requisite.yml | 33 ++++++++++++++++++- .../tasks/docker/start_docker_mds.yml | 2 +- roles/ceph-mds/templates/ceph-mds.service.j2 | 1 + roles/ceph-mon/tasks/docker/main.yml | 17 ++++++++++ roles/ceph-mon/tasks/docker/pre_requisite.yml | 32 ++++++++++++++++++ .../tasks/docker/start_docker_monitor.yml | 3 +- roles/ceph-mon/templates/ceph-mon.service.j2 | 1 + roles/ceph-nfs/tasks/docker/main.yml | 17 ++++++++++ roles/ceph-nfs/tasks/docker/pre_requisite.yml | 32 ++++++++++++++++++ .../tasks/docker/start_docker_nfs.yml | 4 +-- roles/ceph-nfs/templates/ceph-nfs.service.j2 | 1 + roles/ceph-osd/tasks/docker/main.yml | 17 ++++++++++ roles/ceph-osd/tasks/docker/pre_requisite.yml | 32 ++++++++++++++++++ .../tasks/docker/start_docker_osd.yml | 6 ++-- roles/ceph-osd/templates/ceph-osd.service.j2 | 1 + roles/ceph-rbd-mirror/tasks/docker/main.yml | 18 ++++++++++ .../tasks/docker/pre_requisite.yml | 32 ++++++++++++++++++ .../tasks/docker/start_docker_rbd_mirror.yml | 2 +- .../templates/ceph-rbd-mirror.service.j2 | 1 + roles/ceph-restapi/tasks/docker/main.yml | 26 +++++++++++++++ .../tasks/docker/pre_requisite.yml | 32 ++++++++++++++++++ .../tasks/docker/start_docker_restapi.yml | 2 +- roles/ceph-rgw/tasks/docker/main.yml | 18 ++++++++++ roles/ceph-rgw/tasks/docker/pre_requisite.yml | 32 ++++++++++++++++++ .../tasks/docker/start_docker_rgw.yml | 2 +- roles/ceph-rgw/templates/ceph-rgw.service.j2 | 1 + 29 files changed, 391 insertions(+), 10 deletions(-) create mode 100644 roles/ceph-common/tasks/checks/check_ntp_atomic.yml create mode 100644 roles/ceph-common/tasks/misc/ntp_atomic.yml diff --git a/roles/ceph-common/tasks/checks/check_ntp_atomic.yml b/roles/ceph-common/tasks/checks/check_ntp_atomic.yml new file mode 100644 index 000000000..e1fbba41c --- /dev/null +++ 
b/roles/ceph-common/tasks/checks/check_ntp_atomic.yml @@ -0,0 +1,7 @@ +--- +- name: check ntp installation on atomic + command: rpm -q chrony + register: ntp_pkg_query + ignore_errors: true + changed_when: false + when: ansible_os_family == 'RedHat' diff --git a/roles/ceph-common/tasks/misc/ntp_atomic.yml b/roles/ceph-common/tasks/misc/ntp_atomic.yml new file mode 100644 index 000000000..3e47f839b --- /dev/null +++ b/roles/ceph-common/tasks/misc/ntp_atomic.yml @@ -0,0 +1,11 @@ +--- +- include: ../checks/check_ntp_atomic.yml + when: ansible_os_family == 'RedHat' + +- name: start the ntp service + service: + name: chronyd + enabled: yes + state: started + when: + - ntp_pkg_query.rc == 0 diff --git a/roles/ceph-mds/tasks/docker/main.yml b/roles/ceph-mds/tasks/docker/main.yml index 0c42388ea..fbae68b89 100644 --- a/roles/ceph-mds/tasks/docker/main.yml +++ b/roles/ceph-mds/tasks/docker/main.yml @@ -17,6 +17,24 @@ when: ceph_health.rc != 0 - include: pre_requisite.yml + +- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml" + when: + - is_atomic + - ansible_os_family == 'RedHat' + - ntp_service_enabled + +- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml" + when: + - not is_atomic + - ansible_os_family == 'RedHat' + - ntp_service_enabled + +- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_debian.yml" + when: + - ansible_os_family == 'Debian' + - ntp_service_enabled + - include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml" vars: ceph_docker_username: "{{ ceph_mds_docker_username }}" diff --git a/roles/ceph-mds/tasks/docker/pre_requisite.yml b/roles/ceph-mds/tasks/docker/pre_requisite.yml index a32d7caf0..972003be3 100644 --- a/roles/ceph-mds/tasks/docker/pre_requisite.yml +++ b/roles/ceph-mds/tasks/docker/pre_requisite.yml @@ -116,7 +116,7 @@ tags: with_pkg when: ansible_version['full'] | version_compare('2.1.0.0', '<') - + - name: install docker-py pip: name: docker-py @@ -125,3 +125,34 
@@ with_pkg when: ansible_version['full'] | version_compare('2.1.0.0', '>=') +- name: install ntp on redhat using yum + yum: + name: ntp + state: present + when: + - ansible_os_family == 'RedHat' + - ansible_pkg_mgr == 'yum' + - ntp_service_enabled + tags: + with_pkg + +- name: install ntp on redhat using dnf + dnf: + name: ntp + state: present + when: + - ansible_os_family == 'RedHat' + - ansible_pkg_mgr == 'dnf' + - ntp_service_enabled + tags: + with_pkg + +- name: install ntp on debian + apt: + name: ntp + state: present + when: + - ansible_os_family == 'Debian' + - ntp_service_enabled + tags: + with_pkg diff --git a/roles/ceph-mds/tasks/docker/start_docker_mds.yml b/roles/ceph-mds/tasks/docker/start_docker_mds.yml index 9f3a7b68c..a62a7d536 100644 --- a/roles/ceph-mds/tasks/docker/start_docker_mds.yml +++ b/roles/ceph-mds/tasks/docker/start_docker_mds.yml @@ -44,5 +44,5 @@ net: host state: running env: "CEPH_DAEMON=MDS,CEPHFS_CREATE=1,{{ ceph_mds_docker_extra_env }}" - volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph" + volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/etc/localtime:/etc/localtime:ro" when: ansible_os_family != 'RedHat' and ansible_os_family != 'CoreOS' diff --git a/roles/ceph-mds/templates/ceph-mds.service.j2 b/roles/ceph-mds/templates/ceph-mds.service.j2 index 283183d56..208c8d294 100644 --- a/roles/ceph-mds/templates/ceph-mds.service.j2 +++ b/roles/ceph-mds/templates/ceph-mds.service.j2 @@ -14,6 +14,7 @@ ExecStart=/usr/bin/docker run --rm --net=host \ -e KV_TYPE={{kv_type}} \ -e KV_IP={{kv_endpoint}} \ {% endif -%} + -v /etc/localtime:/etc/localtime:ro \ --privileged \ -e CEPH_DAEMON=MDS \ -e CEPHFS_CREATE=1 \ diff --git a/roles/ceph-mon/tasks/docker/main.yml b/roles/ceph-mon/tasks/docker/main.yml index d2fc4f8ab..802449acf 100644 --- a/roles/ceph-mon/tasks/docker/main.yml +++ b/roles/ceph-mon/tasks/docker/main.yml @@ -20,6 +20,23 @@ - include: pre_requisite.yml +- include: "{{ playbook_dir 
}}/roles/ceph-common/tasks/misc/ntp_atomic.yml" + when: + - is_atomic + - ansible_os_family == 'RedHat' + - ntp_service_enabled + +- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml" + when: + - not is_atomic + - ansible_os_family == 'RedHat' + - ntp_service_enabled + +- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_debian.yml" + when: + - ansible_os_family == 'Debian' + - ntp_service_enabled + - include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml" vars: ceph_docker_username: "{{ ceph_mon_docker_username }}" diff --git a/roles/ceph-mon/tasks/docker/pre_requisite.yml b/roles/ceph-mon/tasks/docker/pre_requisite.yml index dde220721..9c27ee8cc 100644 --- a/roles/ceph-mon/tasks/docker/pre_requisite.yml +++ b/roles/ceph-mon/tasks/docker/pre_requisite.yml @@ -126,3 +126,35 @@ tags: with_pkg when: ansible_version['full'] | version_compare('2.1.0.0', '>=') + +- name: install ntp on redhat using yum + yum: + name: ntp + state: present + when: + - ansible_os_family == 'RedHat' + - ansible_pkg_mgr == 'yum' + - ntp_service_enabled + tags: + with_pkg + +- name: install ntp on redhat using dnf + dnf: + name: ntp + state: present + when: + - ansible_os_family == 'RedHat' + - ansible_pkg_mgr == 'dnf' + - ntp_service_enabled + tags: + with_pkg + +- name: install ntp on debian + apt: + name: ntp + state: present + when: + - ansible_os_family == 'Debian' + - ntp_service_enabled + tags: + with_pkg diff --git a/roles/ceph-mon/tasks/docker/start_docker_monitor.yml b/roles/ceph-mon/tasks/docker/start_docker_monitor.yml index 7072a2e25..f3754a850 100644 --- a/roles/ceph-mon/tasks/docker/start_docker_monitor.yml +++ b/roles/ceph-mon/tasks/docker/start_docker_monitor.yml @@ -85,7 +85,7 @@ state: "running" privileged: "{{ mon_docker_privileged }}" env: "MON_IP={{ hostvars[inventory_hostname]['ansible_' + ceph_mon_docker_interface]['ipv4']['address'] }},CEPH_DAEMON=MON,CEPH_PUBLIC_NETWORK={{ ceph_mon_docker_subnet }},CEPH_FSID={{ 
fsid }},{{ ceph_mon_extra_envs }}" - volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph" + volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/etc/localtime:/etc/localtime:ro" when: - ansible_os_family != 'RedHat' - ansible_os_family != 'CoreOS' @@ -99,6 +99,7 @@ state: "running" privileged: "{{ mon_docker_privileged }}" env: "KV_TYPE={{kv_type}},KV_IP={{kv_endpoint}},MON_IP={{ hostvars[inventory_hostname]['ansible_' + ceph_mon_docker_interface]['ipv4']['address'] }},CEPH_DAEMON=MON,CEPH_PUBLIC_NETWORK={{ ceph_mon_docker_subnet }},{{ ceph_mon_extra_envs }}" + volumes: "/etc/localtime:/etc/localtime:ro" when: - ansible_os_family != 'RedHat' - ansible_os_family != 'CoreOS' diff --git a/roles/ceph-mon/templates/ceph-mon.service.j2 b/roles/ceph-mon/templates/ceph-mon.service.j2 index 93c5a66ad..b8facb244 100644 --- a/roles/ceph-mon/templates/ceph-mon.service.j2 +++ b/roles/ceph-mon/templates/ceph-mon.service.j2 @@ -15,6 +15,7 @@ ExecStart=/usr/bin/docker run --rm --name %i --net=host \ -e KV_IP={{kv_endpoint}}\ -e KV_PORT={{kv_port}} \ {% endif -%} + -v /etc/localtime:/etc/localtime:ro \ {% if mon_docker_privileged -%} --privileged \ {% endif -%} diff --git a/roles/ceph-nfs/tasks/docker/main.yml b/roles/ceph-nfs/tasks/docker/main.yml index ae9a3cb21..1e4d030f9 100644 --- a/roles/ceph-nfs/tasks/docker/main.yml +++ b/roles/ceph-nfs/tasks/docker/main.yml @@ -20,6 +20,23 @@ - include: pre_requisite.yml +- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml" + when: + - is_atomic + - ansible_os_family == 'RedHat' + - ntp_service_enabled + +- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml" + when: + - not is_atomic + - ansible_os_family == 'RedHat' + - ntp_service_enabled + +- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_debian.yml" + when: + - ansible_os_family == 'Debian' + - ntp_service_enabled + - include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml" vars: 
ceph_docker_username: "{{ ceph_nfs_docker_username }}" diff --git a/roles/ceph-nfs/tasks/docker/pre_requisite.yml b/roles/ceph-nfs/tasks/docker/pre_requisite.yml index c937afc72..69b538518 100644 --- a/roles/ceph-nfs/tasks/docker/pre_requisite.yml +++ b/roles/ceph-nfs/tasks/docker/pre_requisite.yml @@ -97,3 +97,35 @@ enabled: yes tags: with_pkg + +- name: install ntp on redhat using yum + yum: + name: ntp + state: present + when: + - ansible_os_family == 'RedHat' + - ansible_pkg_mgr == 'yum' + - ntp_service_enabled + tags: + with_pkg + +- name: install ntp on redhat using dnf + dnf: + name: ntp + state: present + when: + - ansible_os_family == 'RedHat' + - ansible_pkg_mgr == 'dnf' + - ntp_service_enabled + tags: + with_pkg + +- name: install ntp on debian + apt: + name: ntp + state: present + when: + - ansible_os_family == 'Debian' + - ntp_service_enabled + tags: + with_pkg diff --git a/roles/ceph-nfs/tasks/docker/start_docker_nfs.yml b/roles/ceph-nfs/tasks/docker/start_docker_nfs.yml index 77a5a9604..7e0196c93 100644 --- a/roles/ceph-nfs/tasks/docker/start_docker_nfs.yml +++ b/roles/ceph-nfs/tasks/docker/start_docker_nfs.yml @@ -61,7 +61,7 @@ privileged: true ports: "{{ ceph_nfs_port }}:{{ ceph_nfs_port }},111:111" env: "CEPH_DAEMON=NFS,CEPH_PUBLIC_NETWORK={{ ceph_nfs_docker_subnet }},{{ ceph_nfs_extra_envs }}" - volumes: "/etc/ceph:/etc/ceph,/etc/ganesha:/etc/ganesha" + volumes: "/etc/ceph:/etc/ceph,/etc/ganesha:/etc/ganesha,/etc/localtime:/etc/localtime:ro" when: not is_atomic and ansible_os_family != 'CoreOS' and @@ -75,7 +75,7 @@ state: "running" privileged: true env: "CEPH_DAEMON=NFS,CEPH_PUBLIC_NETWORK={{ ceph_nfs_docker_subnet }},{{ ceph_nfs_extra_envs }}" - volumes: "/etc/ganesha:/etc/ganesha" + volumes: "/etc/ganesha:/etc/ganesha,/etc/localtime:/etc/localtime:ro" when: not is_atomic and ansible_os_family != 'CoreOS' and diff --git a/roles/ceph-nfs/templates/ceph-nfs.service.j2 b/roles/ceph-nfs/templates/ceph-nfs.service.j2 index d78d2d0d2..bd8b41b0a 
100644 --- a/roles/ceph-nfs/templates/ceph-nfs.service.j2 +++ b/roles/ceph-nfs/templates/ceph-nfs.service.j2 @@ -15,6 +15,7 @@ ExecStart=/usr/bin/docker run --rm --name %i --net=host \ -e KV_TYPE={{kv_type}} \ -e KV_IP={{kv_endpoint}}\ {% endif -%} + -v /etc/localtime:/etc/localtime:ro \ --privileged \ -e CEPH_DAEMON=NFS \ -e CEPH_PUBLIC_NETWORK={{ ceph_mon_docker_subnet }} \ diff --git a/roles/ceph-osd/tasks/docker/main.yml b/roles/ceph-osd/tasks/docker/main.yml index 41e68785e..16ccd8cb2 100644 --- a/roles/ceph-osd/tasks/docker/main.yml +++ b/roles/ceph-osd/tasks/docker/main.yml @@ -20,6 +20,23 @@ - include: pre_requisite.yml +- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml" + when: + - is_atomic + - ansible_os_family == 'RedHat' + - ntp_service_enabled + +- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml" + when: + - not is_atomic + - ansible_os_family == 'RedHat' + - ntp_service_enabled + +- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_debian.yml" + when: + - ansible_os_family == 'Debian' + - ntp_service_enabled + - include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml" vars: ceph_docker_username: '{{ ceph_osd_docker_username }}' diff --git a/roles/ceph-osd/tasks/docker/pre_requisite.yml b/roles/ceph-osd/tasks/docker/pre_requisite.yml index 728d562b1..ab7e21057 100644 --- a/roles/ceph-osd/tasks/docker/pre_requisite.yml +++ b/roles/ceph-osd/tasks/docker/pre_requisite.yml @@ -125,3 +125,35 @@ tags: with_pkg when: ansible_version['full'] | version_compare('2.1.0.0', '>=') + +- name: install ntp on redhat using yum + yum: + name: ntp + state: present + when: + - ansible_os_family == 'RedHat' + - ansible_pkg_mgr == 'yum' + - ntp_service_enabled + tags: + with_pkg + +- name: install ntp on redhat using dnf + dnf: + name: ntp + state: present + when: + - ansible_os_family == 'RedHat' + - ansible_pkg_mgr == 'dnf' + - ntp_service_enabled + tags: + with_pkg + +- name: install ntp 
on debian + apt: + name: ntp + state: present + when: + - ansible_os_family == 'Debian' + - ntp_service_enabled + tags: + with_pkg diff --git a/roles/ceph-osd/tasks/docker/start_docker_osd.yml b/roles/ceph-osd/tasks/docker/start_docker_osd.yml index 34cbacbc1..f466f02b8 100644 --- a/roles/ceph-osd/tasks/docker/start_docker_osd.yml +++ b/roles/ceph-osd/tasks/docker/start_docker_osd.yml @@ -28,6 +28,7 @@ -v /etc/ceph:/etc/ceph \ -v /var/lib/ceph/:/var/lib/ceph/ \ -v /dev:/dev \ + -v /etc/localtime:/etc/localtime:ro \ -e "OSD_DEVICE={{ item.0 }}" \ -e "{{ ceph_osd_docker_prepare_env }}" \ "{{ ceph_osd_docker_username }}/{{ ceph_osd_docker_imagename }}:{{ ceph_osd_docker_image_tag }}" \ @@ -48,6 +49,7 @@ --name="{{ ansible_hostname }}-osd-prepare-{{ item.0 | regex_replace('/', '') }}" \ -v /dev:/dev \ + -v /etc/localtime:/etc/localtime:ro \ -e "OSD_DEVICE={{ item.0 }}" \ -e "{{ ceph_osd_docker_prepare_env }}" \ -e CEPH_DAEMON=osd_ceph_disk_prepare \ @@ -106,7 +108,7 @@ state: started privileged: yes env: "OSD_DEVICE={{ item }},{{ ceph_osd_docker_extra_env }}" - volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/dev:/dev,/run:/run" + volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/etc/localtime:/etc/localtime:ro,/dev:/dev,/run:/run" with_items: ceph_osd_docker_devices when: - ansible_os_family != 'RedHat' @@ -122,7 +124,7 @@ state: running privileged: yes env: "KV_TYPE={{kv_type}},KV_IP={{kv_endpoint}},OSD_DEVICE={{ item }},{{ ceph_osd_docker_extra_env }}" - volumes: "/dev/:/dev/" + volumes: "/etc/localtime:/etc/localtime:ro,/dev/:/dev/" with_items: ceph_osd_docker_devices when: - ansible_os_family != 'RedHat' diff --git a/roles/ceph-osd/templates/ceph-osd.service.j2 b/roles/ceph-osd/templates/ceph-osd.service.j2 index 12e6a9520..a6841b9ac 100644 --- a/roles/ceph-osd/templates/ceph-osd.service.j2 +++ b/roles/ceph-osd/templates/ceph-osd.service.j2 @@ -15,6 +15,7 @@ ExecStart=/usr/bin/docker run --rm --net=host --pid=host\ -e KV_IP={{kv_endpoint}} \ -e 
KV_PORT={{kv_port}} \ {% endif -%} + -v /etc/localtime:/etc/localtime:ro \ -v /dev:/dev \ --privileged \ -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \ diff --git a/roles/ceph-rbd-mirror/tasks/docker/main.yml b/roles/ceph-rbd-mirror/tasks/docker/main.yml index 886beca75..7bfe9da2c 100644 --- a/roles/ceph-rbd-mirror/tasks/docker/main.yml +++ b/roles/ceph-rbd-mirror/tasks/docker/main.yml @@ -17,6 +17,24 @@ when: ceph_health.rc != 0 - include: pre_requisite.yml + +- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml" + when: + - is_atomic + - ansible_os_family == 'RedHat' + - ntp_service_enabled + +- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml" + when: + - not is_atomic + - ansible_os_family == 'RedHat' + - ntp_service_enabled + +- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_debian.yml" + when: + - ansible_os_family == 'Debian' + - ntp_service_enabled + - include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml" vars: ceph_docker_username: "{{ ceph_rbd_mirror_docker_username }}" diff --git a/roles/ceph-rbd-mirror/tasks/docker/pre_requisite.yml b/roles/ceph-rbd-mirror/tasks/docker/pre_requisite.yml index 4b060aed6..6cc5e0bba 100644 --- a/roles/ceph-rbd-mirror/tasks/docker/pre_requisite.yml +++ b/roles/ceph-rbd-mirror/tasks/docker/pre_requisite.yml @@ -124,3 +124,35 @@ tags: with_pkg when: ansible_version['full'] | version_compare('2.1.0.0', '>=') + +- name: install ntp on redhat using yum + yum: + name: ntp + state: present + when: + - ansible_os_family == 'RedHat' + - ansible_pkg_mgr == 'yum' + - ntp_service_enabled + tags: + with_pkg + +- name: install ntp on redhat using dnf + dnf: + name: ntp + state: present + when: + - ansible_os_family == 'RedHat' + - ansible_pkg_mgr == 'dnf' + - ntp_service_enabled + tags: + with_pkg + +- name: install ntp on debian + apt: + name: ntp + state: present + when: + - ansible_os_family == 'Debian' + - ntp_service_enabled + tags: + with_pkg diff --git 
a/roles/ceph-rbd-mirror/tasks/docker/start_docker_rbd_mirror.yml b/roles/ceph-rbd-mirror/tasks/docker/start_docker_rbd_mirror.yml index cb4ff3b4a..fcd34cc7a 100644 --- a/roles/ceph-rbd-mirror/tasks/docker/start_docker_rbd_mirror.yml +++ b/roles/ceph-rbd-mirror/tasks/docker/start_docker_rbd_mirror.yml @@ -43,5 +43,5 @@ name: ceph-{{ ansible_hostname }}-rbd-mirror net: host state: running - volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph" + volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/etc/localtime:/etc/localtime:ro" when: ansible_os_family != 'RedHat' and ansible_os_family != 'CoreOS' diff --git a/roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.j2 b/roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.j2 index 69aa5b605..618967e56 100644 --- a/roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.j2 +++ b/roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.j2 @@ -14,6 +14,7 @@ ExecStart=/usr/bin/docker run --rm --net=host \ -e KV_TYPE={{kv_type}} \ -e KV_IP={{kv_endpoint}} \ {% endif -%} + -v /etc/localtime:/etc/localtime:ro \ --privileged \ -e CEPH_DAEMON=RBD_MIRROR \ --name={{ ansible_hostname }} \ diff --git a/roles/ceph-restapi/tasks/docker/main.yml b/roles/ceph-restapi/tasks/docker/main.yml index a0945cc1f..fc2274794 100644 --- a/roles/ceph-restapi/tasks/docker/main.yml +++ b/roles/ceph-restapi/tasks/docker/main.yml @@ -1,5 +1,31 @@ --- +- name: check if it is Atomic host + stat: path=/run/ostree-booted + register: stat_ostree + +- name: set fact for using Atomic host + set_fact: + is_atomic: '{{ stat_ostree.stat.exists }}' + - include: pre_requisite.yml + +- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml" + when: + - is_atomic + - ansible_os_family == 'RedHat' + - ntp_service_enabled + +- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml" + when: + - not is_atomic + - ansible_os_family == 'RedHat' + - ntp_service_enabled + +- include: "{{ playbook_dir 
}}/roles/ceph-common/tasks/misc/ntp_debian.yml" + when: + - ansible_os_family == 'Debian' + - ntp_service_enabled + - include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml" vars: ceph_docker_username: "{{ ceph_restapi_docker_username }}" diff --git a/roles/ceph-restapi/tasks/docker/pre_requisite.yml b/roles/ceph-restapi/tasks/docker/pre_requisite.yml index c4e8922c9..d89fe36f6 100644 --- a/roles/ceph-restapi/tasks/docker/pre_requisite.yml +++ b/roles/ceph-restapi/tasks/docker/pre_requisite.yml @@ -122,3 +122,35 @@ enabled: yes tags: with_pkg + +- name: install ntp on redhat using yum + yum: + name: ntp + state: present + when: + - ansible_os_family == 'RedHat' + - ansible_pkg_mgr == 'yum' + - ntp_service_enabled + tags: + with_pkg + +- name: install ntp on redhat using dnf + dnf: + name: ntp + state: present + when: + - ansible_os_family == 'RedHat' + - ansible_pkg_mgr == 'dnf' + - ntp_service_enabled + tags: + with_pkg + +- name: install ntp on debian + apt: + name: ntp + state: present + when: + - ansible_os_family == 'Debian' + - ntp_service_enabled + tags: + with_pkg diff --git a/roles/ceph-restapi/tasks/docker/start_docker_restapi.yml b/roles/ceph-restapi/tasks/docker/start_docker_restapi.yml index 40e6b355b..045c07d1d 100644 --- a/roles/ceph-restapi/tasks/docker/start_docker_restapi.yml +++ b/roles/ceph-restapi/tasks/docker/start_docker_restapi.yml @@ -7,4 +7,4 @@ expose: "{{ ceph_restapi_port }}" state: running env: "RESTAPI_IP={{ hostvars[inventory_hostname]['ansible_' + ceph_restapi_docker_interface]['ipv4']['address'] }},CEPH_DAEMON=RESTAPI,{{ ceph_restapi_docker_extra_env }}" - volumes: "/etc/ceph:/etc/ceph" + volumes: "/etc/ceph:/etc/ceph,/etc/localtime:/etc/localtime:ro" diff --git a/roles/ceph-rgw/tasks/docker/main.yml b/roles/ceph-rgw/tasks/docker/main.yml index 787a4fdcc..360d37b71 100644 --- a/roles/ceph-rgw/tasks/docker/main.yml +++ b/roles/ceph-rgw/tasks/docker/main.yml @@ -17,6 +17,24 @@ when: ceph_health.rc != 0 - include: 
pre_requisite.yml + +- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml" + when: + - is_atomic + - ansible_os_family == 'RedHat' + - ntp_service_enabled + +- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml" + when: + - not is_atomic + - ansible_os_family == 'RedHat' + - ntp_service_enabled + +- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_debian.yml" + when: + - ansible_os_family == 'Debian' + - ntp_service_enabled + - include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml" vars: ceph_docker_username: "{{ ceph_rgw_docker_username }}" diff --git a/roles/ceph-rgw/tasks/docker/pre_requisite.yml b/roles/ceph-rgw/tasks/docker/pre_requisite.yml index c322ed12c..92c3ae8ae 100644 --- a/roles/ceph-rgw/tasks/docker/pre_requisite.yml +++ b/roles/ceph-rgw/tasks/docker/pre_requisite.yml @@ -110,3 +110,35 @@ enabled: yes tags: with_pkg + +- name: install ntp on redhat using yum + yum: + name: ntp + state: present + when: + - ansible_os_family == 'RedHat' + - ansible_pkg_mgr == 'yum' + - ntp_service_enabled + tags: + with_pkg + +- name: install ntp on redhat using dnf + dnf: + name: ntp + state: present + when: + - ansible_os_family == 'RedHat' + - ansible_pkg_mgr == 'dnf' + - ntp_service_enabled + tags: + with_pkg + +- name: install ntp on debian + apt: + name: ntp + state: present + when: + - ansible_os_family == 'Debian' + - ntp_service_enabled + tags: + with_pkg diff --git a/roles/ceph-rgw/tasks/docker/start_docker_rgw.yml b/roles/ceph-rgw/tasks/docker/start_docker_rgw.yml index 11e9f910e..cfdd107b1 100644 --- a/roles/ceph-rgw/tasks/docker/start_docker_rgw.yml +++ b/roles/ceph-rgw/tasks/docker/start_docker_rgw.yml @@ -45,5 +45,5 @@ ports: "{{ ceph_rgw_civetweb_port }}:{{ ceph_rgw_civetweb_port }}" state: running env: "CEPH_DAEMON=RGW,{{ ceph_rgw_docker_extra_env }}" - volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph" + volumes: 
"/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/etc/localtime:/etc/localtime:ro" when: ansible_os_family != 'RedHat' and ansible_os_family != 'CoreOS' diff --git a/roles/ceph-rgw/templates/ceph-rgw.service.j2 b/roles/ceph-rgw/templates/ceph-rgw.service.j2 index 5a173f95b..4dfcc754e 100644 --- a/roles/ceph-rgw/templates/ceph-rgw.service.j2 +++ b/roles/ceph-rgw/templates/ceph-rgw.service.j2 @@ -14,6 +14,7 @@ ExecStart=/usr/bin/docker run --rm --net=host \ -e KV_TYPE={{kv_type}} \ -e KV_IP={{kv_endpoint}} \ {% endif -%} + -v /etc/localtime:/etc/localtime:ro \ --privileged \ -e CEPH_DAEMON=RGW \ --name={{ ansible_hostname }} \ From ba92eb48e86556fb215c924b6d9080fcb54e1df1 Mon Sep 17 00:00:00 2001 From: Ivan Font Date: Thu, 28 Jul 2016 16:43:48 -0700 Subject: [PATCH 03/27] Update ntp atomic plays to use is_atomic variable Signed-off-by: Ivan Font --- roles/ceph-common/tasks/checks/check_ntp_atomic.yml | 1 - roles/ceph-common/tasks/misc/ntp_atomic.yml | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/roles/ceph-common/tasks/checks/check_ntp_atomic.yml b/roles/ceph-common/tasks/checks/check_ntp_atomic.yml index e1fbba41c..15a5b2d13 100644 --- a/roles/ceph-common/tasks/checks/check_ntp_atomic.yml +++ b/roles/ceph-common/tasks/checks/check_ntp_atomic.yml @@ -4,4 +4,3 @@ register: ntp_pkg_query ignore_errors: true changed_when: false - when: ansible_os_family == 'RedHat' diff --git a/roles/ceph-common/tasks/misc/ntp_atomic.yml b/roles/ceph-common/tasks/misc/ntp_atomic.yml index 3e47f839b..11dfc988e 100644 --- a/roles/ceph-common/tasks/misc/ntp_atomic.yml +++ b/roles/ceph-common/tasks/misc/ntp_atomic.yml @@ -1,6 +1,6 @@ --- - include: ../checks/check_ntp_atomic.yml - when: ansible_os_family == 'RedHat' + when: is_atomic - name: start the ntp service service: From b0907aaeea8e41ca1a42a70b59a9503d0d688d0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Fri, 27 May 2016 16:37:07 +0200 Subject: [PATCH 04/27] ceph-common: test mon initial 
members MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Sébastien Han --- roles/ceph-common/templates/ceph.conf.j2 | 69 ++++++++++--------- .../ceph-common/templates/mon_addr_address.j2 | 3 +- .../templates/mon_addr_interface.j2 | 3 +- 3 files changed, 38 insertions(+), 37 deletions(-) diff --git a/roles/ceph-common/templates/ceph.conf.j2 b/roles/ceph-common/templates/ceph.conf.j2 index aa383c217..ebff7b59e 100644 --- a/roles/ceph-common/templates/ceph.conf.j2 +++ b/roles/ceph-common/templates/ceph.conf.j2 @@ -8,59 +8,62 @@ auth service required = none auth client required = none auth supported = none {% endif %} -{% if not mon_containerized_deployment_with_kv %} +{% if not mon_containerized_deployment_with_kv and not mon_containerized_deployment %} fsid = {{ fsid }} {% endif %} max open files = {{ max_open_files }} {% if common_single_host_mode is defined %} osd crush chooseleaf type = 0 {% endif %} -[client.libvirt] -admin socket = {{ rbd_client_admin_socket_path }}/$cluster-$type.$id.$pid.$cctid.asok # must be writable by QEMU and allowed by SELinux or AppArmor -log file = {{ rbd_client_log_file }} # must be writable by QEMU and allowed by SELinux or AppArmor +{# NOTE (leseb): the blank lines in-between are needed otherwise we won't get any line break #} +{% if groups[mon_group_name] is defined %} +mon_initial_members = {% if groups[mon_group_name] is defined %}{% for host in groups[mon_group_name] %}{% if hostvars[host]['ansible_fqdn'] is defined and mon_use_fqdn %}{{ hostvars[host]['ansible_fqdn'] }}{% if not loop.last %},{% endif %}{% elif hostvars[host]['ansible_hostname'] is defined %}{{ hostvars[host]['ansible_hostname'] }}{% if not loop.last %},{% endif %}{% endif %}{% endfor %}{% endif %} -[mon] -{% if not mon_containerized_deployment_with_kv %} -{% for host in groups[mon_group_name] %} -{% if hostvars[host]['ansible_fqdn'] is defined and mon_use_fqdn %} -[mon.{{ hostvars[host]['ansible_fqdn'] }}] 
-host = {{ hostvars[host]['ansible_fqdn'] }} -{% elif hostvars[host]['ansible_hostname'] is defined %} -[mon.{{ hostvars[host]['ansible_hostname'] }}] -host = {{ hostvars[host]['ansible_hostname'] }} -{% endif %} -# we need to check if monitor_interface is defined in the inventory per host or if it's set in a group_vars file -{% if mon_containerized_deployment %} -{% set interface = ["ansible_",ceph_mon_docker_interface]|join %} -{% if interface in hostvars[host] and 'ipv4' in hostvars[host][interface] %} -# user address from interface {{ ceph_mon_docker_interface }} -mon addr = {{ hostvars[host][interface]['ipv4']['address'] }} - {% elif hostvars[host]['monitor_address'] is defined %} - # use host monitor address -mon addr = {{ hostvars[host]['monitor_address'] }} - {% elif monitor_address != "0.0.0.0" %} - # use group_var monitor address -mon addr = monitor_address -{% endif %} +mon_host = {% if groups[mon_group_name] is defined %}{% for host in groups[mon_group_name] %}{{ hostvars[host]['ansible_' + monitor_interface]['ipv4']['address'] }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %} {% elif (hostvars[host]['monitor_interface'] is defined and hostvars[host]['monitor_interface'] != "interface") or monitor_interface != "interface" %} {% include 'mon_addr_interface.j2' %} {% else %} {% include 'mon_addr_address.j2' %} {% endif %} +{% if mon_containerized_deployment %} +fsid = {{ fsid }} +{% if groups[mon_group_name] is defined %} +{% for host in groups[mon_group_name] %} +{% if mon_containerized_deployment %} +{% set interface = ["ansible_",ceph_mon_docker_interface]|join %} +mon_host = {{ hostvars[host]['ansible_' + interface]['ipv4']['address'] }} +{% if not loop.last %},{% endif %} +{% elif hostvars[host]['monitor_address'] is defined %} +mon_host = {{ hostvars[host]['monitor_address'] }} +{% if not loop.last %},{% endif %} +{% elif monitor_address != "0.0.0.0" %} +mon_host = monitor_address +{% if not loop.last %},{% endif %} +{% endif %} {% endfor 
%} {% endif %} +{% endif %} + +{% if public_network is defined %} +public_network = {{ public_network }} +{% endif %} +{% if cluster_network is defined %} +cluster_network = {{ cluster_network }} +{% endif %} +max open files = {{ max_open_files }} +{% if common_single_host_mode is defined %} +osd crush chooseleaf type = 0 +{% endif %} + +[client.libvirt] +admin socket = {{ rbd_client_admin_socket_path }}/$cluster-$type.$id.$pid.$cctid.asok # must be writable by QEMU and allowed by SELinux or AppArmor +log file = {{ rbd_client_log_file }} # must be writable by QEMU and allowed by SELinux or AppArmor [osd] osd mkfs type = {{ osd_mkfs_type }} osd mkfs options xfs = {{ osd_mkfs_options_xfs }} osd mount options xfs = {{ osd_mount_options_xfs }} osd journal size = {{ journal_size }} -{% if cluster_network is defined %} -cluster_network = {{ cluster_network }} -{% endif %} -{% if public_network is defined %} -public_network = {{ public_network }} -{% endif %} {% if filestore_xattr_use_omap != None %} filestore xattr use omap = {{ filestore_xattr_use_omap }} {% elif osd_mkfs_type == "ext4" %} diff --git a/roles/ceph-common/templates/mon_addr_address.j2 b/roles/ceph-common/templates/mon_addr_address.j2 index 19dfa5312..0a6323594 100644 --- a/roles/ceph-common/templates/mon_addr_address.j2 +++ b/roles/ceph-common/templates/mon_addr_address.j2 @@ -1,2 +1 @@ -mon addr = {{ hostvars[host]['monitor_address'] if hostvars[host]['monitor_address'] is defined else monitor_address }} - +mon host = {% for host in groups[mon_group_name] %}{{ hostvars[host]['monitor_address'] if hostvars[host]['monitor_address'] is defined else monitor_address }}{% if not loop.last %},{% endif %}{% endfor %} diff --git a/roles/ceph-common/templates/mon_addr_interface.j2 b/roles/ceph-common/templates/mon_addr_interface.j2 index b5c85cd06..91db00785 100644 --- a/roles/ceph-common/templates/mon_addr_interface.j2 +++ b/roles/ceph-common/templates/mon_addr_interface.j2 @@ -1,2 +1 @@ -mon addr = {{ 
hostvars[host]['ansible_' + (hostvars[host]['monitor_interface'] if hostvars[host]['monitor_interface'] is defined else monitor_interface) ]['ipv4']['address'] }} - +mon host = {% for host in groups[mon_group_name] %}{{ hostvars[host]['ansible_' + (hostvars[host]['monitor_interface'] if hostvars[host]['monitor_interface'] is defined else monitor_interface) ]['ipv4']['address'] }}{% if not loop.last %},{% endif %}{% endfor %} From c2e347c1550c0259fc37eb4e168ba14c43c854b2 Mon Sep 17 00:00:00 2001 From: James Saint-Rossy Date: Tue, 9 Aug 2016 22:53:07 -0400 Subject: [PATCH 05/27] Various small fixes: local_action required root, ansible2 warnings, using file module instead of rm --- purge-cluster.yml | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/purge-cluster.yml b/purge-cluster.yml index b2c8c9473..ab2b8ec13 100644 --- a/purge-cluster.yml +++ b/purge-cluster.yml @@ -293,15 +293,18 @@ osd_group_name in group_names - name: remove osd mountpoint tree - shell: rm -rf /var/lib/ceph/osd + file: + path: /var/lib/ceph/osd/ + state: absent register: remove_osd_mountpoints - failed_when: false + ignore_errors: true when: osd_group_name in group_names - name: remove monitor store and bootstrap keys - shell: rm -rf /var/lib/ceph/ - failed_when: false + file: + path: /var/lib/ceph/ + state: absent when: mon_group_name in group_names @@ -313,7 +316,7 @@ - remove data when: osd_group_name in group_names and - remove_osd_mountpoints.rc != 0 + remove_osd_mountpoints.failed is defined - name: see if ceph-disk is installed shell: "which ceph-disk" @@ -322,7 +325,7 @@ - name: zap osd disks shell: ceph-disk zap "{{ item }}" - with_items: devices + with_items: "{{ devices | default([]) }}" when: osd_group_name in group_names and ceph_disk_present.rc == 0 and @@ -423,6 +426,7 @@ - name: request data removal local_action: shell echo requesting data removal + become: false notify: - remove data From 673f54a10068f75b1812360d7ebcb7e9889c1150 Mon Sep 17 
00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Wed, 10 Aug 2016 10:34:23 +0200 Subject: [PATCH 06/27] osd: fix collocation spelling and declare dmcrypt variables MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * changed s/colocation/collocation/ * declare dmcrypt variable in ceph-common so the variables check does not fail Signed-off-by: Sébastien Han --- group_vars/all.sample | 2 ++ group_vars/osds.sample | 2 +- roles/ceph-common/defaults/main.yml | 2 ++ .../ceph-common/tasks/checks/check_mandatory_vars.yml | 11 ++++++----- roles/ceph-osd/defaults/main.yml | 2 +- roles/ceph-osd/tasks/activate_osds.yml | 4 ++-- roles/ceph-osd/tasks/main.yml | 4 ++-- roles/ceph-osd/tasks/pre_requisite.yml | 2 +- ...colocation.yml => dmcrypt-journal-collocation.yml} | 4 ++-- 9 files changed, 19 insertions(+), 14 deletions(-) rename roles/ceph-osd/tasks/scenarios/{dmcrypt-journal-colocation.yml => dmcrypt-journal-collocation.yml} (94%) diff --git a/group_vars/all.sample b/group_vars/all.sample index d9732ab4c..941a2da99 100644 --- a/group_vars/all.sample +++ b/group_vars/all.sample @@ -352,6 +352,8 @@ dummy: #raw_multi_journal: False #osd_directory: False #bluestore: False +#dmcrypt_journal_collocation: False +#dmcrypt_dedicated_journal: False #osd_auto_discovery: False diff --git a/group_vars/osds.sample b/group_vars/osds.sample index b2241ba04..f04ddbd47 100644 --- a/group_vars/osds.sample +++ b/group_vars/osds.sample @@ -121,7 +121,7 @@ dummy: # Keys are stored into the monitors k/v store # Use 'true' to enable this scenario # Both journal and data are stored on the same dm-crypt encrypted device -#dmcrypt_journal_colocation: false +#dmcrypt_journal_collocation: false # VI. Encrypt osd data and/or journal devices with dm-crypt. 
diff --git a/roles/ceph-common/defaults/main.yml b/roles/ceph-common/defaults/main.yml index aefdd012b..bfc5d023b 100644 --- a/roles/ceph-common/defaults/main.yml +++ b/roles/ceph-common/defaults/main.yml @@ -344,5 +344,7 @@ journal_collocation: False raw_multi_journal: False osd_directory: False bluestore: False +dmcrypt_journal_collocation: False +dmcrypt_dedicated_journal: False osd_auto_discovery: False diff --git a/roles/ceph-common/tasks/checks/check_mandatory_vars.yml b/roles/ceph-common/tasks/checks/check_mandatory_vars.yml index 4ab6dd1f3..499d593c0 100644 --- a/roles/ceph-common/tasks/checks/check_mandatory_vars.yml +++ b/roles/ceph-common/tasks/checks/check_mandatory_vars.yml @@ -71,7 +71,7 @@ - not raw_multi_journal - not osd_directory - not bluestore - - not dmcrypt_journal_colocation + - not dmcrypt_journal_collocation - not dmcrypt_dedicated_journal - name: verify only one osd scenario was chosen @@ -86,14 +86,15 @@ or (raw_multi_journal and osd_directory) or (raw_multi_journal and bluestore) or (osd_directory and bluestore) - or (dmcrypt_journal_colocation and journal_collocation) - or (dmcrypt_journal_colocation and raw_multi_journal) - or (dmcrypt_journal_colocation and osd_directory) - or (dmcrypt_journal_colocation and bluestore) + or (dmcrypt_journal_collocation and journal_collocation) + or (dmcrypt_journal_collocation and raw_multi_journal) + or (dmcrypt_journal_collocation and osd_directory) + or (dmcrypt_journal_collocation and bluestore) or (dmcrypt_dedicated_journal and journal_collocation) or (dmcrypt_dedicated_journal and raw_multi_journal) or (dmcrypt_dedicated_journal and osd_directory) or (dmcrypt_dedicated_journal and bluestore) + or (dmcrypt_dedicated_journal and dmcrypt_journal_collocation) - name: verify devices have been provided fail: diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml index f2927b56e..97002b1f5 100644 --- a/roles/ceph-osd/defaults/main.yml +++ b/roles/ceph-osd/defaults/main.yml @@ 
-113,7 +113,7 @@ bluestore: false # Keys are stored into the monitors k/v store # Use 'true' to enable this scenario # Both journal and data are stored on the same dm-crypt encrypted device -dmcrypt_journal_colocation: false +dmcrypt_journal_collocation: false # VI. Encrypt osd data and/or journal devices with dm-crypt. diff --git a/roles/ceph-osd/tasks/activate_osds.yml b/roles/ceph-osd/tasks/activate_osds.yml index 4219e005f..6a9724a97 100644 --- a/roles/ceph-osd/tasks/activate_osds.yml +++ b/roles/ceph-osd/tasks/activate_osds.yml @@ -36,7 +36,7 @@ - item.value.removable == "0" - item.value.partitions|count == 0 - osd_auto_discovery - - dmcrypt_journal_colocation + - dmcrypt_journal_collocation - name: activate osd(s) when device is a disk (dmcrypt) command: ceph-disk activate --dmcrypt {{ item.1 | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\\1p') }}1 @@ -56,7 +56,7 @@ # https://github.com/ansible/ansible/issues/4297 - name: combine ispartition results set_fact: - combined_activate_osd_disk_results: "{{ activate_osd_disk if not dmcrypt_journal_colocation else activate_osd_disk_dmcrypt }}" + combined_activate_osd_disk_results: "{{ activate_osd_disk if not dmcrypt_journal_collocation else activate_osd_disk_dmcrypt }}" - name: fail if ceph-disk cannot create an OSD fail: diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml index d965a558c..4550816b6 100644 --- a/roles/ceph-osd/tasks/main.yml +++ b/roles/ceph-osd/tasks/main.yml @@ -22,9 +22,9 @@ - osd_objectstore == 'bluestore' - not osd_containerized_deployment -- include: ./scenarios/dmcrypt-journal-colocation.yml +- include: ./scenarios/dmcrypt-journal-collocation.yml when: - - dmcrypt_journal_colocation + - dmcrypt_journal_collocation - not osd_containerized_deployment - include: ./scenarios/dmcrypt-dedicated-journal.yml diff --git a/roles/ceph-osd/tasks/pre_requisite.yml b/roles/ceph-osd/tasks/pre_requisite.yml index 9e2afda17..4941471b3 100644 --- 
a/roles/ceph-osd/tasks/pre_requisite.yml +++ b/roles/ceph-osd/tasks/pre_requisite.yml @@ -35,7 +35,7 @@ set_fact: copy_admin_key: true when: - - dmcrypt_journal_colocation or dmcrypt_dedicated_journal + - dmcrypt_journal_collocation or dmcrypt_dedicated_journal - name: copy osd bootstrap key copy: diff --git a/roles/ceph-osd/tasks/scenarios/dmcrypt-journal-colocation.yml b/roles/ceph-osd/tasks/scenarios/dmcrypt-journal-collocation.yml similarity index 94% rename from roles/ceph-osd/tasks/scenarios/dmcrypt-journal-colocation.yml rename to roles/ceph-osd/tasks/scenarios/dmcrypt-journal-collocation.yml index 534fd3c57..91057b5aa 100644 --- a/roles/ceph-osd/tasks/scenarios/dmcrypt-journal-colocation.yml +++ b/roles/ceph-osd/tasks/scenarios/dmcrypt-journal-collocation.yml @@ -16,7 +16,7 @@ - ansible_devices is defined - item.value.removable == "0" - item.value.partitions|count == 0 - - dmcrypt_journal_colocation + - dmcrypt_journal_collocation - osd_auto_discovery - name: manually prepare osd disk(s) (dmcrypt) @@ -30,7 +30,7 @@ - not item.1.get("skipped") - item.0.get("rc", 0) != 0 - item.1.get("rc", 0) != 0 - - dmcrypt_journal_colocation + - dmcrypt_journal_collocation - not osd_auto_discovery - include: ../activate_osds.yml From 0cf983c9c3c65ae56564cc615c63156aac180cd7 Mon Sep 17 00:00:00 2001 From: daniel lin Date: Tue, 9 Aug 2016 15:32:52 -0400 Subject: [PATCH 07/27] Changes to allow for flexible rsync directory -rsync directory was hardcoded previously and did not reflect the default /vagrant rsync directory --- Vagrantfile | 3 ++- vagrant_variables.yml.atomic | 3 +++ vagrant_variables.yml.sample | 3 +++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/Vagrantfile b/Vagrantfile index e3197c98e..5cf98d7c1 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -18,6 +18,7 @@ CLIENTS = settings['client_vms'] SUBNET = settings['subnet'] BOX = settings['vagrant_box'] BOX_URL = settings['vagrant_box_url'] +SYNC_DIR = settings['vagrant_sync_dir'] MEMORY = 
settings['memory'] STORAGECTL = settings['vagrant_storagectl'] ETH = settings['eth'] @@ -112,7 +113,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| # Faster bootup. Disable if you need this for libvirt config.vm.provider :libvirt do |v,override| - override.vm.synced_folder '.', '/home/vagrant/sync', disabled: true + override.vm.synced_folder '.', SYNC_DIR, disabled: true end if BOX == 'openstack' diff --git a/vagrant_variables.yml.atomic b/vagrant_variables.yml.atomic index e0a0e9824..63ea2b227 100644 --- a/vagrant_variables.yml.atomic +++ b/vagrant_variables.yml.atomic @@ -23,6 +23,9 @@ disks: "[ '/dev/sdb', '/dev/sdc' ]" eth: 'enp0s8' vagrant_box: centos/atomic-host +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +vagrant_sync_dir: /home/vagrant/sync # if vagrant fails to attach storage controller, add the storage controller name by: # VBoxManage storagectl `VBoxManage list vms |grep ceph-ansible_osd0|awk '{print $1}'|tr \" ' '` --name "SATA" --add sata diff --git a/vagrant_variables.yml.sample b/vagrant_variables.yml.sample index 23c4ef26a..b7fb8a491 100644 --- a/vagrant_variables.yml.sample +++ b/vagrant_variables.yml.sample @@ -44,6 +44,9 @@ disks: "[ '/dev/sdb', '/dev/sdc' ]" # - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= # - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ vagrant_box: ubuntu/trusty64 +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +vagrant_sync_dir: /home/vagrant/sync # VAGRANT URL # This is a URL to download an image from an alternate location. vagrant_box # above should be set to the filename of the image. 
From 0d71f9fb790c0ac04c739c20296bed60c2cbc10b Mon Sep 17 00:00:00 2001 From: Ivan Font Date: Thu, 11 Aug 2016 18:21:57 -0700 Subject: [PATCH 08/27] Updates for containerized rbd-mirror role - Remove /var/lib/ceph dependencies - Add support for rbd-mirror image tag Signed-off-by: Ivan Font --- roles/ceph-rbd-mirror/tasks/docker/selinux.yml | 1 - .../tasks/docker/start_docker_rbd_mirror.yml | 17 +++++------------ .../templates/ceph-rbd-mirror.service.j2 | 3 +-- 3 files changed, 6 insertions(+), 15 deletions(-) diff --git a/roles/ceph-rbd-mirror/tasks/docker/selinux.yml b/roles/ceph-rbd-mirror/tasks/docker/selinux.yml index 3630824d7..ce8b3054f 100644 --- a/roles/ceph-rbd-mirror/tasks/docker/selinux.yml +++ b/roles/ceph-rbd-mirror/tasks/docker/selinux.yml @@ -8,6 +8,5 @@ shell: chcon -Rt svirt_sandbox_file_t {{ item }} with_items: - /etc/ceph - - /var/lib/ceph changed_when: false when: sestatus.stdout != 'Disabled' diff --git a/roles/ceph-rbd-mirror/tasks/docker/start_docker_rbd_mirror.yml b/roles/ceph-rbd-mirror/tasks/docker/start_docker_rbd_mirror.yml index fcd34cc7a..abd100687 100644 --- a/roles/ceph-rbd-mirror/tasks/docker/start_docker_rbd_mirror.yml +++ b/roles/ceph-rbd-mirror/tasks/docker/start_docker_rbd_mirror.yml @@ -4,21 +4,14 @@ become: true template: src: "{{ role_path }}/templates/ceph-rbd-mirror.service.j2" - dest: /var/lib/ceph/ceph-rbd-mirror@.service + dest: /etc/systemd/system/ceph-rbd-mirror@.service owner: "root" group: "root" mode: "0644" when: ansible_os_family == 'RedHat' or ansible_os_family == 'CoreOS' -- name: link systemd unit file for rbd mirror instance - file: - src: /var/lib/ceph/ceph-rbd-mirror@.service - dest: /etc/systemd/system/multi-user.target.wants/ceph-rbd-mirror@{{ ansible_hostname }}.service - state: link - when: ansible_os_family == 'RedHat' or ansible_os_family == 'CoreOS' - - name: enable systemd unit file for rbd mirror instance - command: systemctl enable /etc/systemd/system/multi-user.target.wants/ceph-rbd-mirror@{{ 
ansible_hostname }}.service + command: systemctl enable ceph-rbd-mirror@{{ ansible_hostname }}.service failed_when: false changed_when: false when: ansible_os_family == 'RedHat' or ansible_os_family == 'CoreOS' @@ -39,9 +32,9 @@ - name: run the ceph rbd mirror docker image docker: - image: "{{ ceph_rbd_mirror_docker_username }}/{{ ceph_rbd_mirror_docker_imagename }}" - name: ceph-{{ ansible_hostname }}-rbd-mirror + image: "{{ ceph_rbd_mirror_docker_username }}/{{ ceph_rbd_mirror_docker_imagename }}:{{ ceph_rbd_mirror_docker_image_tag }}" + name: "{{ ansible_hostname }}" net: host state: running - volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/etc/localtime:/etc/localtime:ro" + volumes: "/etc/ceph:/etc/ceph,/etc/localtime:/etc/localtime:ro" when: ansible_os_family != 'RedHat' and ansible_os_family != 'CoreOS' diff --git a/roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.j2 b/roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.j2 index 618967e56..594e26e1c 100644 --- a/roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.j2 +++ b/roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.j2 @@ -8,7 +8,6 @@ ExecStartPre=-/usr/bin/docker stop {{ ansible_hostname }} ExecStartPre=-/usr/bin/docker rm {{ ansible_hostname }} ExecStart=/usr/bin/docker run --rm --net=host \ {% if not rbd_mirror_containerized_deployment_with_kv -%} - -v /var/lib/ceph:/var/lib/ceph \ -v /etc/ceph:/etc/ceph \ {% else -%} -e KV_TYPE={{kv_type}} \ @@ -18,7 +17,7 @@ ExecStart=/usr/bin/docker run --rm --net=host \ --privileged \ -e CEPH_DAEMON=RBD_MIRROR \ --name={{ ansible_hostname }} \ - {{ ceph_rbd_mirror_docker_username }}/{{ ceph_rbd_mirror_docker_imagename }} + {{ ceph_rbd_mirror_docker_username }}/{{ ceph_rbd_mirror_docker_imagename }}:{{ ceph_rbd_mirror_docker_image_tag }} ExecStopPost=-/usr/bin/docker stop {{ ansible_hostname }} Restart=always RestartSec=10s From 08766a243a705626defd3988c6f58127efb4ae4a Mon Sep 17 00:00:00 2001 From: Daniel Lin Date: Mon, 6 Jun 2016 
10:22:20 -0400 Subject: [PATCH 09/27] Allow ceph-ansible to be run on a locally built/installed Ceph -First install ceph into a directory with CMake cmake -DCMAKE_INSTALL_LIBEXECDIR=/usr/lib -DWITH_SYSTEMD=ON -DCMAKE_INSTALL_PREFIX:PATH:=/usr && make DESTDIR= install/strip -Ceph-ansible copies over the install_dir -User can use rundep_installer.sh to install any runtime dependencies that ceph needs onto the machine from rundep --- group_vars/all.sample | 18 ++++- roles/ceph-common/defaults/main.yml | 18 ++++- .../tasks/checks/check_mandatory_vars.yml | 1 + .../tasks/installs/install_on_redhat.yml | 72 ++++++++++++++++--- roles/ceph-mon/tasks/deploy_monitors.yml | 2 +- roles/ceph-mon/tasks/start_monitor.yml | 6 +- rundep.sample | 45 ++++++++++++ rundep_installer.sh | 27 +++++++ 8 files changed, 171 insertions(+), 18 deletions(-) create mode 100644 rundep.sample create mode 100755 rundep_installer.sh diff --git a/group_vars/all.sample b/group_vars/all.sample index 941a2da99..4d3e7e15a 100644 --- a/group_vars/all.sample +++ b/group_vars/all.sample @@ -84,12 +84,24 @@ dummy: ## Configure package origin # -#ceph_origin: 'upstream' # or 'distro' +#ceph_origin: 'upstream' #'distro' or 'local' # 'distro' means that no separate repo file will be added # you will get whatever version of Ceph is included in your Linux distro. 
-# -#ceph_use_distro_backports: false # DEBIAN ONLY +# 'local' means that the ceph binaries will be copied over from the local machine +# LOCAL CEPH INSTALLATION (ceph_origin==local) +# +# Path to DESTDIR of the ceph install +#ceph_installation_dir: "/path/to/ceph_installation/" +# Whether or not to use installer script rundep_installer.sh +# This script takes in rundep and installs the packages line by line onto the machine +# If this is set to false then it is assumed that the machine ceph is being copied onto will already have +# all runtime dependencies installed +#use_installer: false +# Root directory for ceph-ansible +#ansible_dir: "/path/to/ceph-ansible" + +#ceph_use_distro_backports: false # DEBIAN ONLY # STABLE ######## diff --git a/roles/ceph-common/defaults/main.yml b/roles/ceph-common/defaults/main.yml index bfc5d023b..fe9d16849 100644 --- a/roles/ceph-common/defaults/main.yml +++ b/roles/ceph-common/defaults/main.yml @@ -76,12 +76,24 @@ ceph_test: False ## Configure package origin # -ceph_origin: 'upstream' # or 'distro' +ceph_origin: 'upstream' # or 'distro' or 'local' # 'distro' means that no separate repo file will be added # you will get whatever version of Ceph is included in your Linux distro. 
-# -ceph_use_distro_backports: false # DEBIAN ONLY +# 'local' means that the ceph binaries will be copied over from the local machine +# LOCAL CEPH INSTALLATION (ceph_origin==local) +# +# Path to DESTDIR of the ceph install +#ceph_installation_dir: "/path/to/ceph_installation/" +# Whether or not to use installer script rundep_installer.sh +# This script takes in rundep and installs the packages line by line onto the machine +# If this is set to false then it is assumed that the machine ceph is being copied onto will already have +# all runtime dependencies installed +#use_installer: false +# Root directory for ceph-ansible +#ansible_dir: "/path/to/ceph-ansible" + +ceph_use_distro_backports: false # DEBIAN ONLY # STABLE ######## diff --git a/roles/ceph-common/tasks/checks/check_mandatory_vars.yml b/roles/ceph-common/tasks/checks/check_mandatory_vars.yml index 499d593c0..e317589b7 100644 --- a/roles/ceph-common/tasks/checks/check_mandatory_vars.yml +++ b/roles/ceph-common/tasks/checks/check_mandatory_vars.yml @@ -5,6 +5,7 @@ when: - ceph_origin != 'upstream' - ceph_origin != 'distro' + - ceph_origin != 'local' tags: - package-install diff --git a/roles/ceph-common/tasks/installs/install_on_redhat.yml b/roles/ceph-common/tasks/installs/install_on_redhat.yml index 93e6d045d..56b3c9121 100644 --- a/roles/ceph-common/tasks/installs/install_on_redhat.yml +++ b/roles/ceph-common/tasks/installs/install_on_redhat.yml @@ -39,13 +39,67 @@ include: redhat_ceph_repository.yml when: ceph_origin == 'upstream' +- name: make sure /tmp exists + file: + path: /tmp + state: directory + when: + - ceph_origin == 'local' + - use_installer + +- name: use mktemp to create name for rundep + command: "mktemp /tmp/rundep.XXXXXXXX" + register: rundep_location + when: + - ceph_origin == 'local' + - use_installer + +- name: copy rundep + copy: + src: "{{ansible_dir}}/rundep" + dest: "{{ item }}" + with_items: rundep_location.stdout_lines + when: + - ceph_origin == 'local' + - use_installer + +- 
name: install ceph dependencies + script: "{{ ansible_dir }}/rundep_installer.sh {{ item }}" + become: true + with_items: rundep_location.stdout_lines + when: + - ceph_origin == 'local' + - use_installer + - name: install ceph yum: name: ceph state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" - when: not use_server_package_split + when: + - not use_server_package_split + - ansible_pkg_mgr == "yum" + - ceph_origin != 'local' -- name: install distro or red hat storage ceph mon +- name: synchronize ceph install + synchronize: + src: "{{ceph_installation_dir}}/" + dest: "/" + when: + - ceph_origin == 'local' + +- name: create user group ceph + group: + name: 'ceph' + when: + - ceph_origin == 'local' + +- name: create user ceph + user: + name: 'ceph' + when: + - ceph_origin == 'local' + +- name: install distro or red hat storage ceph mon via yum yum: name: "ceph-mon" state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" @@ -57,7 +111,7 @@ or ceph_origin == "distro" or ceph_custom -- name: install distro or red hat storage ceph mon +- name: install distro or red hat storage ceph mon via dnf dnf: name: "ceph-mon" state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" @@ -69,7 +123,7 @@ or ceph_dev or ceph_custom -- name: install distro or red hat storage ceph osd +- name: install distro or red hat storage ceph osd via yum yum: name: "ceph-osd" state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" @@ -81,7 +135,7 @@ or ceph_dev or ceph_custom -- name: install distro or red hat storage ceph osd +- name: install distro or red hat storage ceph osd via dnf dnf: name: "ceph-osd" state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" @@ -93,7 +147,7 @@ or ceph_dev or ceph_custom -- name: install distro or red hat storage ceph mds +- name: install distro or red hat storage ceph mds via yum yum: name: "ceph-mds" state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') 
}}" @@ -105,7 +159,7 @@ or ceph_dev or ceph_custom -- name: install distro or red hat storage ceph mds +- name: install distro or red hat storage ceph mds via dnf dnf: name: "ceph-mds" state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" @@ -117,7 +171,7 @@ or ceph_dev or ceph_custom -- name: install distro or red hat storage ceph base +- name: install distro or red hat storage ceph base via yum yum: name: "ceph-base" state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" @@ -129,7 +183,7 @@ or ceph_dev or ceph_custom -- name: install distro or red hat storage ceph base +- name: install distro or red hat storage ceph base via dnf dnf: name: "ceph-base" state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" diff --git a/roles/ceph-mon/tasks/deploy_monitors.yml b/roles/ceph-mon/tasks/deploy_monitors.yml index 2f773ce5a..78518b4a3 100644 --- a/roles/ceph-mon/tasks/deploy_monitors.yml +++ b/roles/ceph-mon/tasks/deploy_monitors.yml @@ -53,7 +53,7 @@ - is_after_hammer - name: ceph monitor mkfs without keyring (for or after infernalis release) - command: ceph-mon --setuser ceph --setgroup ceph --mkfs -i {{ monitor_name }} --fsid {{ fsid }} + command: ceph-mon --cluster {{ cluster }} --setuser ceph --setgroup ceph --mkfs -i {{ monitor_name }} --fsid {{ fsid }} args: creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/store.db when: diff --git a/roles/ceph-mon/tasks/start_monitor.yml b/roles/ceph-mon/tasks/start_monitor.yml index 9d00eecd3..846c77392 100644 --- a/roles/ceph-mon/tasks/start_monitor.yml +++ b/roles/ceph-mon/tasks/start_monitor.yml @@ -10,13 +10,15 @@ with_items: - done - upstart - when: not use_systemd + when: + - not use_systemd - name: start and add that the monitor service to the init sequence (ubuntu) command: initctl emit ceph-mon cluster={{ cluster }} id={{ monitor_name }} changed_when: false failed_when: false - when: not use_systemd + when: + - not use_systemd # NOTE (leseb): somehow 
the service ansible module is messing things up # as a safety measure we run the raw command diff --git a/rundep.sample b/rundep.sample new file mode 100644 index 000000000..2f76e1ed6 --- /dev/null +++ b/rundep.sample @@ -0,0 +1,45 @@ +#Package lines can be commented out with '#' +# +#boost-atomic +#boost-chrono +#boost-date-time +#boost-iostreams +#boost-program +#boost-random +#boost-regex +#boost-system +#boost-thread +#bzip2-libs +#cyrus-sasl-lib +#expat +#fcgi +#fuse-libs +#glibc +#hdparm +#keyutils-libs +#leveldb +#libaio +#libatomic_ops +#libattr +#libblkid +#libcap +#libcom_err +#libcurl +#libgcc +#libicu +#libidn +#libnghttp2 +#libpsl +#libselinux +#libssh2 +#libstdc++ +#libunistring +#nss-softokn-freebl +#openldap +#openssl-libs +#pcre +#python-nose +#python-sphinx +#snappy +#systemd-libs +#zlib diff --git a/rundep_installer.sh b/rundep_installer.sh new file mode 100755 index 000000000..6da916231 --- /dev/null +++ b/rundep_installer.sh @@ -0,0 +1,27 @@ +#!/bin/bash -e +# +# Copyright (C) 2014, 2015 Red Hat +# +# Author: Daniel Lin +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# + +if test -f /etc/redhat-release ; then + PACKAGE_INSTALLER=yum +elif type apt-get > /dev/null 2>&1 ; then + PACKAGE_INSTALLER=apt-get +else + echo "ERROR: Package Installer could not be determined" + exit 1 +fi + +while read p; do + if [[ $p =~ ^#.* ]] ; then + continue + fi + $PACKAGE_INSTALLER install $p -y +done < $1 From 2fd9bbbe09d875805b000b5cd0da689c2f8497dd Mon Sep 17 00:00:00 2001 From: Daniel Gryniewicz Date: Thu, 21 Jul 2016 09:17:19 -0400 Subject: [PATCH 10/27] NFS for FSAL_RGW Add support for FSAL_RGW to the NFS gateway. Both standard and containerized versions are supported. 
--- group_vars/nfss.sample | 22 ++++++++++ .../tasks/installs/install_on_redhat.yml | 30 +++++++++++-- roles/ceph-common/templates/ganesha.conf.j2 | 43 +++++++++++++++++-- roles/ceph-nfs/defaults/main.yml | 22 ++++++++++ roles/ceph-nfs/tasks/docker/copy_configs.yml | 30 ++++++++++--- .../ceph-nfs/tasks/docker/create_configs.yml | 15 +++++++ .../tasks/docker/dirs_permissions.yml | 2 + roles/ceph-nfs/tasks/docker/fetch_configs.yml | 19 +++++--- roles/ceph-nfs/tasks/docker/main.yml | 11 +++-- roles/ceph-nfs/tasks/pre_requisite.yml | 30 ++++++++++++- roles/ceph-nfs/templates/ceph-nfs.service.j2 | 5 +-- .../tasks/docker/start_docker_osd.yml | 4 +- roles/ceph-rgw/tasks/docker/copy_configs.yml | 36 ++++++++++++++++ roles/ceph-rgw/tasks/docker/main.yml | 2 + 14 files changed, 242 insertions(+), 29 deletions(-) create mode 100644 roles/ceph-rgw/tasks/docker/copy_configs.yml diff --git a/group_vars/nfss.sample b/group_vars/nfss.sample index 8e378d247..027949468 100644 --- a/group_vars/nfss.sample +++ b/group_vars/nfss.sample @@ -35,6 +35,28 @@ dummy: #ceph_nfs_pseudo_path: "/ceph" #ceph_nfs_protocols: "3,4" #ceph_nfs_access_type: "RW" +#ceph_nfs_log_file: "/var/log/ganesha.log" + +#################### +# FSAL Ceph Config # +#################### +#ceph_nfs_ceph_export_id: 20134 +#ceph_nfs_ceph_pseudo_path: "/ceph" +#ceph_nfs_ceph_protocols: "3,4" +#ceph_nfs_ceph_access_type: "RW" + +################### +# FSAL RGW Config # +################### +#ceph_nfs_rgw_export_id: 20134 +#ceph_nfs_rgw_pseudo_path: "/ceph" +#ceph_nfs_rgw_protocols: "3,4" +#ceph_nfs_rgw_access_type: "RW" +#ceph_nfs_rgw_user: "cephnfs" +# Note: keys are optional and can be generated, but not on containerized, where +# they must be configured. 
+#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY" +#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C" ################### diff --git a/roles/ceph-common/tasks/installs/install_on_redhat.yml b/roles/ceph-common/tasks/installs/install_on_redhat.yml index 56b3c9121..528358832 100644 --- a/roles/ceph-common/tasks/installs/install_on_redhat.yml +++ b/roles/ceph-common/tasks/installs/install_on_redhat.yml @@ -227,18 +227,40 @@ - rgw_group_name in group_names - ansible_pkg_mgr == "dnf" -- name: install NFS gateway +- name: install nfs ceph gateway yum: name: nfs-ganesha-ceph - state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" when: - nfs_group_name in group_names - ansible_pkg_mgr == "yum" + - fsal_ceph -- name: install NFS gateway +- name: install nfs ceph gateway dnf: name: nfs-ganesha-ceph - state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" when: - nfs_group_name in group_names - ansible_pkg_mgr == "dnf" + - fsal_ceph + +- name: install nfs rgw gateway + yum: + name: "{{ item }}" + with_items: + - nfs-ganesha-rgw + - ceph-radosgw + when: + - nfs_group_name in group_names + - ansible_pkg_mgr == "yum" + - fsal_rgw + +- name: install nfs rgw gateway + dnf: + name: "{{ item }}" + with_items: + - nfs-ganesha-rgw + - ceph-radosgw + when: + - nfs_group_name in group_names + - ansible_pkg_mgr == "dnf" + - fsal_rgw diff --git a/roles/ceph-common/templates/ganesha.conf.j2 b/roles/ceph-common/templates/ganesha.conf.j2 index ab64ea6a3..38bb67b0b 100644 --- a/roles/ceph-common/templates/ganesha.conf.j2 +++ b/roles/ceph-common/templates/ganesha.conf.j2 @@ -1,17 +1,18 @@ #jinja2: trim_blocks: "true", lstrip_blocks: "true" # {{ ansible_managed }} +{% if fsal_ceph %} EXPORT { - Export_ID={{ ceph_nfs_export_id }}; + Export_ID={{ ceph_nfs_ceph_export_id }}; Path = "/"; - Pseudo = {{ ceph_nfs_pseudo_path }}; + Pseudo = {{ ceph_nfs_ceph_pseudo_path }}; - Access_Type = {{ ceph_nfs_access_type }}; + Access_Type = {{ 
ceph_nfs_ceph_access_type }}; - NFS_Protocols = {{ ceph_nfs_protocols }}; + NFS_Protocols = {{ ceph_nfs_ceph_protocols }}; Transport_Protocols = TCP; @@ -21,3 +22,37 @@ EXPORT Name = CEPH; } } +{% endif %} +{% if fsal_rgw %} +EXPORT +{ + Export_ID={{ ceph_nfs_rgw_export_id }}; + + Path = "/"; + + Pseudo = {{ ceph_nfs_rgw_pseudo_path }}; + + Access_Type = {{ ceph_nfs_rgw_access_type }}; + + NFS_Protocols = {{ ceph_nfs_rgw_protocols }}; + + Transport_Protocols = TCP; + + Sectype = sys,krb5,krb5i,krb5p; + + FSAL { + Name = RGW; + User_Id = "{{ ceph_nfs_rgw_user }}"; + Access_Key_Id ="{{ ceph_nfs_rgw_access_key }}"; + Secret_Access_Key = "{{ ceph_nfs_rgw_secret_key }}"; + } +} +{% endif %} + +LOG { + Facility { + name = FILE; + destination = "{{ ceph_nfs_log_file }}"; + enable = active; + } +} diff --git a/roles/ceph-nfs/defaults/main.yml b/roles/ceph-nfs/defaults/main.yml index 64f643e5e..2848361ef 100644 --- a/roles/ceph-nfs/defaults/main.yml +++ b/roles/ceph-nfs/defaults/main.yml @@ -27,6 +27,28 @@ ceph_nfs_export_id: 20134 ceph_nfs_pseudo_path: "/ceph" ceph_nfs_protocols: "3,4" ceph_nfs_access_type: "RW" +ceph_nfs_log_file: "/var/log/ganesha.log" + +#################### +# FSAL Ceph Config # +#################### +ceph_nfs_ceph_export_id: 20134 +ceph_nfs_ceph_pseudo_path: "/ceph" +ceph_nfs_ceph_protocols: "3,4" +ceph_nfs_ceph_access_type: "RW" + +################### +# FSAL RGW Config # +################### +ceph_nfs_rgw_export_id: 20134 +ceph_nfs_rgw_pseudo_path: "/ceph" +ceph_nfs_rgw_protocols: "3,4" +ceph_nfs_rgw_access_type: "RW" +ceph_nfs_rgw_user: "cephnfs" +# Note: keys are optional and can be generated, but not on containerized, where +# they must be configured. 
+#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY" +#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C" ################### diff --git a/roles/ceph-nfs/tasks/docker/copy_configs.yml b/roles/ceph-nfs/tasks/docker/copy_configs.yml index f2ba50e81..8f1f3835a 100644 --- a/roles/ceph-nfs/tasks/docker/copy_configs.yml +++ b/roles/ceph-nfs/tasks/docker/copy_configs.yml @@ -1,10 +1,28 @@ --- -- name: push ceph files to the ansible server - fetch: - src: "{{ item.0 }}" - dest: "{{ fetch_directory }}/docker_mon_files/{{ item.0 }}" - flat: yes +- name: set config and keys paths + set_fact: + ceph_config_keys: + - /etc/ceph/ceph.conf + - /etc/ceph/ceph.client.admin.keyring + - /var/lib/ceph/radosgw/keyring + +- name: stat for config and keys + local_action: stat path={{ fetch_directory }}/docker_mon_files/{{ item }} + with_items: ceph_config_keys + changed_when: false + become: false + failed_when: false + register: statconfig + +- name: try to fetch config and keys + copy: + src: "{{ fetch_directory }}/docker_mon_files/{{ item.0 }}" + dest: "{{ item.0 }}" + owner: "64045" + group: "64045" + mode: 0644 + changed_when: false with_together: - ceph_config_keys - statconfig.results - when: item.1.stat.exists == false + when: item.1.stat.exists == true diff --git a/roles/ceph-nfs/tasks/docker/create_configs.yml b/roles/ceph-nfs/tasks/docker/create_configs.yml index 3a9b51ccc..560ff20fc 100644 --- a/roles/ceph-nfs/tasks/docker/create_configs.yml +++ b/roles/ceph-nfs/tasks/docker/create_configs.yml @@ -7,6 +7,21 @@ group: root mode: 0644 +- name: create the nfs rgw user + docker: + image: "{{ ceph_rgw_docker_username }}/{{ ceph_rgw_docker_imagename }}:{{ ceph_rgw_docker_image_tag }}" + name: ceph-{{ ansible_hostname }}-rgw-user + expose: "{{ ceph_rgw_civetweb_port }}" + ports: "{{ ceph_rgw_civetweb_port }}:{{ ceph_rgw_civetweb_port }}" + state: running + env: "CEPH_DAEMON=RGW_USER,RGW_USERNAME={{ ceph_nfs_rgw_user }},RGW_USER_ACCESS_KEY={{ 
ceph_nfs_rgw_access_key }},RGW_USER_SECRET_KEY={{ ceph_nfs_rgw_secret_key }}" + volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph" + when: fsal_rgw + +- name: get user create output + command: docker logs ceph-{{ ansible_hostname }}-rgw-user + register: rgwuser + - name: generate ganesha configuration file action: config_template args: diff --git a/roles/ceph-nfs/tasks/docker/dirs_permissions.yml b/roles/ceph-nfs/tasks/docker/dirs_permissions.yml index 87ca765d7..b83f36b8c 100644 --- a/roles/ceph-nfs/tasks/docker/dirs_permissions.yml +++ b/roles/ceph-nfs/tasks/docker/dirs_permissions.yml @@ -22,6 +22,7 @@ with_items: - /etc/ceph/ - /var/lib/ceph/ + - /var/lib/ceph/radosgw when: not after_hammer - name: create bootstrap directories (after hammer) @@ -34,6 +35,7 @@ with_items: - /etc/ceph/ - /var/lib/ceph/ + - /var/lib/ceph/radosgw when: after_hammer - name: create ganesha directories diff --git a/roles/ceph-nfs/tasks/docker/fetch_configs.yml b/roles/ceph-nfs/tasks/docker/fetch_configs.yml index bd7f746ae..07e6a40d2 100644 --- a/roles/ceph-nfs/tasks/docker/fetch_configs.yml +++ b/roles/ceph-nfs/tasks/docker/fetch_configs.yml @@ -1,13 +1,12 @@ --- -- name: set config and keys paths +- name: set config paths set_fact: - ceph_config_keys: - - /etc/ceph/ceph.conf + nfs_config_keys: - /etc/ganesha/ganesha.conf - name: stat for config and keys local_action: stat path={{ fetch_directory }}/docker_mon_files/{{ item }} - with_items: ceph_config_keys + with_items: nfs_config_keys changed_when: false become: false failed_when: false @@ -22,6 +21,16 @@ mode: 0644 changed_when: false with_together: - - ceph_config_keys + - nfs_config_keys - statconfig.results when: item.1.stat.exists == true + +- name: push ganesha files to the ansible server + fetch: + src: "{{ item.0 }}" + dest: "{{ fetch_directory }}/docker_mon_files/{{ item.0 }}" + flat: yes + with_together: + - nfs_config_keys + - statconfig.results + when: item.1.stat.exists == false diff --git 
a/roles/ceph-nfs/tasks/docker/main.yml b/roles/ceph-nfs/tasks/docker/main.yml index 1e4d030f9..2507d11de 100644 --- a/roles/ceph-nfs/tasks/docker/main.yml +++ b/roles/ceph-nfs/tasks/docker/main.yml @@ -45,12 +45,19 @@ - include: dirs_permissions.yml -# let the first ganesha create configs and keyrings +# Copy Ceph configs to host +- include: copy_configs.yml + +- include: selinux.yml + when: ansible_os_family == 'RedHat' + +# let the first ganesha create configs and users - include: create_configs.yml when: inventory_hostname == groups.nfss[0] and mon_containerized_default_ceph_conf_with_kv +# Copy Ganesha configs to host - include: fetch_configs.yml when: not mon_containerized_deployment_with_kv @@ -59,5 +66,3 @@ - include: start_docker_nfs.yml -- include: copy_configs.yml - when: not mon_containerized_deployment_with_kv diff --git a/roles/ceph-nfs/tasks/pre_requisite.yml b/roles/ceph-nfs/tasks/pre_requisite.yml index 6119cd40b..695f93cab 100644 --- a/roles/ceph-nfs/tasks/pre_requisite.yml +++ b/roles/ceph-nfs/tasks/pre_requisite.yml @@ -1,5 +1,5 @@ --- -- name: create NFS gateway directories +- name: create nfs gateway directories file: path: "{{ item }}" state: directory @@ -10,7 +10,33 @@ - /var/lib/nfs/ganesha - /var/run/ganesha -- name: start NFS gateway service +- name: create rgw nfs user + command: radosgw-admin user create --uid={{ ceph_nfs_rgw_user }} --display-name="RGW NFS User" + register: rgwuser + when: fsal_rgw + +- name: set access key + set_fact: + ceph_nfs_rgw_access_key: "{{ (rgwuser.stdout | from_json)['keys'][0]['access_key'] }}" + when: fsal_rgw + +- name: set secret key + set_fact: + ceph_nfs_rgw_secret_key: "{{(rgwuser.stdout | from_json)['keys'][0]['secret_key']}}" + when: fsal_rgw + +- name: generate ganesha configuration file + action: config_template + args: + src: "{{ playbook_dir }}/roles/ceph-common/templates/ganesha.conf.j2" + dest: /etc/ganesha/ganesha.conf + owner: "root" + group: "root" + mode: "0644" + config_overrides: "{{ 
ganesha_conf_overrides }}" + config_type: ini + +- name: start nfs gateway service service: name: nfs-ganesha state: started diff --git a/roles/ceph-nfs/templates/ceph-nfs.service.j2 b/roles/ceph-nfs/templates/ceph-nfs.service.j2 index bd8b41b0a..023bcfa77 100644 --- a/roles/ceph-nfs/templates/ceph-nfs.service.j2 +++ b/roles/ceph-nfs/templates/ceph-nfs.service.j2 @@ -7,7 +7,7 @@ After=docker.service EnvironmentFile=-/etc/environment ExecStartPre=-/usr/bin/docker rm %i ExecStartPre=/usr/bin/mkdir -p /etc/ceph /etc/ganesha /var/lib/nfs/ganesha -ExecStart=/usr/bin/docker run --rm --name %i --net=host \ +ExecStart=/usr/bin/docker run --rm --net=host \ {% if not mon_containerized_deployment_with_kv -%} -v /etc/ceph:/etc/ceph \ -v /etc/ganesha:/etc/ganesha \ @@ -18,8 +18,7 @@ ExecStart=/usr/bin/docker run --rm --name %i --net=host \ -v /etc/localtime:/etc/localtime:ro \ --privileged \ -e CEPH_DAEMON=NFS \ - -e CEPH_PUBLIC_NETWORK={{ ceph_mon_docker_subnet }} \ - --name={{ ansible_hostname }} \ + --name=nfs-{{ ansible_hostname }} \ {{ ceph_nfs_docker_username }}/{{ ceph_nfs_docker_imagename }}:{{ ceph_nfs_docker_image_tag }} ExecStopPost=-/usr/bin/docker stop %i Restart=always diff --git a/roles/ceph-osd/tasks/docker/start_docker_osd.yml b/roles/ceph-osd/tasks/docker/start_docker_osd.yml index f466f02b8..9f34a9811 100644 --- a/roles/ceph-osd/tasks/docker/start_docker_osd.yml +++ b/roles/ceph-osd/tasks/docker/start_docker_osd.yml @@ -31,8 +31,8 @@ -v /etc/localtime:/etc/localtime:ro \ -e "OSD_DEVICE={{ item.0 }}" \ -e "{{ ceph_osd_docker_prepare_env }}" \ - "{{ ceph_osd_docker_username }}/{{ ceph_osd_docker_imagename }}:{{ ceph_osd_docker_image_tag }}" \ - OSD_CEPH_DISK_PREPARE + -e CEPH_DAEMON=osd_ceph_disk_prepare \ + "{{ ceph_osd_docker_username }}/{{ ceph_osd_docker_imagename }}:{{ ceph_osd_docker_image_tag }}" with_together: - ceph_osd_docker_devices - osd_prepared.results diff --git a/roles/ceph-rgw/tasks/docker/copy_configs.yml 
b/roles/ceph-rgw/tasks/docker/copy_configs.yml new file mode 100644 index 000000000..3345f1803 --- /dev/null +++ b/roles/ceph-rgw/tasks/docker/copy_configs.yml @@ -0,0 +1,36 @@ +--- +- name: set config and keys paths + set_fact: + rgw_config_keys: + - "/var/lib/ceph/radosgw/{{ ansible_hostname }}/keyring" + when: fsal_rgw + +- name: wait for rgw keyring + wait_for: path="/var/lib/ceph/radosgw/{{ ansible_hostname }}/keyring" + when: + - fsal_rgw + - inventory_hostname == groups.rgws[0] + +- name: stat for config and keys + local_action: stat path={{ fetch_directory }}/docker_mon_files/{{ item }} + with_items: rgw_config_keys + changed_when: false + become: false + failed_when: false + register: statconfig + when: + - fsal_rgw + - inventory_hostname == groups.rgws[0] + +- name: push ceph files to the ansible server + fetch: + src: "{{ item.0 }}" + dest: "{{ fetch_directory }}/docker_mon_files/var/lib/ceph/radosgw/keyring" + flat: yes + with_together: + - rgw_config_keys + - statconfig.results + when: + - item.1.stat.exists == false + - fsal_rgw + - inventory_hostname == groups.rgws[0] diff --git a/roles/ceph-rgw/tasks/docker/main.yml b/roles/ceph-rgw/tasks/docker/main.yml index 360d37b71..63579ed8a 100644 --- a/roles/ceph-rgw/tasks/docker/main.yml +++ b/roles/ceph-rgw/tasks/docker/main.yml @@ -49,3 +49,5 @@ when: ansible_os_family == 'RedHat' - include: start_docker_rgw.yml + +- include: copy_configs.yml From a76bc46d2af0ffd57d47c52d3d489f64a020e0a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Thu, 11 Aug 2016 17:20:07 +0200 Subject: [PATCH 11/27] add shrink playbooks: mons and osds MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We now have the ability to shrink a ceph cluster with the help of 2 new playbooks. 
Even if a lot portions of those are identical I thought I would make more sense to separate both for several reasons: * it is rare to remove mon(s) and osd(s) * this remains a tricky process so to avoid any overlap we keep things * separated For monitors, just select the list of the monitor hostnames you want to delete from the cluster and execute the playbook like this. The hostname must be resolvable. Then run the playbook like this: ansible-playbook shrink-cluster.yml -e mon_host=ceph-mon-01,ceph-mon-02 Are you sure you want to shrink the cluster? [no]: yes For OSDs, just select the list of the OSD id you want to delete from the cluster and execute the playbook like this: ansible-playbook shrink-cluster.yml -e osd_ids=0,2,4 Are you sure you want to shrink the cluster? [no]: yes If you know what you're doing you can run it like this: ansible-playbook shrink-cluster.yml -e ireallymeanit=yes -e osd_ids=0,2,4 Thanks a lot to @SamYaple for his help on the complex variables/fact/filters Signed-off-by: Sébastien Han --- shrink-mon.yml | 142 +++++++++++++++++++++++++++++++++++++++++++++++++ shrink-osd.yml | 131 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 273 insertions(+) create mode 100644 shrink-mon.yml create mode 100644 shrink-osd.yml diff --git a/shrink-mon.yml b/shrink-mon.yml new file mode 100644 index 000000000..93f74c449 --- /dev/null +++ b/shrink-mon.yml @@ -0,0 +1,142 @@ +--- +# This playbook shrinks the Ceph monitors from your cluster +# It can remove any number of monitor(s) from the cluster and ALL THEIR DATA +# +# Use it like this: +# ansible-playbook shrink-mon.yml -e mon_host=ceph-mon01,ceph-mon02 +# Prompts for confirmation to shrink, defaults to no and +# doesn't shrink the cluster. yes shrinks the cluster. +# +# ansible-playbook -e ireallymeanit=yes|no shrink-cluster.yml +# Overrides the prompt using -e option. Can be used in +# automation scripts to avoid interactive prompt. 
+ + +- name: confirm whether user really meant to remove monitor(s) from the ceph cluster + + hosts: + - localhost + + gather_facts: false + become: true + + vars_prompt: + - name: ireallymeanit + prompt: Are you sure you want to shrink the cluster? + default: 'no' + private: no + + tasks: + - include_vars: roles/ceph-common/defaults/main.yml + - include_vars: group_vars/all + + - name: exit playbook, if user did not mean to shrink cluster + fail: + msg: "Exiting shrink-mon playbook, no monitor(s) was/were removed. + To shrink the cluster, either say 'yes' on the prompt or + or use `-e ireallymeanit=yes` on the command line when + invoking the playbook" + when: ireallymeanit != 'yes' + + - name: exit playbook, if no monitor(s) was/were given + fail: + msg: "mon_host must be declared + Exiting shrink-cluster playbook, no monitor(s) was/were removed. + On the command line when invoking the playbook, you can use + -e mon_host=ceph-mon01,ceph-mon02 argument." + when: mon_host is not defined + + - name: test if ceph command exist + command: command -v ceph + changed_when: false + failed_when: false + register: ceph_command + + - name: exit playbook, if ceph command does not exist + debug: + msg: "The ceph command is not available, please install it :(" + run_once: true + when: + - ceph_command.rc != 0 + + - name: exit playbook, if cluster files do not exist + stat: + path: "{{ item }}" + register: ceph_conf_key + with_items: + - /etc/ceph/{{ cluster }}.conf + - /etc/ceph/{{ cluster }}.client.admin.keyring + failed_when: false + + - fail: + msg: "Ceph's configuration file is not present in /etc/ceph" + with_items: "{{ceph_conf_key.results}}" + when: + - item.stat.exists == false + + - name: exit playbook, if can not connect to the cluster + command: timeout 5 ceph --cluster {{ cluster }} health + register: ceph_health + until: ceph_health.stdout.find("HEALTH") > -1 + retries: 5 + delay: 2 + + - name: verify given monitors are reachable + command: ping -c 1 {{ item }} + 
with_items: "{{mon_host.split(',')}}" + register: mon_reachable + failed_when: false + + - fail: + msg: "One or more monitors are not reachable, please check your /etc/hosts or your DNS" + with_items: "{{mon_reachable.results}}" + when: + - item.rc != 0 + + - name: stop monitor service (systemd) + service: + name: ceph-mon@{{ item }} + state: stopped + enabled: no + with_items: "{{mon_host.split(',')}}" + delegate_to: "{{item}}" + failed_when: false + + - name: purge monitor store + file: + path: /var/lib/ceph/mon/{{ cluster }}-{{ item }} + state: absent + with_items: "{{mon_host.split(',')}}" + delegate_to: "{{item}}" + + - name: remove monitor from the quorum + command: ceph --cluster {{ cluster }} mon remove {{ item }} + failed_when: false + with_items: "{{mon_host.split(',')}}" + + # NOTE (leseb): sorry for the 'sleep' command + # but it will take a couple of seconds for other monitors + # to notice that one member has left. + # 'sleep 5' is not that bad and should be sufficient + - name: verify the monitor is out of the cluster + shell: "sleep 5 && ceph --cluster {{ cluster }} -s | grep monmap | sed 's/.*quorum//' | egrep -sq {{ item }}" + with_items: "{{mon_host.split(',')}}" + failed_when: false + register: ceph_health_mon + + - name: please remove the monitor from your ceph configuration file + debug: + msg: "The monitor(s) has/have been successfully removed from the cluster. + Please remove the monitor(s) entry(ies) from the rest of your ceph configuration files, cluster wide." + run_once: true + with_items: "{{ceph_health_mon.results}}" + when: + - item.rc != 0 + + - name: please remove the monitor from your ceph configuration file + fail: + msg: "Monitor(s) appear(s) to still be part of the cluster, please check what happened." 
+ run_once: true + with_items: "{{ceph_health_mon.results}}" + when: + - item.rc == 0 diff --git a/shrink-osd.yml b/shrink-osd.yml new file mode 100644 index 000000000..5fb1bd60f --- /dev/null +++ b/shrink-osd.yml @@ -0,0 +1,131 @@ +--- +# This playbook shrinks Ceph OSDs. +# It can remove any number of OSD(s) from the cluster and ALL THEIR DATA +# +# Use it like this: +# ansible-playbook shrink-osd.yml -e osd_ids=0,2,6 +# Prompts for confirmation to shrink, defaults to no and +# doesn't shrink the cluster. yes shrinks the cluster. +# +# ansible-playbook -e ireallymeanit=yes|no shrink-osd.yml +# Overrides the prompt using -e option. Can be used in +# automation scripts to avoid interactive prompt. + + +- name: confirm whether user really meant to remove osd(s) from the cluster + + hosts: + - localhost + + gather_facts: false + become: true + + vars_prompt: + - name: ireallymeanit + prompt: Are you sure you want to shrink the cluster? + default: 'no' + private: no + + tasks: + - include_vars: roles/ceph-common/defaults/main.yml + - include_vars: group_vars/all + + - name: exit playbook, if user did not mean to shrink cluster + fail: + msg: "Exiting shrink-osd playbook, no osd(s) was/were removed. + To shrink the cluster, either say 'yes' on the prompt + or use `-e ireallymeanit=yes` on the command line when + invoking the playbook" + when: ireallymeanit != 'yes' + + - name: exit playbook, if no osd(s) was/were given + fail: + msg: "osd_ids must be declared + Exiting shrink-osd playbook, no OSD(s) was/were removed. + On the command line when invoking the playbook, you can use + -e osd_ids=0,1,2,3 argument." 
+ when: osd_ids is not defined + + - name: test if ceph command exist + command: command -v ceph + changed_when: false + failed_when: false + register: ceph_command + + - name: exit playbook, if ceph command does not exist + debug: + msg: "The ceph command is not available, please install it :(" + run_once: true + when: + - ceph_command.rc != 0 + + - name: exit playbook, if cluster files do not exist + stat: + path: "{{ item }}" + register: ceph_conf_key + with_items: + - /etc/ceph/{{ cluster }}.conf + - /etc/ceph/{{ cluster }}.client.admin.keyring + failed_when: false + + - fail: + msg: "Ceph's configuration file is not present in /etc/ceph" + with_items: "{{ceph_conf_key.results}}" + when: + - item.stat.exists == false + + - name: exit playbook, if can not connect to the cluster + command: timeout 5 ceph --cluster {{ cluster }} health + register: ceph_health + until: ceph_health.stdout.find("HEALTH") > -1 + retries: 5 + delay: 2 + +# NOTE (leseb): just in case, the complex filters mechanism below does not work anymore. +# This will be a quick and easy fix but will require using the shell module. 
+# - name: find the host where the osd(s) is/are running on +# shell: | +# ceph --cluster {{ cluster }} osd find {{ item }} | grep -Po '(?<="ip": ")[^:]*' +# with_items: "{{osd_ids.split(',')}}" +# register: osd_hosts +# + - name: find the host where the osd(s) is/are running on + command: ceph --cluster {{ cluster }} osd find {{ item }} + with_items: "{{osd_ids.split(',')}}" + register: osd_hosts + + - set_fact: ip_item="{{(item.stdout | from_json).ip}}" + with_items: "{{osd_hosts.results}}" + register: ip_result + + - set_fact: ips="{{ ip_result.results | map(attribute='ansible_facts.ip_item') | list }}" + + - set_fact: real_ips="{{ ips | regex_replace(':[0-9][0-9][0-9][0-9]\/[0-9][0-9][0-9][0-9]', '') }}" + + - name: check if ceph admin key exists on the osd nodes + stat: + path: "/etc/ceph/{{ cluster }}.client.admin.keyring" + register: ceph_admin_key + with_items: "{{real_ips}}" + delegate_to: "{{item}}" + failed_when: false + + - fail: + msg: "The Ceph admin key is not present on the OSD node, please add it and remove it after the playbook is done." 
+ with_items: "{{ceph_admin_key.results}}" + when: + - item.stat.exists == false + + - name: deactivating osd(s) + command: ceph-disk deactivate --cluster {{ cluster }} --deactivate-by-id {{ item.0 }} --mark-out + with_together: + - "{{osd_ids.split(',')}}" + - "{{real_ips}}" + delegate_to: "{{item.1}}" + + - name: destroying osd(s) + command: ceph-disk destroy --cluster {{ cluster }} --destroy-by-id {{ item.0 }} --zap + with_together: + - "{{osd_ids.split(',')}}" + - "{{real_ips}}" + delegate_to: "{{item.1}}" From a0fc1becc80837dc4c5584ac448399bf685d282d Mon Sep 17 00:00:00 2001 From: Ivan Font Date: Fri, 12 Aug 2016 17:45:55 -0700 Subject: [PATCH 12/27] Fix ceph.conf template for containerized deployment Signed-off-by: Ivan Font --- roles/ceph-common/templates/ceph.conf.j2 | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/roles/ceph-common/templates/ceph.conf.j2 b/roles/ceph-common/templates/ceph.conf.j2 index ebff7b59e..12c039239 100644 --- a/roles/ceph-common/templates/ceph.conf.j2 +++ b/roles/ceph-common/templates/ceph.conf.j2 @@ -18,20 +18,24 @@ osd crush chooseleaf type = 0 {# NOTE (leseb): the blank lines in-between are needed otherwise we won't get any line break #} {% if groups[mon_group_name] is defined %} mon_initial_members = {% if groups[mon_group_name] is defined %}{% for host in groups[mon_group_name] %}{% if hostvars[host]['ansible_fqdn'] is defined and mon_use_fqdn %}{{ hostvars[host]['ansible_fqdn'] }}{% if not loop.last %},{% endif %}{% elif hostvars[host]['ansible_hostname'] is defined %}{{ hostvars[host]['ansible_hostname'] }}{% if not loop.last %},{% endif %}{% endif %}{% endfor %}{% endif %} +{% endif %} -mon_host = {% if groups[mon_group_name] is defined %}{% for host in groups[mon_group_name] %}{{ hostvars[host]['ansible_' + monitor_interface]['ipv4']['address'] }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %} +{% if not mon_containerized_deployment and not mon_containerized_deployment_with_kv %} +{% if 
groups[mon_group_name] is defined %} +mon host = {% for host in groups[mon_group_name] %}{{ hostvars[host]['ansible_' + monitor_interface]['ipv4']['address'] }}{% if not loop.last %},{% endif %}{% endfor %} {% elif (hostvars[host]['monitor_interface'] is defined and hostvars[host]['monitor_interface'] != "interface") or monitor_interface != "interface" %} {% include 'mon_addr_interface.j2' %} {% else %} {% include 'mon_addr_address.j2' %} {% endif %} +{% endif %} {% if mon_containerized_deployment %} fsid = {{ fsid }} {% if groups[mon_group_name] is defined %} {% for host in groups[mon_group_name] %} {% if mon_containerized_deployment %} {% set interface = ["ansible_",ceph_mon_docker_interface]|join %} -mon_host = {{ hostvars[host]['ansible_' + interface]['ipv4']['address'] }} +mon_host = {{ hostvars[host][interface]['ipv4']['address'] }} {% if not loop.last %},{% endif %} {% elif hostvars[host]['monitor_address'] is defined %} mon_host = {{ hostvars[host]['monitor_address'] }} From 290b83cd0655e355e61677a4b3934ee6400ae38b Mon Sep 17 00:00:00 2001 From: Alfredo Deza Date: Fri, 12 Aug 2016 11:09:28 -0400 Subject: [PATCH 13/27] group_vars: convert ceph_stable_rh_storage to ceph_rhcs Signed-off-by: Alfredo Deza Resolves: issue#811 --- group_vars/all.sample | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/group_vars/all.sample b/group_vars/all.sample index 4d3e7e15a..0779a6269 100644 --- a/group_vars/all.sample +++ b/group_vars/all.sample @@ -144,15 +144,15 @@ dummy: # time. Please use ELRepo's latest upstream 4.x kernels if you want to run CephFS # on RHEL 7. # -#ceph_stable_rh_storage: false +#ceph_rhcs: false # This will affect how/what repositories are enabled depending on the desired # version. The next version will use "2" not "2.0" which would not work. 
-#ceph_stable_rh_storage_version: 1.3 # next version is 2 -#ceph_stable_rh_storage_cdn_install: false # assumes all the nodes can connect to cdn.redhat.com -#ceph_stable_rh_storage_iso_install: false # usually used when nodes don't have access to cdn.redhat.com -#ceph_stable_rh_storage_iso_path: -#ceph_stable_rh_storage_mount_path: /tmp/rh-storage-mount -#ceph_stable_rh_storage_repository_path: /tmp/rh-storage-repo # where to copy iso's content +#ceph_rhcs_version: 1.3 # next version is 2 +#ceph_rhcs_cdn_install: false # assumes all the nodes can connect to cdn.redhat.com +#ceph_rhcs_iso_install: false # usually used when nodes don't have access to cdn.redhat.com +#ceph_rhcs_iso_path: +#ceph_rhcs_mount_path: /tmp/rh-storage-mount +#ceph_rhcs_repository_path: /tmp/rh-storage-repo # where to copy iso's content # UBUNTU CLOUD ARCHIVE From a37e2f7a1ce5b743c6839b5afa163b6d1341f4d3 Mon Sep 17 00:00:00 2001 From: Alfredo Deza Date: Fri, 12 Aug 2016 11:26:44 -0400 Subject: [PATCH 14/27] ceph-common: convert ceph_stable_rh_storage to ceph_rhcs in README Signed-off-by: Alfredo Deza Resolves: issue#811 --- roles/ceph-common/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/ceph-common/README.md b/roles/ceph-common/README.md index 6c1b81fd1..3a09fae4e 100644 --- a/roles/ceph-common/README.md +++ b/roles/ceph-common/README.md @@ -25,7 +25,7 @@ Have a look at `defaults/main.yml`. 
* Install source, choose one of these: * `ceph_stable` * `ceph_dev` - * `ceph_stable_rh_storage` + * `ceph_rhcs` * `ceph_custom` * `journal_size` * `monitor_interface` From 492518a2cd97306fa0877b8cfb5b754709211e38 Mon Sep 17 00:00:00 2001 From: Alfredo Deza Date: Fri, 12 Aug 2016 11:27:01 -0400 Subject: [PATCH 15/27] ceph-common: convert ceph_stable_rh_storage to ceph_rhcs in defaults Signed-off-by: Alfredo Deza Resolves: issue#811 --- roles/ceph-common/defaults/main.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/roles/ceph-common/defaults/main.yml b/roles/ceph-common/defaults/main.yml index fe9d16849..3f6dc4ccd 100644 --- a/roles/ceph-common/defaults/main.yml +++ b/roles/ceph-common/defaults/main.yml @@ -136,15 +136,15 @@ ceph_stable_redhat_distro: el7 # time. Please use ELRepo's latest upstream 4.x kernels if you want to run CephFS # on RHEL 7. # -ceph_stable_rh_storage: false +ceph_rhcs: false # This will affect how/what repositories are enabled depending on the desired # version. The next version will use "2" not "2.0" which would not work. 
-ceph_stable_rh_storage_version: 1.3 # next version is 2 -ceph_stable_rh_storage_cdn_install: false # assumes all the nodes can connect to cdn.redhat.com -ceph_stable_rh_storage_iso_install: false # usually used when nodes don't have access to cdn.redhat.com -#ceph_stable_rh_storage_iso_path: -ceph_stable_rh_storage_mount_path: /tmp/rh-storage-mount -ceph_stable_rh_storage_repository_path: /tmp/rh-storage-repo # where to copy iso's content +ceph_rhcs_version: 1.3 # next version is 2 +ceph_rhcs_cdn_install: false # assumes all the nodes can connect to cdn.redhat.com +ceph_rhcs_iso_install: false # usually used when nodes don't have access to cdn.redhat.com +#ceph_rhcs_iso_path: +ceph_rhcs_mount_path: /tmp/rh-storage-mount +ceph_rhcs_repository_path: /tmp/rh-storage-repo # where to copy iso's content # UBUNTU CLOUD ARCHIVE From b41c84bb0d91d1312e289b48d8339cb2ffa39680 Mon Sep 17 00:00:00 2001 From: Alfredo Deza Date: Fri, 12 Aug 2016 11:27:43 -0400 Subject: [PATCH 16/27] ceph-common: convert ceph_stable_rh_storage to ceph_rhcs in repo template Signed-off-by: Alfredo Deza Resolves: issue#811 --- roles/ceph-common/templates/redhat_storage_repo.j2 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/ceph-common/templates/redhat_storage_repo.j2 b/roles/ceph-common/templates/redhat_storage_repo.j2 index 45828a83c..16f57c483 100644 --- a/roles/ceph-common/templates/redhat_storage_repo.j2 +++ b/roles/ceph-common/templates/redhat_storage_repo.j2 @@ -1,21 +1,21 @@ # {{ ansible_managed }} [rh_storage_mon] name=Red Hat Ceph Storage - local packages for Ceph monitor -baseurl=file://{{ ceph_stable_rh_storage_repository_path }}/MON +baseurl=file://{{ ceph_rhcs_repository_path }}/MON enabled=1 gpgcheck=1 priority=1 [rh_storage_osd] name=Red Hat Ceph Storage - local packages for Ceph OSD -baseurl=file://{{ ceph_stable_rh_storage_repository_path }}/OSD +baseurl=file://{{ ceph_rhcs_repository_path }}/OSD enabled=1 gpgcheck=1 priority=1 [rh_storage_tools] 
name=Red Hat Ceph Storage - local packages for Ceph client, MDS, and RGW -baseurl=file://{{ ceph_stable_rh_storage_repository_path }}/Tools +baseurl=file://{{ ceph_rhcs_repository_path }}/Tools enabled=1 gpgcheck=1 priority=1 From 23051e7ea5658e1daf0ffb8e404d70fdc4805c94 Mon Sep 17 00:00:00 2001 From: Alfredo Deza Date: Fri, 12 Aug 2016 11:28:03 -0400 Subject: [PATCH 17/27] ceph-common: convert ceph_stable_rh_storage to ceph_rhcs in main task Signed-off-by: Alfredo Deza Resolves: issue#811 --- roles/ceph-common/tasks/main.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/roles/ceph-common/tasks/main.yml b/roles/ceph-common/tasks/main.yml index cd87f5e70..54edb2256 100644 --- a/roles/ceph-common/tasks/main.yml +++ b/roles/ceph-common/tasks/main.yml @@ -11,15 +11,15 @@ - include: ./pre_requisites/prerequisite_rh_storage_iso_install.yml when: - - ceph_stable_rh_storage - - ceph_stable_rh_storage_iso_install + - ceph_rhcs + - ceph_rhcs_iso_install tags: - package-install - include: ./pre_requisites/prerequisite_rh_storage_cdn_install.yml when: - - ceph_stable_rh_storage - - ceph_stable_rh_storage_cdn_install + - ceph_rhcs + - ceph_rhcs_cdn_install - ansible_os_family == "RedHat" tags: - package-install @@ -27,28 +27,28 @@ - include: ./installs/install_on_redhat.yml when: ansible_os_family == 'RedHat' and - not ceph_stable_rh_storage_iso_install + not ceph_rhcs_iso_install tags: - package-install - include: ./installs/install_rh_storage_on_redhat.yml when: - ansible_distribution == "RedHat" - - ceph_stable_rh_storage + - ceph_rhcs tags: - package-install - include: ./installs/install_on_debian.yml when: - ansible_os_family == 'Debian' - - not ceph_stable_rh_storage + - not ceph_rhcs tags: - package-install - include: ./installs/install_rh_storage_on_debian.yml when: - ansible_os_family == 'Debian' - - ceph_stable_rh_storage + - ceph_rhcs tags: - package-install From efe2c9e518a6b177816706bc605c746f504e1c31 Mon Sep 17 00:00:00 2001 
From: Alfredo Deza Date: Fri, 12 Aug 2016 11:28:26 -0400 Subject: [PATCH 18/27] ceph-common: convert ceph_stable_rh_storage to ceph_rhcs in system checks Signed-off-by: Alfredo Deza Resolves: issue#811 --- roles/ceph-common/tasks/checks/check_system.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/ceph-common/tasks/checks/check_system.yml b/roles/ceph-common/tasks/checks/check_system.yml index 9338a3e5a..21e348c96 100644 --- a/roles/ceph-common/tasks/checks/check_system.yml +++ b/roles/ceph-common/tasks/checks/check_system.yml @@ -18,7 +18,7 @@ fail: msg: "Distribution not supported {{ ansible_distribution_version }} by Red Hat Ceph Storage, only RHEL 7.1" when: - - ceph_stable_rh_storage + - ceph_rhcs - ansible_distribution_version | version_compare('7.1', '<') - name: fail on unsupported distribution for ubuntu cloud archive From 30494497b0310cabfeabd18cb4972e3372e22779 Mon Sep 17 00:00:00 2001 From: Alfredo Deza Date: Fri, 12 Aug 2016 11:28:55 -0400 Subject: [PATCH 19/27] ceph-common: convert ceph_stable_rh_storage to ceph_rhcs in mandatory vars Signed-off-by: Alfredo Deza Resolves: issue#811 --- .../ceph-common/tasks/checks/check_mandatory_vars.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/roles/ceph-common/tasks/checks/check_mandatory_vars.yml b/roles/ceph-common/tasks/checks/check_mandatory_vars.yml index e317589b7..ed800d992 100644 --- a/roles/ceph-common/tasks/checks/check_mandatory_vars.yml +++ b/roles/ceph-common/tasks/checks/check_mandatory_vars.yml @@ -16,18 +16,18 @@ - ceph_origin == 'upstream' - not ceph_stable - not ceph_dev - - not ceph_stable_rh_storage + - not ceph_rhcs - not ceph_stable_uca tags: - package-install - name: verify that a method was chosen for red hat storage fail: - msg: "choose between ceph_stable_rh_storage_cdn_install and ceph_stable_rh_storage_iso_install" + msg: "choose between ceph_rhcs_cdn_install and ceph_rhcs_iso_install" when: - - ceph_stable_rh_storage - - not 
ceph_stable_rh_storage_cdn_install - - not ceph_stable_rh_storage_iso_install + - ceph_rhcs + - not ceph_rhcs_cdn_install + - not ceph_rhcs_iso_install - ceph_origin == "upstream" tags: - package-install From 90730a7f4b86695be7e8ee7f5cb3203fd3cdb408 Mon Sep 17 00:00:00 2001 From: Alfredo Deza Date: Fri, 12 Aug 2016 11:29:39 -0400 Subject: [PATCH 20/27] ceph-common: convert ceph_stable_rh_storage to ceph_rhcs when installing rhs on debian Signed-off-by: Alfredo Deza Resolves: issue#811 --- .../installs/install_rh_storage_on_debian.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/roles/ceph-common/tasks/installs/install_rh_storage_on_debian.yml b/roles/ceph-common/tasks/installs/install_rh_storage_on_debian.yml index 8bc5452a9..b82ca8833 100644 --- a/roles/ceph-common/tasks/installs/install_rh_storage_on_debian.yml +++ b/roles/ceph-common/tasks/installs/install_rh_storage_on_debian.yml @@ -5,15 +5,15 @@ # intelligent way to determine the location of the key. 
- name: install the rh ceph storage repository key apt_key: - file: "{{ ceph_stable_rh_storage_repository_path }}/MON/release.asc" + file: "{{ ceph_rhcs_repository_path }}/MON/release.asc" state: present when: - - ceph_stable_rh_storage - - ceph_stable_rh_storage_iso_install + - ceph_rhcs + - ceph_rhcs_iso_install - name: add rh ceph storage repositories apt_repository: - repo: "deb file://{{ ceph_stable_rh_storage_repository_path }}/{{ item }}/ {{ ansible_lsb.codename }} main" + repo: "deb file://{{ ceph_rhcs_repository_path }}/{{ item }}/ {{ ansible_lsb.codename }} main" state: present changed_when: false with_items: @@ -22,14 +22,14 @@ - "Tools" - "Agent" when: - - ceph_stable_rh_storage - - ceph_stable_rh_storage_iso_install + - ceph_rhcs + - ceph_rhcs_iso_install - name: add the red hat storage apt-key apt_key: data: "{{ lookup('file', role_path+'/files/cephstablerhcs.asc') }}" state: present - when: not ceph_stable_rh_storage_iso_install + when: not ceph_rhcs_iso_install - name: install dependencies apt: From a4036459443a07da3a9f2354dc8641a7f786569a Mon Sep 17 00:00:00 2001 From: Alfredo Deza Date: Fri, 12 Aug 2016 11:30:06 -0400 Subject: [PATCH 21/27] ceph-common: convert ceph_stable_rh_storage to ceph_rhcs when installing rhs on redhat Signed-off-by: Alfredo Deza Resolves: issue#811 --- .../tasks/installs/install_rh_storage_on_redhat.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/ceph-common/tasks/installs/install_rh_storage_on_redhat.yml b/roles/ceph-common/tasks/installs/install_rh_storage_on_redhat.yml index dfdfba574..b950e1136 100644 --- a/roles/ceph-common/tasks/installs/install_rh_storage_on_redhat.yml +++ b/roles/ceph-common/tasks/installs/install_rh_storage_on_redhat.yml @@ -1,10 +1,10 @@ --- - name: install red hat storage repository key rpm_key: - key: "{{ ceph_stable_rh_storage_repository_path }}/RPM-GPG-KEY-redhat-release" + key: "{{ ceph_rhcs_repository_path }}/RPM-GPG-KEY-redhat-release" state: present when: 
- - ceph_stable_rh_storage_iso_install + - ceph_rhcs_iso_install - name: add red hat storage repository template: @@ -14,7 +14,7 @@ group: root mode: 0644 when: - - ceph_stable_rh_storage_iso_install + - ceph_rhcs_iso_install - name: install dependencies yum: From 1681a3ecb672042e0d366c41ffa49a44b5e5f676 Mon Sep 17 00:00:00 2001 From: Alfredo Deza Date: Fri, 12 Aug 2016 11:30:41 -0400 Subject: [PATCH 22/27] ceph-common: convert ceph_stable_rh_storage to ceph_rhcs in prerequisites for cdn install Signed-off-by: Alfredo Deza Resolves: issue#811 --- .../prerequisite_rh_storage_cdn_install.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/roles/ceph-common/tasks/pre_requisites/prerequisite_rh_storage_cdn_install.yml b/roles/ceph-common/tasks/pre_requisites/prerequisite_rh_storage_cdn_install.yml index 4c1583066..d2d7ab864 100644 --- a/roles/ceph-common/tasks/pre_requisites/prerequisite_rh_storage_cdn_install.yml +++ b/roles/ceph-common/tasks/pre_requisites/prerequisite_rh_storage_cdn_install.yml @@ -5,42 +5,42 @@ changed_when: false - name: check if the red hat storage monitor repo is already present - shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-{{ ceph_stable_rh_storage_version }}-mon-rpms + shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-{{ ceph_rhcs_version }}-mon-rpms changed_when: false failed_when: false register: rh_storage_mon_repo when: mon_group_name in group_names - name: enable red hat storage monitor repository - command: subscription-manager repos --enable rhel-7-server-rhceph-{{ ceph_stable_rh_storage_version }}-mon-rpms + command: subscription-manager repos --enable rhel-7-server-rhceph-{{ ceph_rhcs_version }}-mon-rpms changed_when: false when: - mon_group_name in group_names - rh_storage_mon_repo.rc != 0 - name: check if the red hat storage osd repo is already present - shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-{{ 
ceph_stable_rh_storage_version }}-osd-rpms + shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-{{ ceph_rhcs_version }}-osd-rpms changed_when: false failed_when: false register: rh_storage_osd_repo when: osd_group_name in group_names - name: enable red hat storage osd repository - command: subscription-manager repos --enable rhel-7-server-rhceph-{{ ceph_stable_rh_storage_version }}-osd-rpms + command: subscription-manager repos --enable rhel-7-server-rhceph-{{ ceph_rhcs_version }}-osd-rpms changed_when: false when: - osd_group_name in group_names - rh_storage_osd_repo.rc != 0 - name: check if the red hat storage rados gateway repo is already present - shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-{{ ceph_stable_rh_storage_version }}-tools-rpms + shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-{{ ceph_rhcs_version }}-tools-rpms changed_when: false failed_when: false register: rh_storage_rgw_repo when: rgw_group_name in group_names - name: enable red hat storage rados gateway repository - command: subscription-manager repos --enable rhel-7-server-rhceph-{{ ceph_stable_rh_storage_version }}-tools-rpms + command: subscription-manager repos --enable rhel-7-server-rhceph-{{ ceph_rhcs_version }}-tools-rpms changed_when: false when: - rgw_group_name in group_names From 3037b756243fd2d62dab9e391df2934bbcb45c5e Mon Sep 17 00:00:00 2001 From: Alfredo Deza Date: Fri, 12 Aug 2016 11:31:01 -0400 Subject: [PATCH 23/27] ceph-common: convert ceph_stable_rh_storage to ceph_rhcs in prerequisites for iso install Signed-off-by: Alfredo Deza Resolves: issue#811 --- .../prerequisite_rh_storage_iso_install.yml | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/roles/ceph-common/tasks/pre_requisites/prerequisite_rh_storage_iso_install.yml b/roles/ceph-common/tasks/pre_requisites/prerequisite_rh_storage_iso_install.yml index c376e6b56..e589fec34 100644 --- 
a/roles/ceph-common/tasks/pre_requisites/prerequisite_rh_storage_iso_install.yml +++ b/roles/ceph-common/tasks/pre_requisites/prerequisite_rh_storage_iso_install.yml @@ -4,40 +4,40 @@ path: "{{ item }}" state: directory with_items: - - "{{ ceph_stable_rh_storage_mount_path }}" - - "{{ ceph_stable_rh_storage_repository_path }}" + - "{{ ceph_rhcs_mount_path }}" + - "{{ ceph_rhcs_repository_path }}" - name: ensure destination iso directory exists file: - path: "{{ ceph_stable_rh_storage_iso_path | dirname }}" + path: "{{ ceph_rhcs_iso_path | dirname }}" state: directory recurse: yes - when: "'{{ ceph_stable_rh_storage_iso_path | dirname }}' != '/'" + when: "'{{ ceph_rhcs_iso_path | dirname }}' != '/'" - name: fetch the red hat storage iso from the ansible server copy: - src: "{{ ceph_stable_rh_storage_iso_path }}" - dest: "{{ ceph_stable_rh_storage_iso_path }}" + src: "{{ ceph_rhcs_iso_path }}" + dest: "{{ ceph_rhcs_iso_path }}" -# assumption: ceph_stable_rh_storage_mount_path does not specify directory +# assumption: ceph_rhcs_mount_path does not specify directory - name: mount red hat storage iso file mount: - name: "{{ ceph_stable_rh_storage_mount_path }}" - src: "{{ ceph_stable_rh_storage_iso_path }}" + name: "{{ ceph_rhcs_mount_path }}" + src: "{{ ceph_rhcs_iso_path }}" fstype: iso9660 opts: ro,loop,noauto passno: 2 state: mounted - name: copy red hat storage iso content - shell: cp -r {{ ceph_stable_rh_storage_mount_path }}/* {{ ceph_stable_rh_storage_repository_path }} + shell: cp -r {{ ceph_rhcs_mount_path }}/* {{ ceph_rhcs_repository_path }} args: - creates: "{{ ceph_stable_rh_storage_repository_path }}/README" + creates: "{{ ceph_rhcs_repository_path }}/README" - name: unmount red hat storage iso file mount: - name: "{{ ceph_stable_rh_storage_mount_path }}" - src: "{{ ceph_stable_rh_storage_iso_path }}" + name: "{{ ceph_rhcs_mount_path }}" + src: "{{ ceph_rhcs_iso_path }}" fstype: iso9660 state: unmounted From dde346ff30adbf5aef522613101dec9d028c78e0 Mon Sep 
17 00:00:00 2001 From: Christoph Dwertmann Date: Tue, 16 Aug 2016 15:56:40 +1000 Subject: [PATCH 24/27] Remove duplicate "max open files" --- roles/ceph-common/templates/ceph.conf.j2 | 1 - 1 file changed, 1 deletion(-) diff --git a/roles/ceph-common/templates/ceph.conf.j2 b/roles/ceph-common/templates/ceph.conf.j2 index ebff7b59e..35de51a2d 100644 --- a/roles/ceph-common/templates/ceph.conf.j2 +++ b/roles/ceph-common/templates/ceph.conf.j2 @@ -50,7 +50,6 @@ public_network = {{ public_network }} {% if cluster_network is defined %} cluster_network = {{ cluster_network }} {% endif %} -max open files = {{ max_open_files }} {% if common_single_host_mode is defined %} osd crush chooseleaf type = 0 {% endif %} From 884e7cc26beab7a5a69542b3d7e78555f6ed785b Mon Sep 17 00:00:00 2001 From: Ivan Font Date: Tue, 16 Aug 2016 12:07:45 -0700 Subject: [PATCH 25/27] Add rbd-mirror to vagrant variables atomic sample Signed-off-by: Ivan Font --- vagrant_variables.yml.atomic | 1 + 1 file changed, 1 insertion(+) diff --git a/vagrant_variables.yml.atomic b/vagrant_variables.yml.atomic index 63ea2b227..b41136a22 100644 --- a/vagrant_variables.yml.atomic +++ b/vagrant_variables.yml.atomic @@ -8,6 +8,7 @@ osd_vms: 1 mds_vms: 0 rgw_vms: 0 nfs_vms: 0 +rbd_mirror_vms: 0 client_vms: 0 # Deploy RESTAPI on each of the Monitors From fde819d1a848260eecd38fd921754a8c44c2a87e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Wed, 17 Aug 2016 11:48:42 +0200 Subject: [PATCH 26/27] create a directory for infrastructure playbooks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since we have a couple of infrastructure related playbooks (additionnally to the roles we are using to deploy Ceph), it makes sense to have them located in a separate directory. 
Signed-off-by: Sébastien Han --- infrastructure-playbooks/README.md | 5 +++++ .../cluster-maintenance.yml | 0 .../cluster-os-migration.yml | 0 .../localrepo-site.yml.sample | 0 .../osd-configure.yml | 0 .../purge-cluster.yml | 0 .../purge-docker-cluster.yml | 0 .../rolling_update.yml | 0 shrink-mon.yml => infrastructure-playbooks/shrink-mon.yml | 0 shrink-osd.yml => infrastructure-playbooks/shrink-osd.yml | 0 .../take-over-existing-cluster.yml | 0 11 files changed, 5 insertions(+) create mode 100644 infrastructure-playbooks/README.md rename cluster-maintenance.yml => infrastructure-playbooks/cluster-maintenance.yml (100%) rename cluster-os-migration.yml => infrastructure-playbooks/cluster-os-migration.yml (100%) rename localrepo-site.yml.sample => infrastructure-playbooks/localrepo-site.yml.sample (100%) rename osd-configure.yml => infrastructure-playbooks/osd-configure.yml (100%) rename purge-cluster.yml => infrastructure-playbooks/purge-cluster.yml (100%) rename purge-docker-cluster.yml => infrastructure-playbooks/purge-docker-cluster.yml (100%) rename rolling_update.yml => infrastructure-playbooks/rolling_update.yml (100%) rename shrink-mon.yml => infrastructure-playbooks/shrink-mon.yml (100%) rename shrink-osd.yml => infrastructure-playbooks/shrink-osd.yml (100%) rename take-over-existing-cluster.yml => infrastructure-playbooks/take-over-existing-cluster.yml (100%) diff --git a/infrastructure-playbooks/README.md b/infrastructure-playbooks/README.md new file mode 100644 index 000000000..ba32864c0 --- /dev/null +++ b/infrastructure-playbooks/README.md @@ -0,0 +1,5 @@ +Infrastructure playbooks +======================== + +This directory contains a variety of playbooks that can be used independently of the Ceph roles we have. +They aim to perform infrastructure related tasks that would help use managing a Ceph cluster or performing certain operational tasks. 
diff --git a/cluster-maintenance.yml b/infrastructure-playbooks/cluster-maintenance.yml similarity index 100% rename from cluster-maintenance.yml rename to infrastructure-playbooks/cluster-maintenance.yml diff --git a/cluster-os-migration.yml b/infrastructure-playbooks/cluster-os-migration.yml similarity index 100% rename from cluster-os-migration.yml rename to infrastructure-playbooks/cluster-os-migration.yml diff --git a/localrepo-site.yml.sample b/infrastructure-playbooks/localrepo-site.yml.sample similarity index 100% rename from localrepo-site.yml.sample rename to infrastructure-playbooks/localrepo-site.yml.sample diff --git a/osd-configure.yml b/infrastructure-playbooks/osd-configure.yml similarity index 100% rename from osd-configure.yml rename to infrastructure-playbooks/osd-configure.yml diff --git a/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml similarity index 100% rename from purge-cluster.yml rename to infrastructure-playbooks/purge-cluster.yml diff --git a/purge-docker-cluster.yml b/infrastructure-playbooks/purge-docker-cluster.yml similarity index 100% rename from purge-docker-cluster.yml rename to infrastructure-playbooks/purge-docker-cluster.yml diff --git a/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml similarity index 100% rename from rolling_update.yml rename to infrastructure-playbooks/rolling_update.yml diff --git a/shrink-mon.yml b/infrastructure-playbooks/shrink-mon.yml similarity index 100% rename from shrink-mon.yml rename to infrastructure-playbooks/shrink-mon.yml diff --git a/shrink-osd.yml b/infrastructure-playbooks/shrink-osd.yml similarity index 100% rename from shrink-osd.yml rename to infrastructure-playbooks/shrink-osd.yml diff --git a/take-over-existing-cluster.yml b/infrastructure-playbooks/take-over-existing-cluster.yml similarity index 100% rename from take-over-existing-cluster.yml rename to infrastructure-playbooks/take-over-existing-cluster.yml From 91d2c3b32dc79fdd1e29feb29ce6abd12b90cd79 
Mon Sep 17 00:00:00 2001 From: asbishop Date: Thu, 7 Jul 2016 15:41:53 -0400 Subject: [PATCH 27/27] Fix pre-infernalis RPM installation of ceph-radosgw For pre-infernalis installation of ceph-radosgw from RPM, run 'chkconfig' to ensure systemd's ceph-radosgw.service is created. This fixes issue #843. --- roles/ceph-rgw/tasks/pre_requisite.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/roles/ceph-rgw/tasks/pre_requisite.yml b/roles/ceph-rgw/tasks/pre_requisite.yml index 44140dc1d..c4306ed27 100644 --- a/roles/ceph-rgw/tasks/pre_requisite.yml +++ b/roles/ceph-rgw/tasks/pre_requisite.yml @@ -41,6 +41,14 @@ group: "{{ key_group }}" when: cephx +- name: ensure ceph-radosgw systemd unit file is present + command: chkconfig --add ceph-radosgw + args: + creates: /var/run/systemd/generator.late/ceph-radosgw.service + when: + - ansible_os_family == "RedHat" + - is_before_infernalis + - name: activate rados gateway with upstart file: path: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/{{ item }}