--- # ceph-common - block: - name: create ceph conf directory file: path: "/etc/ceph" state: directory owner: "ceph" group: "ceph" mode: "0755" - block: - name: count number of osds for ceph-disk scenarios set_fact: num_osds: "{{ devices | length | int }}" when: - devices | default([]) | length > 0 - osd_scenario in ['collocated', 'non-collocated'] - name: count number of osds for lvm scenario set_fact: num_osds: "{{ lvm_volumes | length | int }}" when: - lvm_volumes | default([]) | length > 0 - osd_scenario == 'lvm' # This is a best guess. Ideally we'd like to use `ceph-volume lvm batch --report` to get # a more accurate number but the ceph.conf needs to be in place before that is possible. # There is a tracker to add functionality to ceph-volume which would allow doing this # without the need for a ceph.conf: http://tracker.ceph.com/issues/36088 - name: count number of osds for lvm batch scenario set_fact: num_osds: "{{ devices | length | int * osds_per_device | default(1) }}" when: - devices | default([]) | length > 0 - osd_scenario == 'lvm' when: - inventory_hostname in groups.get(osd_group_name, []) - name: "generate ceph configuration file: {{ cluster }}.conf" action: config_template args: src: ceph.conf.j2 dest: /etc/ceph/{{ cluster }}.conf owner: "ceph" group: "ceph" mode: "0644" config_overrides: "{{ ceph_conf_overrides }}" config_type: ini notify: - restart ceph mons - restart ceph osds - restart ceph mdss - restart ceph rgws - restart ceph mgrs - restart ceph rbdmirrors - name: "ensure fetch directory exists" run_once: true become: false local_action: module: file path: "{{ fetch_directory }}/{{ fsid }}/etc/ceph" state: directory mode: "0755" when: - ceph_conf_local - name: "generate {{ cluster }}.conf configuration file locally" local_action: config_template become: false run_once: true args: src: "ceph.conf.j2" dest: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.conf" config_overrides: "{{ ceph_conf_overrides }}" config_type: ini when: - inventory_hostname in groups[mon_group_name] - ceph_conf_local when: - not containerized_deployment|bool # ceph-docker-common # only create fetch directory when: # we are not populating kv_store with default ceph.conf AND host is a mon # OR # we are not population kv_store with default ceph.conf AND there at least 1 nfs in nfs group AND host is the first nfs - block: - name: create a local fetch directory if it does not exist local_action: module: file path: "{{ fetch_directory }}" state: directory changed_when: false become: false run_once: true when: - (cephx or generate_fsid) - (not mon_containerized_default_ceph_conf_with_kv and (inventory_hostname in groups.get(mon_group_name, []))) or (not mon_containerized_default_ceph_conf_with_kv and ((groups.get(nfs_group_name, []) | length > 0) and (inventory_hostname == groups.get(nfs_group_name, [])[0]))) - name: generate cluster uuid local_action: module: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf" register: cluster_uuid become: false when: - generate_fsid - name: read cluster uuid if it already exists local_action: module: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf removes: "{{ fetch_directory }}/ceph_cluster_uuid.conf" changed_when: false register: cluster_uuid check_mode: no become: false when: - generate_fsid - name: ensure /etc/ceph exists file: path: /etc/ceph state: directory owner: "{{ ceph_uid }}" group: "{{ ceph_uid }}" mode: 0755 - name: "generate {{ cluster }}.conf configuration file" action: config_template args: src: "ceph.conf.j2" dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.conf" owner: "root" group: "root" mode: "0644" config_overrides: "{{ ceph_conf_overrides }}" config_type: ini notify: - restart ceph mons - restart ceph osds - restart ceph mdss - restart ceph rgws - restart ceph mgrs - restart ceph rbdmirrors - name: set fsid fact when generate_fsid = true set_fact: fsid: "{{ cluster_uuid.stdout }}" when: - generate_fsid when: - containerized_deployment|bool