# ceph-ansible/roles/ceph-config/tasks/main.yml
---
# ceph-common
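# Everything in this block applies to non-containerized deployments
# (see the `when` at the end of the block).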
- block:
    - name: create ceph conf directory
      file:
        path: "/etc/ceph"
        state: directory
        owner: "ceph"
        group: "ceph"
        mode: "0755"
    - block:
        - name: count number of osds for non-lvm scenario
          set_fact:
            num_osds: "{{ devices | length | int }}"
          when:
            - devices | length > 0
            - (osd_scenario == 'collocated' or osd_scenario == 'non-collocated')

        - name: count number of osds for lvm scenario
          set_fact:
            num_osds: "{{ lvm_volumes | length | int }}"
          when:
            - lvm_volumes | length > 0
            - osd_scenario == 'lvm'
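
        # `ceph-volume lvm batch --report --format=json` dry-runs the batch and
        # reports, as JSON, the OSDs it would create on the listed devices.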
        - name: get number of osds for lvm-batch scenario
          command: "ceph-volume lvm batch --report --format=json --osds-per-device {{ osds_per_device }} {{ devices | join(' ') }}"
          register: lvm_batch_devices
          when:
            - devices | length > 0
            - osd_scenario == 'lvm'
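
        # The length of the report's 'osds' array is the number of OSDs
        # ceph-volume would deploy on this host.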
        - name: set_fact num_osds
          set_fact:
            num_osds: "{{ (lvm_batch_devices.stdout | from_json).osds | length | int }}"
          when:
            - devices | length > 0
            - osd_scenario == 'lvm'
      when:
        - inventory_hostname in groups.get(osd_group_name, [])
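
    # config_template is the action plugin bundled with ceph-ansible: it
    # renders the Jinja2 template, then merges ceph_conf_overrides into the
    # resulting INI file.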
- name: "generate ceph configuration file: {{ cluster }}.conf"
action: config_template
args:
src: ceph.conf.j2
dest: /etc/ceph/{{ cluster }}.conf
owner: "ceph"
group: "ceph"
mode: "0644"
config_overrides: "{{ ceph_conf_overrides }}"
config_type: ini
notify:
- restart ceph mons
- restart ceph osds
- restart ceph mdss
- restart ceph rgws
- restart ceph mgrs
- restart ceph rbdmirrors
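
    # Optionally keep a copy of the rendered configuration on the Ansible
    # controller (under fetch_directory) when ceph_conf_local is set.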
- name: "ensure fetch directory exists"
run_once: true
become: false
local_action:
module: file
path: "{{ fetch_directory }}/{{ fsid }}/etc/ceph"
state: directory
mode: "0755"
when:
- ceph_conf_local
- name: "generate {{ cluster }}.conf configuration file locally"
local_action: config_template
become: false
run_once: true
args:
src: "ceph.conf.j2"
dest: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.conf"
config_overrides: "{{ ceph_conf_overrides }}"
config_type: ini
when:
- inventory_hostname in groups[mon_group_name]
- ceph_conf_local
when:
- not containerized_deployment|bool

# ceph-docker-common
# Only create the fetch directory when:
# we are not populating kv_store with the default ceph.conf AND the host is a mon
# OR
# we are not populating kv_store with the default ceph.conf AND there is at
# least 1 host in the nfs group AND this host is the first nfs host
- block:
    - name: create a local fetch directory if it does not exist
      local_action:
        module: file
        path: "{{ fetch_directory }}"
        state: directory
      changed_when: false
      become: false
      run_once: true
      when:
        - (cephx or generate_fsid)
        - (not mon_containerized_default_ceph_conf_with_kv and
           (inventory_hostname in groups.get(mon_group_name, []))) or
          (not mon_containerized_default_ceph_conf_with_kv and
           ((groups.get(nfs_group_name, []) | length > 0)
            and (inventory_hostname == groups.get(nfs_group_name, [])[0])))
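
    # Dict-form local_action: the free-form shell command is written as a YAML
    # continuation of the `module:` scalar on the indented line below it.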
    - name: generate cluster uuid
      local_action:
        module: shell
          python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
        creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
      register: cluster_uuid
      become: false
      when:
        - generate_fsid
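
    # `creates`/`removes` keep this pair idempotent: the uuid is generated only
    # when the file is absent and read back only when it already exists.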
    - name: read cluster uuid if it already exists
      local_action:
        module: command
          cat {{ fetch_directory }}/ceph_cluster_uuid.conf
        removes: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
      changed_when: false
      register: cluster_uuid
      check_mode: no
      become: false
      when:
        - generate_fsid
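
    # Ownership uses the numeric ceph_uid rather than the 'ceph' user name,
    # presumably so it matches the uid the ceph user has inside the container
    # image.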
    - name: ensure /etc/ceph exists
      file:
        path: /etc/ceph
        state: directory
        owner: "{{ ceph_uid }}"
        group: "{{ ceph_uid }}"
        mode: "0755"
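
    # Same config_template rendering as the non-containerized path, but the
    # destination directory comes from ceph_conf_key_directory.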
- name: "generate {{ cluster }}.conf configuration file"
action: config_template
args:
src: "ceph.conf.j2"
dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.conf"
owner: "root"
group: "root"
mode: "0644"
config_overrides: "{{ ceph_conf_overrides }}"
config_type: ini
notify:
- restart ceph mons
- restart ceph osds
- restart ceph mdss
- restart ceph rgws
- restart ceph mgrs
- restart ceph rbdmirrors
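
    # cluster_uuid was registered by whichever of the two uuid tasks ran above.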
    - name: set fsid fact when generate_fsid = true
      set_fact:
        fsid: "{{ cluster_uuid.stdout }}"
      when:
        - generate_fsid
  when:
    - containerized_deployment|bool