# ceph-ansible/roles/ceph-config/tasks/main.yml

---
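# create_ceph_initial_dirs.yml pre-creates the base Ceph directory tree
# (e.g. under /var/lib/ceph) before any configuration is rendered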
- name: include create_ceph_initial_dirs.yml
  include_tasks: create_ceph_initial_dirs.yml

# ceph-common
- block:
    - name: create ceph conf directory
      file:
        path: "/etc/ceph"
        state: directory
        owner: "ceph"
        group: "ceph"
        mode: "0755"
    - block:
        - name: count number of osds for ceph-disk scenarios
          set_fact:
            num_osds: "{{ devices | length | int }}"
          when:
            - devices | default([]) | length > 0
            - osd_scenario in ['collocated', 'non-collocated']

        - name: count number of osds for lvm scenario
          set_fact:
            num_osds: "{{ lvm_volumes | length | int }}"
          when:
            - lvm_volumes | default([]) | length > 0
            - osd_scenario == 'lvm'
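
        # With raw devices the device list alone is not enough for the lvm
        # scenario (osds_per_device may be > 1), so ask ceph-volume for a
        # dry-run batch report and count the OSDs it would create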
        - name: run 'ceph-volume lvm batch --report' to see how many osds are to be created
          ceph_volume:
            cluster: "{{ cluster }}"
            objectstore: "{{ osd_objectstore }}"
            batch_devices: "{{ devices }}"
            osds_per_device: "{{ osds_per_device | default(1) | int }}"
            journal_size: "{{ journal_size }}"
            block_db_size: "{{ block_db_size }}"
            report: true
            action: "batch"
          register: lvm_batch_report
          environment:
            CEPH_VOLUME_DEBUG: 1
          when:
            - devices | default([]) | length > 0
            - osd_scenario == 'lvm'

        - name: set_fact num_osds from the output of 'ceph-volume lvm batch --report'
          set_fact:
            num_osds: "{{ (lvm_batch_report.stdout | from_json).osds | length | int }}"
          when:
            - devices | default([]) | length > 0
            - osd_scenario == 'lvm'
            - (lvm_batch_report.stdout | from_json).changed
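
        # A report with changed == false means the OSDs already exist, so
        # count them from 'ceph-volume lvm list' instead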
        - name: run 'ceph-volume lvm list' to see how many osds have already been created
          ceph_volume:
            action: "list"
          register: lvm_list
          environment:
            CEPH_VOLUME_DEBUG: 1
          when:
            - devices | default([]) | length > 0
            - osd_scenario == 'lvm'
            - not (lvm_batch_report.stdout | from_json).changed

        - name: set_fact num_osds from the output of 'ceph-volume lvm list'
          set_fact:
            num_osds: "{{ lvm_list.stdout | from_json | length | int }}"
          when:
            - devices | default([]) | length > 0
            - osd_scenario == 'lvm'
            - not (lvm_batch_report.stdout | from_json).changed
      when:
        - inventory_hostname in groups.get(osd_group_name, [])
- name: "generate ceph configuration file: {{ cluster }}.conf"
action: config_template
args:
src: ceph.conf.j2
dest: /etc/ceph/{{ cluster }}.conf
owner: "ceph"
group: "ceph"
mode: "0644"
config_overrides: "{{ ceph_conf_overrides }}"
config_type: ini
notify:
- restart ceph mons
- restart ceph osds
- restart ceph mdss
- restart ceph rgws
- restart ceph mgrs
- restart ceph rbdmirrors
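
    # When ceph_conf_local is set, also keep a copy of the rendered
    # configuration on the Ansible controller under fetch_directory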
- name: "ensure fetch directory exists"
run_once: true
become: false
file:
path: "{{ fetch_directory }}/{{ fsid }}/etc/ceph"
state: directory
mode: "0755"
delegate_to: localhost
when:
- ceph_conf_local
- name: "generate {{ cluster }}.conf configuration file locally"
config_template:
become: false
run_once: true
delegate_to: localhost
args:
src: "ceph.conf.j2"
dest: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.conf"
config_overrides: "{{ ceph_conf_overrides }}"
config_type: ini
when:
- inventory_hostname in groups[mon_group_name]
- ceph_conf_local
when:
- not containerized_deployment|bool

# ceph-container-common
# only create fetch directory when:
# we are not populating kv_store with default ceph.conf AND host is a mon
# OR
# we are not populating kv_store with default ceph.conf AND there is at least 1 nfs in the nfs group AND host is the first nfs
- block:
    - name: create a local fetch directory if it does not exist
      file:
        path: "{{ fetch_directory }}"
        state: directory
      delegate_to: localhost
      changed_when: false
      become: false
      run_once: true
      when:
        - (cephx or generate_fsid)
        - (not mon_containerized_default_ceph_conf_with_kv and
             (inventory_hostname in groups.get(mon_group_name, []))) or
          (not mon_containerized_default_ceph_conf_with_kv and
             ((groups.get(nfs_group_name, []) | length > 0)
              and (inventory_hostname == groups.get(nfs_group_name, [])[0])))
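
    # Generate (or re-read) the cluster fsid once on the Ansible controller
    # so that every host renders the same value into its ceph.conf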
    - name: generate cluster uuid
      shell: python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
      args:
        creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
      delegate_to: localhost
      register: cluster_uuid
      become: false
      when:
        - generate_fsid
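
    # 'creates' above skips the generation once the uuid file exists, while
    # 'removes' below runs the read only when it does exist, so exactly one
    # of the two tasks populates cluster_uuid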
    - name: read cluster uuid if it already exists
      command: "cat {{ fetch_directory }}/ceph_cluster_uuid.conf"
      args:
        removes: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
      delegate_to: localhost
      changed_when: false
      register: cluster_uuid
      check_mode: no
      become: false
      when:
        - generate_fsid

    - name: ensure /etc/ceph exists
      file:
        path: /etc/ceph
        state: directory
        owner: "{{ ceph_uid }}"
        group: "{{ ceph_uid }}"
        mode: "0755"
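
    # ceph_conf_key_directory normally defaults to /etc/ceph, which the
    # containerized daemons typically bind-mount to pick up this rendered conf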
- name: "generate {{ cluster }}.conf configuration file"
action: config_template
args:
src: "ceph.conf.j2"
dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.conf"
owner: "root"
group: "root"
mode: "0644"
config_overrides: "{{ ceph_conf_overrides }}"
config_type: ini
notify:
- restart ceph mons
- restart ceph osds
- restart ceph mdss
- restart ceph rgws
- restart ceph mgrs
- restart ceph rbdmirrors

    - name: set fsid fact when generate_fsid = true
      set_fact:
        fsid: "{{ cluster_uuid.stdout }}"
      when:
        - generate_fsid
  when:
    - containerized_deployment|bool