ceph-ansible/roles/ceph-config/tasks/main.yml

---
- name: include create_ceph_initial_dirs.yml
  include_tasks: create_ceph_initial_dirs.yml
  when: containerized_deployment | bool
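
# The block below only runs on OSD hosts: it works out how many OSDs the host
# will carry and stores the result in the num_osds fact (presumably consumed
# by the ceph.conf.j2 template rendered further down to derive per-OSD
# settings; that usage lives in the template, not in this file).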
- name: config file operations related to OSDs
  when:
    - inventory_hostname in groups.get(osd_group_name, [])
    # the rolling_update.yml playbook sets num_osds to the number of currently
    # running osds
    - not rolling_update | bool
  block:
    - name: count number of osds for lvm scenario
      set_fact:
        num_osds: "{{ lvm_volumes | length | int }}"
      when: lvm_volumes | default([]) | length > 0

    - name: run 'ceph-volume lvm batch --report' to see how many osds are to be created
      ceph_volume:
        cluster: "{{ cluster }}"
        objectstore: "{{ osd_objectstore }}"
        batch_devices: "{{ devices }}"
        osds_per_device: "{{ osds_per_device | default(1) | int }}"
        journal_size: "{{ journal_size }}"
        block_db_size: "{{ block_db_size }}"
        report: true
        action: "batch"
      register: lvm_batch_report
      environment:
        CEPH_VOLUME_DEBUG: 1
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
        PYTHONIOENCODING: utf-8
      changed_when: false
      when: devices | default([]) | length > 0
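
    # The two tasks below branch on the 'changed' field of the batch report:
    # when the report says new OSDs would be created, num_osds is taken from
    # its 'osds' list; otherwise the devices are assumed to already carry OSDs
    # and 'ceph-volume lvm list' is used to count the existing ones.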
    - name: set_fact num_osds from the output of 'ceph-volume lvm batch --report'
      set_fact:
        num_osds: "{{ (lvm_batch_report.stdout | from_json).osds | length | int }}"
      when:
        - devices | default([]) | length > 0
        - (lvm_batch_report.stdout | from_json).changed

    - name: run 'ceph-volume lvm list' to see how many osds have already been created
      ceph_volume:
        action: "list"
      register: lvm_list
      environment:
        CEPH_VOLUME_DEBUG: 1
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
        PYTHONIOENCODING: utf-8
      changed_when: false
      when:
        - devices | default([]) | length > 0
        - not (lvm_batch_report.stdout | from_json).changed

    - name: set_fact num_osds from the output of 'ceph-volume lvm list'
      set_fact:
        num_osds: "{{ lvm_list.stdout | from_json | length | int }}"
      when:
        - devices | default([]) | length > 0
        - not (lvm_batch_report.stdout | from_json).changed
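
# At this point num_osds reflects, in order of precedence: the explicit
# lvm_volumes entries, the OSDs planned by 'lvm batch --report', or the OSDs
# already reported by 'lvm list'.
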
# ceph-common
- name: config file operations for non-containerized scenarios
  when: not containerized_deployment | bool
  block:
    - name: create ceph conf directory
      file:
        path: "/etc/ceph"
        state: directory
        owner: "ceph"
        group: "ceph"
        mode: "{{ ceph_directories_mode | default('0755') }}"
- name: "generate ceph configuration file: {{ cluster }}.conf"
action: config_template
args:
src: ceph.conf.j2
dest: /etc/ceph/{{ cluster }}.conf
owner: "ceph"
group: "ceph"
mode: "0644"
config_overrides: "{{ ceph_conf_overrides }}"
config_type: ini
notify:
- restart ceph mons
- restart ceph osds
- restart ceph mdss
- restart ceph rgws
- restart ceph mgrs
- restart ceph rbdmirrors
- restart ceph rbd-target-api-gw
- name: "ensure fetch directory exists"
run_once: true
become: false
file:
path: "{{ fetch_directory }}/{{ fsid }}/etc/ceph"
state: directory
mode: "{{ ceph_directories_mode | default('0755') }}"
delegate_to: localhost
when: ceph_conf_local | bool
- name: "generate {{ cluster }}.conf configuration file locally"
config_template:
become: false
run_once: true
delegate_to: localhost
args:
src: "ceph.conf.j2"
dest: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.conf"
config_overrides: "{{ ceph_conf_overrides }}"
config_type: ini
when:
- inventory_hostname in groups[mon_group_name]
- ceph_conf_local | bool
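
# Hypothetical example of what ceph_conf_overrides might look like in
# group_vars; the section and option names below are illustrative only and
# get merged verbatim into the rendered {{ cluster }}.conf by config_template:
#
#   ceph_conf_overrides:
#     global:
#       osd_pool_default_size: 2
#     osd:
#       osd_max_backfills: 2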
- name: config file operations for containerized scenarios
  when: containerized_deployment | bool
  block:
    - name: create a local fetch directory if it does not exist
      file:
path: "{{ fetch_directory }}"
state: directory
delegate_to: localhost
changed_when: false
become: false
run_once: true
when:
- (cephx or generate_fsid) | bool
- ((inventory_hostname in groups.get(mon_group_name, [])) or
(groups.get(nfs_group_name, []) | length > 0) and inventory_hostname == groups.get(nfs_group_name, [])[0])
- name: "generate {{ cluster }}.conf configuration file"
action: config_template
args:
src: "ceph.conf.j2"
dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.conf"
owner: "root"
group: "root"
mode: "0644"
config_overrides: "{{ ceph_conf_overrides }}"
config_type: ini
notify:
- restart ceph mons
- restart ceph osds
- restart ceph mdss
- restart ceph rgws
- restart ceph mgrs
- restart ceph rbdmirrors
- restart ceph rbd-target-api-gw