common: do not run tasks in main.yml, use include

For readability and clarity we do not run any tasks directly in the
main.yml file. This file should only contain include statements, which
helps us later to apply conditionals if we want to.

Signed-off-by: Sébastien Han <seb@redhat.com>
pull/1158/head
Sébastien Han 2016-12-09 14:51:35 +01:00
parent 1de8176bf4
commit 189f4fee47
9 changed files with 231 additions and 217 deletions

2
.gitignore vendored
View File

@ -12,7 +12,7 @@ group_vars/restapis
group_vars/agent
group_vars/*.yml
*.DS_Store
*.yml
/*.yml
*.pyc
*.sw?
.tox

View File

@ -0,0 +1,14 @@
---
# Probe for Ceph admin sockets so later tasks can tell whether daemons
# are already running on this host.  Both probes are read-only: they
# never report "changed" and never fail the play; callers inspect the
# registered results (socket / socketrgw) instead.

- name: check for a ceph socket
  # Looks for any admin socket under the default /var/run/ceph path.
  shell: "stat /var/run/ceph/*.asok > /dev/null 2>&1"
  changed_when: false
  failed_when: false
  # Run even in check mode so the registered fact is always available.
  # NOTE(review): always_run is deprecated in later Ansible releases in
  # favour of check_mode -- confirm the targeted Ansible version.
  always_run: true
  register: socket

- name: check for a rados gateway socket
  # The RGW admin socket lives under rbd_client_admin_socket_path.
  shell: "stat {{ rbd_client_admin_socket_path }}*.asok > /dev/null 2>&1"
  changed_when: false
  failed_when: false
  always_run: true
  register: socketrgw

View File

@ -0,0 +1,47 @@
---
# Persist the cluster name (CLUSTER=<name>) in the environment file
# sourced at daemon start-up, picking the file that matches the distro
# family and the installed package layout.

- name: configure cluster name
  lineinfile:
    dest: /etc/sysconfig/ceph
    insertafter: EOF
    # Use canonical YAML booleans instead of truthy "yes".
    create: true
    line: "CLUSTER={{ cluster }}"
  when: ansible_os_family == "RedHat"

# NOTE(leseb): we are performing the following check
# to ensure any Jewel installation will not fail.
# The following commit https://github.com/ceph/ceph/commit/791eba81a5467dd5de4f1680ed0deb647eb3fb8b
# fixed a package issue where the path was wrong.
# This bug is not yet fixed in all the distro packages so we are working around it
# Impacted versions:
# - Jewel from UCA: https://bugs.launchpad.net/ubuntu/+source/ceph/+bug/1582773
# - Jewel from latest Canonical 16.04 distro
# - All previous versions from Canonical
# - Infernalis from ceph.com

- name: check /etc/default/ceph exist
  stat:
    path: /etc/default/ceph
  register: etc_default_ceph
  # Run even in check mode; the stat result drives the two tasks below.
  always_run: true
  when: ansible_os_family == "Debian"

# /etc/default/ceph is a plain file on fixed packages...
- name: configure cluster name
  lineinfile:
    dest: /etc/default/ceph
    insertafter: EOF
    create: true
    line: "CLUSTER={{ cluster }}"
  when:
    - ansible_os_family == "Debian"
    - etc_default_ceph.stat.exists
    - not etc_default_ceph.stat.isdir

# ...and a directory on the impacted package versions listed above.
- name: configure cluster name
  lineinfile:
    dest: /etc/default/ceph/ceph
    insertafter: EOF
    create: true
    line: "CLUSTER={{ cluster }}"
  when:
    - ansible_os_family == "Debian"
    - etc_default_ceph.stat.exists
    - etc_default_ceph.stat.isdir

View File

@ -0,0 +1,12 @@
---
# Create the directories RBD clients write into (admin sockets and
# logs), using the ownership/mode facts computed in facts.yml.
- name: create rbd client directory
  file:
    path: "{{ item }}"
    state: directory
    owner: "{{ rbd_client_directory_owner }}"
    group: "{{ rbd_client_directory_group }}"
    mode: "{{ rbd_client_directory_mode }}"
  with_items:
    - "{{ rbd_client_admin_socket_path }}"
    - "{{ rbd_client_log_path }}"
  # Whole task is skippable via the rbd_client_directories toggle.
  when: rbd_client_directories

View File

@ -40,3 +40,82 @@
- set_fact:
    mds_name: "{{ ansible_fqdn }}"
  when: mds_use_fqdn

# The facts below flip ownership between root and ceph around the
# Infernalis release (presumably because the daemons started running as
# the unprivileged "ceph" user from Infernalis on -- confirm against
# the release notes).
# `when:` is already evaluated as Jinja, so look the release number up
# with bracket notation; templating the key with bare "{{ }}" inside a
# conditional is invalid/deprecated Ansible usage.

- set_fact:
    dir_owner: ceph
    dir_group: ceph
    dir_mode: "0755"
  when: ceph_release_num[ceph_release] > ceph_release_num.hammer

- set_fact:
    dir_owner: root
    dir_group: root
    dir_mode: "0755"
  when: ceph_release_num[ceph_release] < ceph_release_num.infernalis

- set_fact:
    key_owner: root
    key_group: root
    key_mode: "0600"
  when: ceph_release_num[ceph_release] < ceph_release_num.infernalis

- set_fact:
    key_owner: ceph
    key_group: ceph
    key_mode: "0600"
  when: ceph_release_num[ceph_release] > ceph_release_num.hammer

- set_fact:
    activate_file_owner: ceph
    activate_file_group: ceph
    activate_file_mode: "0644"
  when: ceph_release_num[ceph_release] > ceph_release_num.hammer

- set_fact:
    activate_file_owner: root
    activate_file_group: root
    activate_file_mode: "0644"
  when: ceph_release_num[ceph_release] < ceph_release_num.infernalis

# rbd_client_directory_* only receive a release-dependent default when
# the user has not already supplied a value (undefined, or empty/false).

- set_fact:
    rbd_client_directory_owner: root
  when:
    - ceph_release_num[ceph_release] < ceph_release_num.infernalis
    - rbd_client_directory_owner is not defined
      or not rbd_client_directory_owner

- set_fact:
    rbd_client_directory_owner: ceph
  when:
    - ceph_release_num[ceph_release] > ceph_release_num.hammer
    - rbd_client_directory_owner is not defined
      or not rbd_client_directory_owner

- set_fact:
    rbd_client_directory_group: root
  when:
    - ceph_release_num[ceph_release] < ceph_release_num.infernalis
    - rbd_client_directory_group is not defined
      or not rbd_client_directory_group

- set_fact:
    rbd_client_directory_group: ceph
  when:
    - ceph_release_num[ceph_release] > ceph_release_num.hammer
    - rbd_client_directory_group is not defined
      or not rbd_client_directory_group

- set_fact:
    rbd_client_directory_mode: "1777"
  when:
    - ceph_release_num[ceph_release] < ceph_release_num.infernalis
    - rbd_client_directory_mode is not defined
      or not rbd_client_directory_mode

- set_fact:
    rbd_client_directory_mode: "0770"
  when:
    - ceph_release_num[ceph_release] > ceph_release_num.hammer
    - rbd_client_directory_mode is not defined
      or not rbd_client_directory_mode

View File

@ -0,0 +1,34 @@
---
# Lay down /etc/ceph and render the cluster configuration file.

- name: create ceph conf directory
  file:
    path: /etc/ceph
    state: directory
    owner: "{{ dir_owner }}"
    group: "{{ dir_group }}"
    mode: "{{ dir_mode }}"

- name: generate ceph configuration file
  # config_template is a custom action plugin (not a core module): it
  # renders the Jinja template and layers ceph_conf_overrides on top as
  # INI data.  NOTE(review): the plugin ships outside this file --
  # confirm it is available on the action-plugin path.
  action: config_template
  args:
    src: ceph.conf.j2
    dest: /etc/ceph/{{ cluster }}.conf
    owner: "{{ dir_owner }}"
    group: "{{ dir_group }}"
    mode: "{{ activate_file_mode }}"
    config_overrides: "{{ ceph_conf_overrides }}"
    config_type: ini
  # Any daemon type may consume this file, so every restart handler is
  # notified; presumably each handler no-ops where its daemon is absent
  # -- verify in the role's handlers.
  notify:
    - restart ceph mons
    - restart ceph mons on ubuntu
    - restart ceph mons with systemd
    - restart ceph osds
    - restart ceph osds on ubuntu
    - restart ceph osds with systemd
    - restart ceph mdss
    - restart ceph mdss on ubuntu
    - restart ceph mdss with systemd
    - restart ceph rgws
    - restart ceph rgws on ubuntu
    - restart ceph rgws on red hat
    - restart ceph rgws with systemd
    - restart ceph nfss

View File

@ -0,0 +1,31 @@
---
# Create (or reuse) the cluster fsid on the Ansible control node and
# cache it under fetch_directory.  Every task runs locally
# (local_action) and without privilege escalation (become: false).

- name: create a local fetch directory if it does not exist
  local_action: file path={{ fetch_directory }} state=directory
  changed_when: false
  become: false
  run_once: true
  when: cephx or generate_fsid

- name: generate cluster fsid
  # creates= makes this a no-op once the uuid file exists.
  # NOTE(review): mixed key=value / YAML task style -- consider native
  # YAML module args.
  local_action: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
    creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
  register: cluster_uuid
  become: false
  when:
    - generate_fsid
    - ceph_current_fsid.rc != 0

- name: reuse cluster fsid when cluster is already running
  # ceph_current_fsid.rc == 0 means an fsid was obtained from the
  # running cluster elsewhere; cache that instead of generating one.
  local_action: shell echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
    creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
  become: false
  when: ceph_current_fsid.rc == 0

- name: read cluster fsid if it already exists
  # removes= skips the task when the uuid file is absent.
  local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf
    removes="{{ fetch_directory }}/ceph_cluster_uuid.conf"
  changed_when: false
  register: cluster_uuid
  become: false
  always_run: true
  when: generate_fsid

View File

@ -86,219 +86,8 @@
static: False
- include: facts.yml
- set_fact:
dir_owner: ceph
dir_group: ceph
dir_mode: "0755"
when: ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- set_fact:
dir_owner: root
dir_group: root
dir_mode: "0755"
when: ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- set_fact:
key_owner: root
key_group: root
key_mode: "0600"
when: ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- set_fact:
key_owner: ceph
key_group: ceph
key_mode: "0600"
when: ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- set_fact:
activate_file_owner: ceph
activate_file_group: ceph
activate_file_mode: "0644"
when: ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- set_fact:
activate_file_owner: root
activate_file_group: root
activate_file_mode: "0644"
when: ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- set_fact:
rbd_client_directory_owner: root
when:
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- rbd_client_directory_owner is not defined
or not rbd_client_directory_owner
- set_fact:
rbd_client_directory_owner: ceph
when:
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- rbd_client_directory_owner is not defined
or not rbd_client_directory_owner
- set_fact:
rbd_client_directory_group: root
when:
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- rbd_client_directory_group is not defined
or not rbd_client_directory_group
- set_fact:
rbd_client_directory_group: ceph
when:
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- rbd_client_directory_group is not defined
or not rbd_client_directory_group
- set_fact:
rbd_client_directory_mode: "1777"
when:
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- rbd_client_directory_mode is not defined
or not rbd_client_directory_mode
- set_fact:
rbd_client_directory_mode: "0770"
when:
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- rbd_client_directory_mode is not defined
or not rbd_client_directory_mode
- name: check for a ceph socket
shell: "stat /var/run/ceph/*.asok > /dev/null 2>&1"
changed_when: false
failed_when: false
always_run: true
register: socket
- name: check for a rados gateway socket
shell: "stat {{ rbd_client_admin_socket_path }}*.asok > /dev/null 2>&1"
changed_when: false
failed_when: false
always_run: true
register: socketrgw
- name: create a local fetch directory if it does not exist
local_action: file path={{ fetch_directory }} state=directory
changed_when: false
become: false
run_once: true
when: cephx or generate_fsid
- name: generate cluster fsid
local_action: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
register: cluster_uuid
become: false
when:
- generate_fsid
- ceph_current_fsid.rc != 0
- name: reuse cluster fsid when cluster is already running
local_action: shell echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
become: false
when: ceph_current_fsid.rc == 0
- name: read cluster fsid if it already exists
local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf
removes="{{ fetch_directory }}/ceph_cluster_uuid.conf"
changed_when: false
register: cluster_uuid
become: false
always_run: true
when: generate_fsid
- name: create ceph conf directory
file:
path: /etc/ceph
state: directory
owner: "{{ dir_owner }}"
group: "{{ dir_group }}"
mode: "{{ dir_mode }}"
- name: generate ceph configuration file
action: config_template
args:
src: ceph.conf.j2
dest: /etc/ceph/{{ cluster }}.conf
owner: "{{ dir_owner }}"
group: "{{ dir_group }}"
mode: "{{ activate_file_mode }}"
config_overrides: "{{ ceph_conf_overrides }}"
config_type: ini
notify:
- restart ceph mons
- restart ceph mons on ubuntu
- restart ceph mons with systemd
- restart ceph osds
- restart ceph osds on ubuntu
- restart ceph osds with systemd
- restart ceph mdss
- restart ceph mdss on ubuntu
- restart ceph mdss with systemd
- restart ceph rgws
- restart ceph rgws on ubuntu
- restart ceph rgws on red hat
- restart ceph rgws with systemd
- restart ceph nfss
- name: create rbd client directory
file:
path: "{{ item }}"
state: directory
owner: "{{ rbd_client_directory_owner }}"
group: "{{ rbd_client_directory_group }}"
mode: "{{ rbd_client_directory_mode }}"
with_items:
- "{{ rbd_client_admin_socket_path }}"
- "{{ rbd_client_log_path }}"
when: rbd_client_directories
- name: configure cluster name
lineinfile:
dest: /etc/sysconfig/ceph
insertafter: EOF
create: yes
line: "CLUSTER={{ cluster }}"
when: ansible_os_family == "RedHat"
# NOTE(leseb): we are performing the following check
# to ensure any Jewel installation will not fail.
# The following commit https://github.com/ceph/ceph/commit/791eba81a5467dd5de4f1680ed0deb647eb3fb8b
# fixed a package issue where the path was the wrong.
# This bug is not yet on all the distros package so we are working around it
# Impacted versions:
# - Jewel from UCA: https://bugs.launchpad.net/ubuntu/+source/ceph/+bug/1582773
# - Jewel from latest Canonical 16.04 distro
# - All previous versions from Canonical
# - Infernalis from ceph.com
- name: check /etc/default/ceph exist
stat:
path: /etc/default/ceph
register: etc_default_ceph
always_run: true
when: ansible_os_family == "Debian"
- name: configure cluster name
lineinfile:
dest: /etc/default/ceph
insertafter: EOF
create: yes
line: "CLUSTER={{ cluster }}"
when:
- ansible_os_family == "Debian"
- etc_default_ceph.stat.exists
- not etc_default_ceph.stat.isdir
- name: configure cluster name
lineinfile:
dest: /etc/default/ceph/ceph
insertafter: EOF
create: yes
line: "CLUSTER={{ cluster }}"
when:
- ansible_os_family == "Debian"
- etc_default_ceph.stat.exists
- etc_default_ceph.stat.isdir
# main.yml stays a flat list of includes (no inline tasks) so that
# conditionals can later be attached per include.
- include: ./checks/check_socket.yml
- include: generate_cluster_fsid.yml
- include: generate_ceph_conf.yml
- include: create_rbd_client_dir.yml
- include: configure_cluster_name.yml

View File

@ -0,0 +1,8 @@
---
# Fail fast when no monitor addressing option has been set.
- name: make sure monitor_interface or monitor_address or monitor_address_block is configured
  fail:
    msg: "Either monitor_interface, monitor_address, or monitor_address_block must be configured. Interface for the monitor to listen on or IP address of that interface"
  when:
    # Conditions are ANDed: fire only when every option still holds its
    # placeholder value (presumably the role defaults -- confirm in
    # defaults/main.yml).
    - monitor_interface == 'interface'
    - monitor_address == '0.0.0.0'
    - not monitor_address_block