Unify formatting of when conditional

This is purely a refactor. Converts when 'and' conditionals into lists
rather than multiline strings. This does not work for nested
conditionals, but those can be formatted with indents.
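
For example, using conditions that appear in the hunks below:

    # before: one multiline string joined with 'and'
    when:
      ansible_os_family == 'RedHat' and
      ansible_pkg_mgr == 'yum'

    # after: a list of conditions, which Ansible implicitly ANDs together
    when:
      - ansible_os_family == 'RedHat'
      - ansible_pkg_mgr == 'yum'

    # nested or/and expressions stay in a single list item,
    # continued across lines with indentation
    when:
      - osd_group_name in group_names
      - (journal_collocation and raw_multi_journal)
        or (journal_collocation and osd_directory)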

Moves one-line when statements onto the same line as the when keyword
itself.
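
For example:

    # before
    when:
      rgw_group_name in group_names

    # after
    when: rgw_group_name in group_names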

A small logic bug was found in ceph-osd/tasks/check_devices.yml, which
was also fixed.
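
The old expression read 'journal_collocation or raw_multi_journal and
not osd_auto_discovery', which Jinja evaluates as 'journal_collocation
or (raw_multi_journal and not osd_auto_discovery)'. The list form, as
shown in the check_devices.yml hunk below, makes the intended grouping
explicit:

    # before (precedence bug)
    when:
      journal_collocation or
      raw_multi_journal and not
      osd_auto_discovery

    # after
    when:
      - journal_collocation or raw_multi_journal
      - not osd_auto_discovery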

Signed-off-by: Sam Yaple <sam@yaple.net>
pull/749/head
Sam Yaple 2016-05-09 14:08:33 +00:00
parent 4cf2d98de3
commit 069c93a238
44 changed files with 477 additions and 511 deletions

View File

@ -3,8 +3,7 @@
command: subscription-manager identity
register: subscription
changed_when: false
when:
ansible_os_family == 'RedHat'
when: ansible_os_family == 'RedHat'
- name: install dependencies
apt:

View File

@ -6,36 +6,36 @@
- name: restart ceph mons
command: service ceph restart mon
when:
socket.rc == 0 and
ansible_distribution != 'Ubuntu' and
mon_group_name in group_names and
is_before_infernalis
- socket.rc == 0
- ansible_distribution != 'Ubuntu'
- mon_group_name in group_names
- is_before_infernalis
- name: restart ceph mons with systemd
service:
name: ceph-mon@{{ monitor_name }}
state: restarted
when:
socket.rc == 0 and
use_systemd and
mon_group_name in group_names and
is_after_hammer
- socket.rc == 0
- use_systemd
- mon_group_name in group_names
- is_after_hammer
- name: restart ceph mons on ubuntu
command: initctl restart ceph-mon cluster={{ cluster }} id={{ monitor_name }}
when:
socket.rc == 0 and
ansible_distribution == 'Ubuntu' and
not use_systemd and
mon_group_name in group_names
- socket.rc == 0
- ansible_distribution == 'Ubuntu'
- not use_systemd
- mon_group_name in group_names
- name: restart ceph osds
command: service ceph restart osd
when:
socket.rc == 0 and
ansible_distribution != 'Ubuntu' and
osd_group_name in group_names and
is_before_infernalis
- socket.rc == 0
- ansible_distribution != 'Ubuntu'
- osd_group_name in group_names
- is_before_infernalis
# This does not just restart OSDs but everything else too. Unfortunately
# at this time the ansible role does not have an OSD id list to use
@ -45,10 +45,10 @@
name: ceph.target
state: restarted
when:
socket.rc == 0 and
use_systemd and
osd_group_name in group_names and
is_after_hammer
- socket.rc == 0
- use_systemd
- osd_group_name in group_names
- is_after_hammer
- name: restart ceph osds on ubuntu
shell: |
@ -56,71 +56,73 @@
initctl restart ceph-osd cluster={{ cluster }} id=$id
done
when:
socket.rc == 0 and
ansible_distribution == 'Ubuntu' and
not use_systemd and
osd_group_name in group_names
- socket.rc == 0
- ansible_distribution == 'Ubuntu'
- not use_systemd
- osd_group_name in group_names
- name: restart ceph mdss on ubuntu
command: initctl restart ceph-mds cluster={{ cluster }} id={{ ansible_hostname }}
when:
socket.rc == 0 and
ansible_distribution == 'Ubuntu' and
not use_systemd and
mds_group_name in group_names
- socket.rc == 0
- ansible_distribution == 'Ubuntu'
- not use_systemd
- mds_group_name in group_names
- name: restart ceph mdss
command: service ceph restart mds
when:
socket.rc == 0 and
ansible_distribution != 'Ubuntu' and
use_systemd and
mds_group_name in group_names and
is_before_infernalis
- socket.rc == 0
- ansible_distribution != 'Ubuntu'
- use_systemd
- mds_group_name in group_names
- ceph_stable
- ceph_stable_release in ceph_stable_releases
- name: restart ceph mdss with systemd
service:
name: ceph-mds@{{ mds_name }}
state: restarted
when:
socket.rc == 0 and
use_systemd and
mds_group_name in group_names and
is_after_hammer
- socket.rc == 0
- use_systemd
- mds_group_name in group_names
- is_after_hammer
- name: restart ceph rgws on ubuntu
command: initctl restart radosgw cluster={{ cluster }} id=rgw.{{ ansible_hostname }}
when:
socketrgw.rc == 0 and
ansible_distribution == 'Ubuntu' and
not use_systemd and
rgw_group_name in group_names
- socketrgw.rc == 0
- ansible_distribution == 'Ubuntu'
- not use_systemd
- rgw_group_name in group_names
- name: restart ceph rgws
command: /etc/init.d/radosgw restart
when:
socketrgw.rc == 0 and
ansible_distribution != 'Ubuntu' and
rgw_group_name in group_names and
is_before_infernalis
- socketrgw.rc == 0
- ansible_distribution != 'Ubuntu'
- rgw_group_name in group_names
- is_before_infernalis
- name: restart ceph rgws on red hat
command: /etc/init.d/ceph-radosgw restart
when:
socketrgw.rc == 0 and
ansible_os_family == 'RedHat' and
rgw_group_name in group_names and
is_before_infernalis
- socketrgw.rc == 0
- ansible_os_family == 'RedHat'
- rgw_group_name in group_names
- is_before_infernalis
- name: restart ceph rgws with systemd
service:
name: ceph-rgw@{{ ansible_hostname }}
state: restarted
when:
socketrgw.rc == 0 and
use_systemd and
rgw_group_name in group_names and
is_after_hammer
- socketrgw.rc == 0
- use_systemd
- ansible_distribution != 'Ubuntu'
- rgw_group_name in group_names
- is_after_hammer
- name: restart apache2
service:
@ -128,8 +130,8 @@
state: restarted
enabled: yes
when:
ansible_os_family == 'Debian' and
rgw_group_name in group_names
- ansible_os_family == 'Debian'
- rgw_group_name in group_names
- name: restart apache2
service:
@ -137,5 +139,5 @@
state: restarted
enabled: yes
when:
ansible_os_family == 'RedHat' and
rgw_group_name in group_names
- ansible_os_family == 'RedHat'
- rgw_group_name in group_names

View File

@ -10,8 +10,8 @@
debug:
msg: "nmap is not installed, can not test if ceph ports are allowed :("
when:
check_firewall and
nmapexist.rc != 0
- check_firewall
- nmapexist.rc != 0
- name: check if monitor port is not filtered
local_action: shell set -o pipefail && nmap -p 6789 {{ item }} {{ hostvars[item]['ansible_' + monitor_interface]['ipv4']['address'] }} | grep -sqo filtered
@ -20,20 +20,20 @@
with_items: groups.{{ mon_group_name }}
register: monportstate
when:
check_firewall and
mon_group_name in group_names and
nmapexist.rc == 0
- check_firewall
- mon_group_name in group_names
- nmapexist.rc == 0
- name: fail if monitor port is filtered
fail:
msg: "Please allow port 6789 on your firewall"
with_items: monportstate.results
when:
check_firewall and
item.rc == 0 and
mon_group_name is defined and
mon_group_name in group_names and
nmapexist.rc == 0
- check_firewall
- item.has_key('rc') and item.rc == 0
- mon_group_name is defined
- mon_group_name in group_names
- nmapexist.rc == 0
- name: check if osd and mds range is not filtered
local_action: shell set -o pipefail && nmap -p 6800-7300 {{ item }} {{ hostvars[item]['ansible_default_ipv4']['address'] }} | grep -sqo filtered
@ -42,20 +42,20 @@
with_items: groups.{{ osd_group_name }}
register: osdrangestate
when:
check_firewall and
osd_group_name in group_names and
nmapexist.rc == 0
- check_firewall
- osd_group_name in group_names
- nmapexist.rc == 0
- name: fail if osd and mds range is filtered (osd hosts)
fail:
msg: "Please allow range from 6800 to 7300 on your firewall"
with_items: osdrangestate.results
when:
check_firewall and
item.rc == 0 and
osd_group_name is defined and
osd_group_name in group_names and
nmapexist.rc == 0
- check_firewall
- item.has_key('rc') and item.rc == 0
- osd_group_name is defined
- osd_group_name in group_names
- nmapexist.rc == 0
- name: check if osd and mds range is not filtered
local_action: shell set -o pipefail && nmap -p 6800-7300 {{ item }} {{ hostvars[item]['ansible_default_ipv4']['address'] }} | grep -sqo filtered
@ -64,20 +64,20 @@
with_items: groups.{{ mds_group_name }}
register: mdsrangestate
when:
check_firewall and
mds_group_name in group_names and
nmapexist.rc == 0
- check_firewall
- mds_group_name in group_names
- nmapexist.rc == 0
- name: fail if osd and mds range is filtered (mds hosts)
fail:
msg: "Please allow range from 6800 to 7300 on your firewall"
with_items: mdsrangestate.results
when:
check_firewall and
item.rc == 0 and
mds_group_name is defined and
mds_group_name in group_names and
nmapexist.rc == 0
- check_firewall
- item.has_key('rc') and item.rc == 0
- mds_group_name is defined
- mds_group_name in group_names
- nmapexist.rc == 0
- name: check if rados gateway port is not filtered
local_action: shell set -o pipefail && nmap -p {{ radosgw_civetweb_port }} {{ item }} {{ hostvars[item]['ansible_default_ipv4']['address'] }} | grep -sqo filtered
@ -86,17 +86,17 @@
with_items: groups.{{ rgw_group_name }}
register: rgwportstate
when:
check_firewall and
rgw_group_name in group_names and
nmapexist.rc == 0
- check_firewall
- rgw_group_name in group_names
- nmapexist.rc == 0
- name: fail if rados gateway port is filtered
fail:
msg: "Please allow port {{ radosgw_civetweb_port }} on your firewall"
with_items: rgwportstate.results
when:
check_firewall and
item.rc == 0 and
rgw_group_name is defined and
rgw_group_name in group_names and
nmapexist.rc == 0
- check_firewall
- item.has_key('rc') and item.rc == 0
- rgw_group_name is defined
- rgw_group_name in group_names
- nmapexist.rc == 0

View File

@ -3,8 +3,8 @@
fail:
msg: "choose an installation origin"
when:
ceph_origin != 'upstream' and
ceph_origin != 'distro'
- ceph_origin != 'upstream'
- ceph_origin != 'distro'
tags:
- package-install
@ -12,12 +12,12 @@
fail:
msg: "choose an upstream installation source or read https://github.com/ceph/ceph-ansible/wiki"
when:
ceph_origin == 'upstream' and
not ceph_stable and
not ceph_dev and
not ceph_stable_ice and
not ceph_stable_rh_storage and
not ceph_stable_uca
- ceph_origin == 'upstream'
- not ceph_stable
- not ceph_dev
- not ceph_stable_ice
- not ceph_stable_rh_storage
- not ceph_stable_uca
tags:
- package-install
@ -25,10 +25,10 @@
fail:
msg: "choose between ceph_stable_rh_storage_cdn_install and ceph_stable_rh_storage_iso_install"
when:
ceph_stable_rh_storage and
not ceph_stable_rh_storage_cdn_install and
not ceph_stable_rh_storage_iso_install and
ceph_origin == "upstream"
- ceph_stable_rh_storage
- not ceph_stable_rh_storage_cdn_install
- not ceph_stable_rh_storage_iso_install
- ceph_origin == "upstream"
tags:
- package-install
@ -36,81 +36,81 @@
fail:
msg: "journal_size must be configured. See http://ceph.com/docs/master/rados/configuration/osd-config-ref/"
when:
journal_size|int == 0 and
osd_objectstore != 'bluestore' and
osd_group_name in group_names
- journal_size|int == 0
- osd_objectstore != 'bluestore'
- osd_group_name in group_names
- name: make sure monitor_interface or monitor_address is configured
fail:
msg: "monitor_interface or monitor_address must be configured. Interface for the monitor to listen on or IP address of that interface"
when:
monitor_interface == 'interface' and
monitor_address == '0.0.0.0' and
mon_group_name in group_names
- monitor_interface == 'interface'
- monitor_address == '0.0.0.0'
- mon_group_name in group_names
- name: make sure cluster_network configured
fail:
msg: "cluster_network must be configured. Ceph replication network"
when:
cluster_network == '0.0.0.0/0' and
osd_group_name in group_names
- cluster_network == '0.0.0.0/0'
- osd_group_name in group_names
- name: make sure public_network configured
fail:
msg: "public_network must be configured. Ceph public network"
when:
public_network == '0.0.0.0/0' and
osd_group_name in group_names
- public_network == '0.0.0.0/0'
- osd_group_name in group_names
- name: make sure an osd scenario was chosen
fail:
msg: "please choose an osd scenario"
when:
osd_group_name is defined and
osd_group_name in group_names and
not journal_collocation and
not raw_multi_journal and
not osd_directory and
not bluestore
- osd_group_name is defined
- osd_group_name in group_names
- not journal_collocation
- not raw_multi_journal
- not osd_directory
- not bluestore
- name: verify only one osd scenario was chosen
fail:
msg: "please select only one osd scenario"
when:
osd_group_name is defined and
osd_group_name in group_names and
((journal_collocation and raw_multi_journal) or
(journal_collocation and osd_directory) or
(raw_multi_journal and osd_directory) or
(bluestore and journal_collocation) or
(bluestore and raw_multi_journal) or
(bluestore and osd_directory))
- osd_group_name is defined
- osd_group_name in group_names
- (journal_collocation and raw_multi_journal)
or (journal_collocation and osd_directory)
or (raw_multi_journal and osd_directory)
or (bluestore and journal_collocation)
or (bluestore and raw_multi_journal)
or (bluestore and osd_directory)
- name: verify devices have been provided
fail:
msg: "please provide devices to your osd scenario"
when:
osd_group_name is defined and
osd_group_name in group_names and
journal_collocation and
not osd_auto_discovery and
devices is not defined
- osd_group_name is defined
- osd_group_name in group_names
- journal_collocation
- not osd_auto_discovery
- devices is not defined
- name: verify journal devices have been provided
fail:
msg: "please provide devices to your osd scenario"
when:
osd_group_name is defined and
osd_group_name in group_names and
raw_multi_journal and
(raw_journal_devices is not defined or
devices is not defined)
- osd_group_name is defined
- osd_group_name in group_names
- raw_multi_journal
- raw_journal_devices is not defined
or devices is not defined
- name: verify directories have been provided
fail:
msg: "please provide directories to your osd scenario"
when:
osd_group_name is defined and
osd_group_name in group_names and
osd_directory and
osd_directories is not defined
- osd_group_name is defined
- osd_group_name in group_names
- osd_directory
- osd_directories is not defined

View File

@ -18,8 +18,8 @@
fail:
msg: "Distribution not supported {{ ansible_distribution_version }} by Red Hat Storage, only RHEL 7.1"
when:
ceph_stable_rh_storage and
{{ ansible_distribution_version | version_compare('7.1', '<') }}
- ceph_stable_rh_storage
- ansible_distribution_version | version_compare('7.1', '<')
- name: fail on unsupported distribution for ubuntu cloud archive
fail:
@ -32,5 +32,5 @@
fail:
msg: "Ansible version must be >= 1.9, please update!"
when:
ansible_version.major|int == 1 and
ansible_version.minor|int < 9
- ansible_version.major|int == 1
- ansible_version.minor|int < 9

View File

@ -1,5 +1,4 @@
---
- name: get ceph version
command: ceph --version
changed_when: false

View File

@ -1,5 +1,4 @@
---
- name: install the ceph repository stable key
apt_key:
data: "{{ lookup('file', role_path+'/files/cephstable.asc') }}"

View File

@ -35,13 +35,11 @@
pkg: radosgw
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
update_cache: yes
when:
rgw_group_name in group_names
when: rgw_group_name in group_names
- name: install ceph mds
apt:
pkg: ceph-mds
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
default_release: "{{ ceph_stable_release_uca | default(ansible_distribution_release) }}{{ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else ''}}"
when:
mds_group_name in group_names
when: mds_group_name in group_names

View File

@ -5,8 +5,8 @@
state: present
with_items: redhat_package_dependencies
when:
ansible_distribution == "RedHat" and
ansible_pkg_mgr == "yum"
- ansible_distribution == "RedHat"
- ansible_pkg_mgr == "yum"
- name: install dependencies
yum:
@ -14,8 +14,8 @@
state: present
with_items: centos_package_dependencies
when:
ansible_distribution == "CentOS" and
ansible_pkg_mgr == "yum"
- ansible_distribution == "CentOS"
- ansible_pkg_mgr == "yum"
- name: install dependencies
dnf:
@ -23,8 +23,8 @@
state: present
with_items: centos_package_dependencies
when:
ansible_distribution == "CentOS" and
ansible_pkg_mgr == "dnf"
- ansible_distribution == "CentOS"
- ansible_pkg_mgr == "dnf"
- name: configure ceph yum repository
include: redhat_ceph_repository.yml
@ -41,40 +41,48 @@
name: "ceph-mon"
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
when:
(ceph_origin == "distro" or ceph_stable_rh_storage or ceph_dev or
(ceph_stable and ceph_stable_release not in ceph_stable_releases)) and
mon_group_name in group_names and
ansible_pkg_mgr == "yum"
- mon_group_name in group_names
- ansible_pkg_mgr == "yum"
- (ceph_stable and ceph_stable_release not in ceph_stable_releases)
or ceph_stable_rh_storage
or ceph_dev
or ceph_origin == "distro"
- name: install distro or red hat storage ceph mon
dnf:
name: "ceph-mon"
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
when:
(ceph_origin == "distro" or ceph_stable_rh_storage or ceph_dev or
(ceph_stable and ceph_stable_release not in ceph_stable_releases)) and
mon_group_name in group_names and
ansible_pkg_mgr == "dnf"
- mon_group_name in group_names
- ansible_pkg_mgr == "dnf"
- (ceph_stable and ceph_stable_release not in ceph_stable_releases)
or ceph_origin == "distro"
or ceph_stable_rh_storage
or ceph_dev
- name: install distro or red hat storage ceph osd
yum:
name: "ceph-osd"
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
when:
(ceph_origin == "distro" or ceph_stable_rh_storage or ceph_dev or
(ceph_stable and ceph_stable_release not in ceph_stable_releases)) and
osd_group_name in group_names and
ansible_pkg_mgr == "yum"
- osd_group_name in group_names
- ansible_pkg_mgr == "yum"
- (ceph_stable and ceph_stable_release not in ceph_stable_releases)
or ceph_origin == "distro"
or ceph_stable_rh_storage
or ceph_dev
- name: install distro or red hat storage ceph osd
dnf:
name: "ceph-osd"
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
when:
(ceph_origin == "distro" or ceph_stable_rh_storage or ceph_dev or
(ceph_stable and ceph_stable_release not in ceph_stable_releases)) and
osd_group_name in group_names and
ansible_pkg_mgr == "dnf"
- osd_group_name in group_names
- ansible_pkg_mgr == "dnf"
- (ceph_stable and ceph_stable_release not in ceph_stable_releases)
or ceph_origin == "distro"
or ceph_stable_rh_storage
or ceph_dev
- name: install distro or red hat storage ceph mds
yum:
@ -101,16 +109,16 @@
name: ceph-test
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
when:
ceph_test and
ansible_pkg_mgr == "yum"
- ceph_test
- ansible_pkg_mgr == "yum"
- name: install ceph-test
dnf:
name: ceph-test
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
when:
ceph_test and
ansible_pkg_mgr == "dnf"
- ceph_test
- ansible_pkg_mgr == "dnf"
- name: install Inktank Ceph Enterprise RBD Kernel modules
yum:
@ -119,8 +127,8 @@
- "{{ ceph_stable_ice_temp_path }}/kmod-libceph-{{ ceph_stable_ice_kmod }}.rpm"
- "{{ ceph_stable_ice_temp_path }}/kmod-rbd-{{ ceph_stable_ice_kmod }}.rpm"
when:
ceph_stable_ice and
ansible_pkg_mgr == "yum"
- ceph_stable_ice
- ansible_pkg_mgr == "yum"
- name: install Inktank Ceph Enterprise RBD Kernel modules
dnf:
@ -129,21 +137,21 @@
- "{{ ceph_stable_ice_temp_path }}/kmod-libceph-{{ ceph_stable_ice_kmod }}.rpm"
- "{{ ceph_stable_ice_temp_path }}/kmod-rbd-{{ ceph_stable_ice_kmod }}.rpm"
when:
ceph_stable_ice and
ansible_pkg_mgr == "dnf"
- ceph_stable_ice
- ansible_pkg_mgr == "dnf"
- name: install rados gateway
yum:
name: ceph-radosgw
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
when:
rgw_group_name in group_names and
ansible_pkg_mgr == "yum"
- rgw_group_name in group_names
- ansible_pkg_mgr == "yum"
- name: install rados gateway
dnf:
name: ceph-radosgw
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
when:
rgw_group_name in group_names and
ansible_pkg_mgr == "dnf"
- rgw_group_name in group_names
- ansible_pkg_mgr == "dnf"

View File

@ -15,8 +15,8 @@
- deb http://archive.ubuntu.com/ubuntu {{ ansible_lsb.codename }}-updates multiverse
- deb http://security.ubuntu.com/ubuntu {{ ansible_lsb.codename }}-security multiverse
when:
ansible_lsb.codename in ['precise'] and not
http_100_continue
- ansible_lsb.codename in ['precise']
- not http_100_continue
# NOTE (leseb): disable the repo when we are using the Ceph repo for 100-continue packages
- name: disable multiverse repo for precise
@ -28,24 +28,24 @@
- deb http://archive.ubuntu.com/ubuntu {{ ansible_lsb.codename }}-updates multiverse
- deb http://security.ubuntu.com/ubuntu {{ ansible_lsb.codename }}-security multiverse
when:
ansible_lsb.codename in ['precise'] and
http_100_continue
- ansible_lsb.codename in ['precise']
- http_100_continue
# NOTE (leseb): needed for Ubuntu 14.04 to have access to libapache2-mod-fastcgi if 100-continue isn't being used
- name: enable multiverse repo for trusty
command: "apt-add-repository multiverse"
changed_when: false
when:
ansible_lsb.codename in ['trusty'] and not
http_100_continue
- ansible_lsb.codename in ['trusty']
- not http_100_continue
# NOTE (leseb): disable the repo when we are using the Ceph repo for 100-continue packages
- name: disable multiverse repo for trusty
command: "apt-add-repository -r multiverse"
changed_when: false
when:
ansible_lsb.codename in ['trusty'] and
http_100_continue
- ansible_lsb.codename in ['trusty']
- http_100_continue
# NOTE (leseb): if using 100-continue, add Ceph dev key
- name: install the ceph development repository key
@ -91,9 +91,7 @@
- apache2.2-bin
- apache2.2-common
- libapache2-mod-fastcgi
when:
purge_default_apache.changed or
purge_ceph_apache.changed
when: purge_default_apache.changed or purge_ceph_apache.changed
- name: install apache and fastcgi
apt:

View File

@ -16,19 +16,16 @@
apt:
name: ceph-mon
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
when:
mon_group_name in group_names
when: mon_group_name in group_names
- name: install red hat storage ceph osd
apt:
name: ceph-osd
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
when:
osd_group_name in group_names
when: osd_group_name in group_names
- name: install ceph-test
apt:
name: ceph-test
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
when:
ceph_test
when: ceph_test

View File

@ -22,40 +22,40 @@
key: "{{ ceph_stable_rh_storage_repository_path }}/RPM-GPG-KEY-redhat-release"
state: present
when:
ceph_stable_rh_storage and
ceph_stable_rh_storage_iso_install
- ceph_stable_rh_storage
- ceph_stable_rh_storage_iso_install
- name: add ceph stable repository
yum:
name: http://download.ceph.com/rpm-{{ ceph_stable_release }}/{{ ceph_stable_redhat_distro }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro|replace('rhel', 'el') }}.noarch.rpm
changed_when: false
when:
ceph_stable and
ansible_pkg_mgr == "yum"
- ceph_stable
- ansible_pkg_mgr == "yum"
- name: add ceph stable repository
dnf:
name: http://download.ceph.com/rpm-{{ ceph_stable_release }}/{{ ceph_stable_redhat_distro }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro|replace('rhel', 'el') }}.noarch.rpm
changed_when: false
when:
ceph_stable and
ansible_pkg_mgr == "dnf"
- ceph_stable
- ansible_pkg_mgr == "dnf"
- name: add ceph development repository
yum:
name: http://gitbuilder.ceph.com/ceph-rpm-{{ ceph_dev_redhat_distro }}-x86_64-basic/ref/{{ ceph_dev_branch }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro }}.noarch.rpm
changed_when: false
when:
ceph_dev and
ansible_pkg_mgr == "yum"
- ceph_dev
- ansible_pkg_mgr == "yum"
- name: add ceph development repository
dnf:
name: http://gitbuilder.ceph.com/ceph-rpm-{{ ceph_dev_redhat_distro }}-x86_64-basic/ref/{{ ceph_dev_branch }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro }}.noarch.rpm
changed_when: false
when:
ceph_dev and
ansible_pkg_mgr == "dnf"
- ceph_dev
- ansible_pkg_mgr == "dnf"
- name: add inktank ceph enterprise repository
template:
@ -74,5 +74,5 @@
group: root
mode: 0644
when:
ceph_stable_rh_storage and
ceph_stable_rh_storage_iso_install
- ceph_stable_rh_storage
- ceph_stable_rh_storage_iso_install

View File

@ -15,17 +15,17 @@
- include: ./pre_requisites/prerequisite_rh_storage_iso_install.yml
when:
ceph_stable_rh_storage and
ceph_stable_rh_storage_iso_install and
ansible_os_family == "RedHat"
- ceph_stable_rh_storage
- ceph_stable_rh_storage_iso_install
- ansible_os_family == "RedHat"
tags:
- package-install
- include: ./pre_requisites/prerequisite_rh_storage_cdn_install.yml
when:
ceph_stable_rh_storage and
ceph_stable_rh_storage_cdn_install and
ansible_os_family == "RedHat"
- ceph_stable_rh_storage
- ceph_stable_rh_storage_cdn_install
- ansible_os_family == "RedHat"
tags:
- package-install
@ -36,31 +36,31 @@
- include: ./installs/install_on_debian.yml
when:
ansible_os_family == 'Debian' and
not ceph_stable_rh_storage
- ansible_os_family == 'Debian'
- not ceph_stable_rh_storage
tags:
- package-install
- include: ./installs/install_rh_storage_on_debian.yml
when:
ansible_os_family == 'Debian' and
ceph_stable_rh_storage
- ansible_os_family == 'Debian'
- ceph_stable_rh_storage
tags:
- package-install
- include: ./installs/install_rgw_on_redhat.yml
when:
ansible_os_family == 'RedHat' and
radosgw_frontend == 'apache' and
rgw_group_name in group_names
- ansible_os_family == 'RedHat'
- radosgw_frontend == 'apache'
- rgw_group_name in group_names
tags:
- package-install
- include: ./installs/install_rgw_on_debian.yml
when:
ansible_os_family == 'Debian' and
radosgw_frontend == 'apache' and
rgw_group_name in group_names
- ansible_os_family == 'Debian'
- radosgw_frontend == 'apache'
- rgw_group_name in group_names
tags:
- package-install
@ -105,44 +105,44 @@
- set_fact:
rbd_client_directory_owner: root
when:
is_before_infernalis and
(rbd_client_directory_owner is not defined or
not rbd_client_directory_owner)
- is_before_infernalis
- rbd_client_directory_owner is not defined
or not rbd_client_directory_owner
- set_fact:
rbd_client_directory_owner: ceph
when:
is_after_hammer and
(rbd_client_directory_owner is not defined or
not rbd_client_directory_owner)
- is_after_hammer
- rbd_client_directory_owner is not defined
or not rbd_client_directory_owner
- set_fact:
rbd_client_directory_group: root
when:
is_before_infernalis and
(rbd_client_directory_group is not defined or
not rbd_client_directory_group)
- is_before_infernalis
- rbd_client_directory_group is not defined
or not rbd_client_directory_group
- set_fact:
rbd_client_directory_group: ceph
when:
is_after_hammer and
(rbd_client_directory_group is not defined or
not rbd_client_directory_group)
- is_after_hammer
- rbd_client_directory_group is not defined
or not rbd_client_directory_group
- set_fact:
rbd_client_directory_mode: "1777"
when:
is_before_infernalis and
(rbd_client_directory_mode is not defined or
not rbd_client_directory_mode)
- is_before_infernalis
- rbd_client_directory_mode is not defined
or not rbd_client_directory_mode
- set_fact:
rbd_client_directory_mode: "0770"
when:
is_after_hammer and
(rbd_client_directory_mode is not defined or
not rbd_client_directory_mode)
- is_after_hammer
- rbd_client_directory_mode is not defined
or not rbd_client_directory_mode
- name: check for a ceph socket
shell: "stat /var/run/ceph/*.asok > /dev/null 2>&1"
@ -161,9 +161,7 @@
changed_when: false
become: false
run_once: true
when:
cephx or
generate_fsid
when: cephx or generate_fsid
- name: generate cluster uuid
local_action: shell python -c 'import uuid; print str(uuid.uuid4())' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
@ -231,8 +229,7 @@
insertafter: EOF
create: yes
line: "CLUSTER={{ cluster }}"
when:
ansible_os_family == "RedHat"
when: ansible_os_family == "RedHat"
- name: configure cluster name
lineinfile:
@ -240,5 +237,4 @@
insertafter: EOF
create: yes
line: "CLUSTER={{ cluster }}"
when:
ansible_os_family == "Debian"
when: ansible_os_family == "Debian"

View File

@ -26,8 +26,8 @@
- kmod-libceph-{{ ceph_stable_ice_kmod }}.rpm
- kmod-rbd-{{ ceph_stable_ice_kmod }}.rpm
when:
ceph_stable_ice and
ansible_os_family == 'RedHat'
- ceph_stable_ice
- ansible_os_family == 'RedHat'
- name: stat extracted ice repo files
stat:
@ -41,12 +41,12 @@
chdir: "{{ ceph_stable_ice_temp_path }}"
changed_when: false
when:
ceph_stable_ice and
repo_exist.stat.exists == False
- ceph_stable_ice
- repo_exist.stat.exists == False
- name: move ice extracted packages
shell: "mv {{ ceph_stable_ice_temp_path }}/ceph/*/* {{ ceph_stable_ice_temp_path }}"
changed_when: false
when:
ceph_stable_ice and
repo_exist.stat.exists == False
- ceph_stable_ice
- repo_exist.stat.exists == False

View File

@ -26,8 +26,8 @@
command: subscription-manager repos --enable rhel-7-server-rhceph-1.3-mon-rpms
changed_when: false
when:
mon_group_name in group_names and
rh_storage_mon_repo.rc != 0
- mon_group_name in group_names
- rh_storage_mon_repo.rc != 0
- name: check if the red hat storage osd repo is already present
shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-1.3-osd-rpms
@ -40,8 +40,8 @@
command: subscription-manager repos --enable rhel-7-server-rhceph-1.3-osd-rpms
changed_when: false
when:
osd_group_name in group_names and
rh_storage_osd_repo.rc != 0
- osd_group_name in group_names
- rh_storage_osd_repo.rc != 0
- name: check if the red hat storage rados gateway repo is already present
shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-1.3-tools-rpms
@ -54,5 +54,5 @@
command: subscription-manager repos --enable rhel-7-server-rhceph-1.3-tools-rpms
changed_when: false
when:
rgw_group_name in group_names and
rh_storage_rgw_repo.rc != 0
- rgw_group_name in group_names
- rh_storage_rgw_repo.rc != 0

View File

@ -15,8 +15,7 @@
- set_fact:
after_hammer=True
when:
ceph_version.stdout not in ['firefly','giant', 'hammer']
when: ceph_version.stdout not in ['firefly','giant', 'hammer']
- name: create bootstrap directories (for or before hammer)
file:

View File

@ -32,8 +32,8 @@
- python-pip
- docker-engine
when:
ansible_os_family == 'RedHat' and
ansible_pkg_mgr == "yum"
- ansible_os_family == 'RedHat'
- ansible_pkg_mgr == "yum"
tags:
with_pkg
@ -45,8 +45,8 @@
- python-pip
- docker-engine
when:
ansible_os_family == 'RedHat' and
ansible_pkg_mgr == "dnf"
- ansible_os_family == 'RedHat'
- ansible_pkg_mgr == "dnf"
tags:
with_pkg

View File

@ -18,8 +18,8 @@
- { name: "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring", copy_key: true }
- { name: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
when:
cephx and
item.copy_key|bool
- cephx
- item.copy_key|bool
- name: create mds directory
file:
@ -78,8 +78,8 @@
changed_when: false
failed_when: false
when:
use_systemd and
is_after_hammer
- use_systemd
- is_after_hammer
- name: start and add that the metadata service to the init sequence (upstart)
command: initctl emit ceph-mds cluster={{ cluster }} id={{ mds_name }}
@ -95,8 +95,8 @@
args: mds.{{ mds_name }}
changed_when: false
when:
not use_systemd and
is_before_infernalis
- not use_systemd
- is_before_infernalis
- name: start and add that the metadata service to the init sequence (systemd after hammer)
service:
@ -105,5 +105,5 @@
enabled: yes
changed_when: false
when:
use_systemd and
is_after_hammer
- use_systemd
- is_after_hammer

View File

@ -12,8 +12,8 @@
creates: /etc/ceph/{{ cluster }}.client.restapi.keyring
changed_when: false
when:
cephx and
groups[restapi_group_name] is defined
- cephx
- groups[restapi_group_name] is defined
- include: openstack_config.yml
when: openstack_config

View File

@ -49,29 +49,29 @@
args:
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
when:
cephx and
is_after_hammer
- cephx
- is_after_hammer
- name: ceph monitor mkfs without keyring (for or after infernalis release)
command: ceph-mon --setuser ceph --setgroup ceph --mkfs -i {{ monitor_name }} --fsid {{ fsid }}
args:
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/store.db
when:
not cephx and
is_after_hammer
- not cephx
- is_after_hammer
- name: ceph monitor mkfs with keyring (before infernalis release)
command: ceph-mon --mkfs -i {{ monitor_name }} --fsid {{ fsid }} --keyring /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }}
args:
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
when:
cephx and
is_before_infernalis
- cephx
- is_before_infernalis
- name: ceph monitor mkfs without keyring (before infernalis release)
command: ceph-mon --mkfs -i {{ monitor_name }} --fsid {{ fsid }}
args:
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/store.db
when:
not cephx and
is_before_infernalis
- not cephx
- is_before_infernalis

View File

@ -15,8 +15,8 @@
- include: checks.yml
when:
ceph_health.rc != 0 and
not mon_containerized_deployment_with_kv
- ceph_health.rc != 0
- not mon_containerized_deployment_with_kv
- include: pre_requisite.yml
@ -25,8 +25,8 @@
# let the first mon create configs and keyrings
- include: create_configs.yml
when:
inventory_hostname == groups.mons[0] and
not mon_containerized_default_ceph_conf_with_kv
- inventory_hostname == groups.mons[0]
- not mon_containerized_default_ceph_conf_with_kv
- include: fetch_configs.yml
when: not mon_containerized_deployment_with_kv
@ -45,8 +45,8 @@
creates: /etc/ceph/ceph.client.restapi.keyring
changed_when: false
when:
cephx and
mon_containerized_deployment and
groups[restapi_group_name] is defined and
inventory_hostname == groups.mons|last and
not mon_containerized_deployment_with_kv
- cephx
- mon_containerized_deployment
- groups[restapi_group_name] is defined
- inventory_hostname == groups.mons|last
- not mon_containerized_deployment_with_kv

View File

@ -32,8 +32,8 @@
- python-pip
- docker-engine
when:
ansible_os_family == 'RedHat' and
ansible_pkg_mgr == "yum"
- ansible_os_family == 'RedHat'
- ansible_pkg_mgr == "yum"
tags:
with_pkg
@ -45,8 +45,8 @@
- python-pip
- docker-engine
when:
ansible_os_family == 'RedHat' and
ansible_pkg_mgr == "dnf"
- ansible_os_family == 'RedHat'
- ansible_pkg_mgr == "dnf"
tags:
with_pkg

View File

@ -11,9 +11,9 @@
KV_PORT: "{{kv_port}}"
run_once: true
when:
inventory_hostname == groups.mons[0] and
mon_containerized_deployment_with_kv and
mon_containerized_default_ceph_conf_with_kv
- inventory_hostname == groups.mons[0]
- mon_containerized_deployment_with_kv
- mon_containerized_default_ceph_conf_with_kv
- name: populate kv_store with custom ceph.conf
docker:
@ -29,9 +29,9 @@
- /etc/ceph/ceph.conf:/etc/ceph/ceph.defaults
run_once: true
when:
inventory_hostname == groups.mons[0] and
mon_containerized_deployment_with_kv and
not mon_containerized_default_ceph_conf_with_kv
- inventory_hostname == groups.mons[0]
- mon_containerized_deployment_with_kv
- not mon_containerized_default_ceph_conf_with_kv
- name: delete populate-kv-store docker
docker:
@ -49,34 +49,26 @@
owner: "root"
group: "root"
mode: "0644"
when:
is_atomic or
ansible_os_family == 'CoreOS'
when: is_atomic or ansible_os_family == 'CoreOS'
- name: link systemd unit file for mon instance
file:
src: /var/lib/ceph/ceph-mon@.service
dest: /etc/systemd/system/multi-user.target.wants/ceph-mon@{{ ansible_hostname }}.service
state: link
when:
is_atomic or
ansible_os_family == 'CoreOS'
when: is_atomic or ansible_os_family == 'CoreOS'
- name: enable systemd unit file for mon instance
shell: systemctl enable /etc/systemd/system/multi-user.target.wants/ceph-mon@{{ ansible_hostname }}.service
failed_when: false
changed_when: false
when:
is_atomic or
ansible_os_family == 'CoreOS'
when: is_atomic or ansible_os_family == 'CoreOS'
- name: reload systemd unit files
shell: systemctl daemon-reload
changed_when: false
failed_when: false
when:
is_atomic or
ansible_os_family == 'CoreOS'
when: is_atomic or ansible_os_family == 'CoreOS'
- name: systemd start mon container
service:
@ -84,9 +76,7 @@
state: started
enabled: yes
changed_when: false
when:
is_atomic or
ansible_os_family == 'CoreOS'
when: is_atomic or ansible_os_family == 'CoreOS'
- name: wait for ceph.conf exists
wait_for:
@ -103,9 +93,9 @@
env: "MON_IP={{ hostvars[inventory_hostname]['ansible_' + ceph_mon_docker_interface]['ipv4']['address'] }},CEPH_DAEMON=MON,CEPH_PUBLIC_NETWORK={{ ceph_mon_docker_subnet }},{{ ceph_mon_extra_envs }}"
volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph"
when:
not is_atomic and
ansible_os_family != 'CoreOS' and
not mon_containerized_deployment_with_kv
- not is_atomic
- ansible_os_family != 'CoreOS'
- not mon_containerized_deployment_with_kv
- name: run the ceph monitor docker image with kv
docker:
@ -116,6 +106,6 @@
privileged: "{{ mon_docker_privileged }}"
env: "KV_TYPE={{kv_type}},KV_IP={{kv_endpoint}},MON_IP={{ hostvars[inventory_hostname]['ansible_' + ceph_mon_docker_interface]['ipv4']['address'] }},CEPH_DAEMON=MON,CEPH_PUBLIC_NETWORK={{ ceph_mon_docker_subnet }},{{ ceph_mon_extra_envs }}"
when:
not is_atomic and
ansible_os_family != 'CoreOS' and
mon_containerized_deployment_with_kv
- not is_atomic
- ansible_os_family != 'CoreOS'
- mon_containerized_deployment_with_kv

View File

@ -10,13 +10,13 @@
- include: create_mds_filesystems.yml
when:
not mon_containerized_deployment and
groups[mds_group_name] is defined
- not mon_containerized_deployment
- groups[mds_group_name] is defined
- include: secure_cluster.yml
when:
secure_cluster and
not mon_containerized_deployment
- secure_cluster
- not mon_containerized_deployment
- include: ./docker/main.yml
when: mon_containerized_deployment

View File

@ -24,16 +24,16 @@
command: service ceph start mon
changed_when: false
when:
ansible_distribution != "Ubuntu" and
is_before_infernalis
- ansible_distribution != "Ubuntu"
- is_before_infernalis
- name: start and add that the monitor service to the init sequence (for or after infernalis)
command: systemctl enable ceph-mon@{{ monitor_name }}
changed_when: false
failed_when: false
when:
use_systemd and
is_after_hammer
- use_systemd
- is_after_hammer
- name: start the monitor service (for or after infernalis)
service:
@ -42,8 +42,8 @@
enabled: yes
changed_when: false
when:
use_systemd and
is_after_hammer
- use_systemd
- is_after_hammer
- name: collect admin and bootstrap keys
command: ceph-create-keys --cluster {{ cluster }} --id {{ monitor_name }}

View File

@ -7,11 +7,11 @@
ignore_errors: true
with_dict: ansible_devices
when:
ansible_devices is defined and
item.value.removable == "0" and
item.value.partitions|count == 0 and
journal_collocation and
osd_auto_discovery
- ansible_devices is defined
- item.value.removable == "0"
- item.value.partitions|count == 0
- journal_collocation
- osd_auto_discovery
- name: activate osd(s) when device is a disk
command: ceph-disk activate {{ item.1 | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\\1p') }}1
@ -22,9 +22,9 @@
failed_when: false
register: activate_osd_disk
when:
not item.0.get("skipped") and
item.0.get("rc", 0) != 0 and
not osd_auto_discovery
- not item.0.get("skipped")
- item.0.get("rc", 0) != 0
- not osd_auto_discovery
- name: fail if ceph-disk cannot create an OSD
fail:
@ -42,9 +42,9 @@
changed_when: false
failed_when: false
when:
not item.0.get("skipped") and
item.0.get("rc", 0) == 0 and
not osd_auto_discovery
- not item.0.get("skipped")
- item.0.get("rc", 0) == 0
- not osd_auto_discovery
- include: osd_fragment.yml
when: crush_location
@ -54,10 +54,10 @@
policy: targeted
state: permissive
when:
ansible_selinux != false and
ansible_selinux['status'] == 'enabled' and
ansible_selinux['config_mode'] != 'disabled' and
is_before_infernalis
- ansible_selinux != false
- ansible_selinux['status'] == 'enabled'
- ansible_selinux['config_mode'] != 'disabled'
- is_before_infernalis
- name: start and add that the osd service(s) to the init sequence (before infernalis)
service:
@ -65,8 +65,8 @@
state: started
enabled: yes
when:
ansible_distribution != "Ubuntu" and
is_before_infernalis
- ansible_distribution != "Ubuntu"
- is_before_infernalis
- name: get osd id (for or after infernalis)
shell: "ls /var/lib/ceph/osd/ | grep '-' | cut -d '-' -f 2"
@ -74,8 +74,8 @@
failed_when: false
register: osd_id
when:
use_systemd and
is_after_hammer
- use_systemd
- is_after_hammer
- name: enable osd service instance(s) (for or after infernalis)
file:
@ -85,8 +85,8 @@
with_items: osd_id.stdout_lines
failed_when: false
when:
use_systemd and
is_after_hammer
- use_systemd
- is_after_hammer
- name: start and add that the osd service(s) to the init sequence (for or after infernalis)
service:
@ -96,5 +96,5 @@
with_items: osd_id.stdout_lines
changed_when: false
when:
use_systemd and
is_after_hammer
- use_systemd
- is_after_hammer

View File

@ -25,9 +25,9 @@
failed_when: false
register: ispartition_autodiscover
when:
ansible_devices is defined and
item.value.removable == "0" and
osd_auto_discovery
- ansible_devices is defined
- item.value.removable == "0"
- osd_auto_discovery
# NOTE (leseb): we must do this because of
# https://github.com/ansible/ansible/issues/4297
@ -42,9 +42,8 @@
failed_when: false
register: osd_partition_status
when:
journal_collocation or
raw_multi_journal and not
osd_auto_discovery
- journal_collocation or raw_multi_journal
- not osd_auto_discovery
- name: check the partition status of the osd disks (autodiscover disks)
shell: "parted --script /dev/{{ item.key }} print > /dev/null 2>&1"
@ -53,12 +52,11 @@
failed_when: false
register: osd_partition_status_autodiscover
when:
journal_collocation or
raw_multi_journal and
ansible_devices is defined and
item.value.removable == "0" and
item.value.partitions|count == 0 and
osd_auto_discovery
- journal_collocation or raw_multi_journal
- ansible_devices is defined
- item.value.removable == "0"
- item.value.partitions|count == 0
- osd_auto_discovery
# NOTE (leseb): we must do this because of
# https://github.com/ansible/ansible/issues/4297
@ -81,9 +79,9 @@
- devices
changed_when: false
when:
(journal_collocation or raw_multi_journal) and not
osd_auto_discovery and
item.0.rc != 0
- journal_collocation or raw_multi_journal
- not osd_auto_discovery
- item.0.rc != 0
- name: fix partitions gpt header or labels of the osd disks (autodiscover disks)
shell: sgdisk --zap-all --clear --mbrtogpt -g -- "/dev/{{ item.1.key }}"
@ -92,12 +90,12 @@
- ansible_devices
changed_when: false
when:
journal_collocation and
osd_auto_discovery and
ansible_devices is defined and
item.value.removable == "0" and
item.value.partitions|count == 0 and
item.0.rc != 0
- journal_collocation
- osd_auto_discovery
- ansible_devices is defined
- item.value.removable == "0"
- item.value.partitions|count == 0
- item.0.rc != 0
- name: fix partitions gpt header or labels of the journal devices
shell: sgdisk --zap-all --clear --mbrtogpt -g -- {{ item.1 }}
@ -106,8 +104,8 @@
- raw_journal_devices
changed_when: false
when:
raw_multi_journal and
item.0.rc != 0
- raw_multi_journal
- item.0.rc != 0
- name: check if a partition named 'ceph' exists
shell: "parted --script {{ item }} print | egrep -sq '^ 1.*ceph'"
@ -124,9 +122,9 @@
failed_when: false
register: parted_autodiscover
when:
ansible_devices is defined and
item.value.removable == "0" and
osd_auto_discovery
- ansible_devices is defined
- item.value.removable == "0"
- osd_auto_discovery
# NOTE (leseb): we must do this because of
# https://github.com/ansible/ansible/issues/4297

View File

@ -15,8 +15,7 @@
- set_fact:
after_hamer=True
when:
ceph_version.stdout not in ['firefly','giant', 'hammer']
when: ceph_version.stdout not in ['firefly','giant', 'hammer']
- name: create bootstrap directories (for or before hammer)
file:

View File

@ -7,8 +7,8 @@
- include: checks.yml
when:
ceph_health.rc != 0 and
not osd_containerized_deployment_with_kv
- ceph_health.rc != 0
- not osd_containerized_deployment_with_kv
- name: check if it is Atomic host
stat: path=/run/ostree-booted
@ -20,7 +20,7 @@
- include: pre_requisite.yml
# NOTE (jimcurtis): dirs_permissions.yml must precede fetch_configs.yml
# NOTE (jimcurtis): dirs_permissions.yml must precede fetch_configs.yml
# because it creates the directories needed by the latter.
- include: dirs_permissions.yml

View File

@ -32,8 +32,8 @@
- python-pip
- docker-engine
when:
ansible_os_family == 'RedHat' and
ansible_pkg_mgr == "yum"
- ansible_os_family == 'RedHat'
- ansible_pkg_mgr == "yum"
tags:
with_pkg
@ -45,8 +45,8 @@
- python-pip
- docker-engine
when:
ansible_os_family == 'RedHat' and
ansible_pkg_mgr == "dnf"
- ansible_os_family == 'RedHat'
- ansible_pkg_mgr == "dnf"
tags:
with_pkg

View File

@ -29,11 +29,11 @@
- ceph_osd_docker_devices
- osd_prepared.results
when:
item.1.get("rc", 0) != 0 and
ceph_osd_docker_prepare_env is defined and
not osd_containerized_deployment_with_kv and
not is_atomic and
not ansible_os_family == 'CoreOS'
- item.1.get("rc", 0) != 0
- ceph_osd_docker_prepare_env is defined
- not osd_containerized_deployment_with_kv
- not is_atomic
- not ansible_os_family == 'CoreOS'
- name: prepare ceph osd disk with kv_store
docker:
@ -50,11 +50,11 @@
- ceph_osd_docker_devices
- osd_prepared.results
when:
item.1.get("rc", 0) != 0 and
ceph_osd_docker_prepare_env is defined and
osd_containerized_deployment_with_kv and
not is_atomic and
not ansible_os_family == 'CoreOS'
- item.1.get("rc", 0) != 0
- ceph_osd_docker_prepare_env is defined
- osd_containerized_deployment_with_kv
- not is_atomic
- not ansible_os_family == 'CoreOS'
- name: prepare ceph osd disk for container operating systems
shell: |
@ -74,10 +74,9 @@
- ceph_osd_docker_devices
- osd_prepared.results
when:
item.1.get("rc", 0) != 0 and
is_atomic or
ansible_os_family == 'CoreOS' and
not osd_containerized_deployment_with_kv
- item.1.get("rc", 0) != 0
- is_atomic or ansible_os_family == 'CoreOS'
- not osd_containerized_deployment_with_kv
- name: prepare ceph osd disk for container operating systems with kv_store
shell: |
@ -97,10 +96,9 @@
- ceph_osd_docker_devices
- osd_prepared.results
when:
item.1.get("rc", 0) != 0 and
is_atomic or
ansible_os_family == 'CoreOS' and
osd_containerized_deployment_with_kv
- item.1.get("rc", 0) != 0
- is_atomic or ansible_os_family == 'CoreOS'
- osd_containerized_deployment_with_kv
# Use systemd to manage container on Atomic host
- name: generate systemd unit file
@ -112,9 +110,7 @@
group: "root"
mode: "0644"
failed_when: false
when:
is_atomic or
ansible_os_family == 'CoreOS'
when: is_atomic or ansible_os_family == 'CoreOS'
- name: link systemd unit file for osd instance
file:
@ -122,26 +118,20 @@
dest: /etc/systemd/system/multi-user.target.wants/ceph-osd@{{ item | basename }}.service
state: link
with_items: ceph_osd_docker_devices
when:
is_atomic or
ansible_os_family == 'CoreOS'
when: is_atomic or ansible_os_family == 'CoreOS'
- name: enable systemd unit file for osd instance
shell: systemctl enable /etc/systemd/system/multi-user.target.wants/ceph-osd@{{ item | basename }}.service
failed_when: false
changed_when: false
with_items: ceph_osd_docker_devices
when:
is_atomic or
ansible_os_family == 'CoreOS'
when: is_atomic or ansible_os_family == 'CoreOS'
- name: reload systemd unit files
shell: systemctl daemon-reload
changed_when: false
failed_when: false
when:
is_atomic or
ansible_os_family == 'CoreOS'
when: is_atomic or ansible_os_family == 'CoreOS'
- name: systemd start osd container
service:
@ -150,9 +140,7 @@
enabled: yes
changed_when: false
with_items: ceph_osd_docker_devices
when:
is_atomic or
ansible_os_family == 'CoreOS'
when: is_atomic or ansible_os_family == 'CoreOS'
- name: run the ceph osd docker image
docker:
@ -166,9 +154,9 @@
volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/dev:/dev,/run:/run"
with_items: ceph_osd_docker_devices
when:
not is_atomic and
ansible_os_family != 'CoreOS' and
not osd_containerized_deployment_with_kv
- not is_atomic
- ansible_os_family != 'CoreOS'
- not osd_containerized_deployment_with_kv
- name: run the ceph osd docker image with kv
docker:
@ -182,6 +170,6 @@
volumes: "/dev/:/dev/"
with_items: ceph_osd_docker_devices
when:
not is_atomic and
ansible_os_family != 'CoreOS' and
osd_containerized_deployment_with_kv
- not is_atomic
- ansible_os_family != 'CoreOS'
- osd_containerized_deployment_with_kv

View File

@ -4,23 +4,23 @@
- include: ./scenarios/journal_collocation.yml
when:
journal_collocation and
not osd_containerized_deployment
- journal_collocation
- not osd_containerized_deployment
- include: ./scenarios/raw_multi_journal.yml
when:
raw_multi_journal and
not osd_containerized_deployment
- raw_multi_journal
- not osd_containerized_deployment
- include: ./scenarios/osd_directory.yml
when:
osd_directory and
not osd_containerized_deployment
- osd_directory
- not osd_containerized_deployment
- include: ./scenarios/bluestore.yml
when:
osd_objectstore == 'bluestore' and
not osd_containerized_deployment
- osd_objectstore == 'bluestore'
- not osd_containerized_deployment
- include: ./docker/main.yml
when: osd_containerized_deployment

View File

@ -32,5 +32,5 @@
- { name: "/var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring", copy_key: true }
- { name: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
when:
cephx and
item.copy_key|bool
- cephx
- item.copy_key|bool

View File

@ -1,4 +1,3 @@
---
## SCENARIO 4: BLUESTORE
@ -16,11 +15,11 @@
register: prepared_osds
with_dict: ansible_devices
when:
ansible_devices is defined and
item.value.removable == "0" and
item.value.partitions|count == 0 and
bluestore and
osd_auto_discovery
- ansible_devices is defined
- item.value.removable == "0"
- item.value.partitions|count == 0
- bluestore
- osd_auto_discovery
- name: manually prepare osd disk(s)
command: ceph-disk prepare --bluestore --cluster "{{ cluster }}" "{{ item.2 }}"
@ -30,11 +29,11 @@
- combined_ispartition_results.results
- devices
when:
not item.0.get("skipped") and
not item.1.get("skipped") and
item.0.get("rc", 0) != 0 and
item.1.get("rc", 0) != 0 and
bluestore and not
osd_auto_discovery
- not item.0.get("skipped")
- not item.1.get("skipped")
- item.0.get("rc", 0) != 0
- item.1.get("rc", 0) != 0
- bluestore
- not osd_auto_discovery
- include: ../activate_osds.yml

View File

@ -15,11 +15,11 @@
register: prepared_osds
with_dict: ansible_devices
when:
ansible_devices is defined and
item.value.removable == "0" and
item.value.partitions|count == 0 and
journal_collocation and
osd_auto_discovery
- ansible_devices is defined
- item.value.removable == "0"
- item.value.partitions|count == 0
- journal_collocation
- osd_auto_discovery
- name: manually prepare osd disk(s)
command: "ceph-disk prepare --cluster {{ cluster }} {{ item.2 }}"
@ -29,11 +29,11 @@
- combined_ispartition_results.results
- devices
when:
not item.0.get("skipped") and
not item.1.get("skipped") and
item.0.get("rc", 0) != 0 and
item.1.get("rc", 0) != 0 and
journal_collocation and not
osd_auto_discovery
- not item.0.get("skipped")
- not item.1.get("skipped")
- item.0.get("rc", 0) != 0
- item.1.get("rc", 0) != 0
- journal_collocation
- not osd_auto_discovery
- include: ../activate_osds.yml

View File

@ -19,11 +19,11 @@
changed_when: false
ignore_errors: true
when:
not item.0.get("skipped") and
not item.1.get("skipped") and
item.0.get("rc", 0) != 0 and
item.1.get("rc", 0) != 0 and
raw_multi_journal and
not osd_auto_discovery
- not item.0.get("skipped")
- not item.1.get("skipped")
- item.0.get("rc", 0) != 0
- item.1.get("rc", 0) != 0
- raw_multi_journal
- not osd_auto_discovery
- include: ../activate_osds.yml

View File

@ -24,5 +24,4 @@
owner: "{{ key_owner }}"
group: "{{ key_group }}"
mode: "{{ key_mode }}"
when:
cephx
when: cephx

View File

@ -11,8 +11,8 @@
command: service ceph start ceph-rbd-mirror
changed_when: false
when:
use_systemd and
is_before_infernalis
- use_systemd
- is_before_infernalis
- name: enable systemd unit file for the rbd mirror service (systemd after hammer)
file:
@ -22,8 +22,8 @@
changed_when: false
failed_when: false
when:
use_systemd and
is_after_hammer
- use_systemd
- is_after_hammer
- name: start and add that the rbd mirror service to the init sequence (systemd after hammer)
service:
@ -32,5 +32,5 @@
enabled: yes
changed_when: false
when:
use_systemd and
is_after_hammer
- use_systemd
- is_after_hammer

View File

@ -8,8 +8,7 @@
- set_fact:
after_hammer=True
when:
ceph_version.stdout not in ['firefly','giant', 'hammer']
when: ceph_version.stdout not in ['firefly','giant', 'hammer']
- name: create bootstrap directories (for or before hammer)
file:

View File

@ -32,8 +32,8 @@
- python-pip
- docker-engine
when:
ansible_os_family == 'RedHat' and
ansible_pkg_mgr == "yum"
- ansible_os_family == 'RedHat'
- ansible_pkg_mgr == "yum"
tags:
with_pkg
@ -45,8 +45,8 @@
- python-pip
- docker-engine
when:
ansible_os_family == 'RedHat' and
ansible_pkg_mgr == "dnf"
- ansible_os_family == 'RedHat'
- ansible_pkg_mgr == "dnf"
tags:
with_pkg

View File

@ -15,8 +15,7 @@
- set_fact:
after_hammer=True
when:
ceph_version.stdout not in ['firefly','giant', 'hammer']
when: ceph_version.stdout not in ['firefly','giant', 'hammer']
- name: create bootstrap directories (for or before hammer)
file:

View File

@ -32,8 +32,8 @@
- python-pip
- docker-engine
when:
ansible_os_family == 'RedHat' and
ansible_pkg_mgr == "yum"
- ansible_os_family == 'RedHat'
- ansible_pkg_mgr == "yum"
tags:
with_pkg
@ -45,8 +45,8 @@
- python-pip
- docker-engine
when:
ansible_os_family == 'RedHat' and
ansible_pkg_mgr == "dnf"
- ansible_os_family == 'RedHat'
- ansible_pkg_mgr == "dnf"
tags:
with_pkg

View File

@ -23,8 +23,8 @@
- { name: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: true }
- { name: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
when:
cephx and
item.copy_key|bool
- cephx
- item.copy_key|bool
- name: create rados gateway keyring
command: ceph --cluster {{ cluster }} --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring auth get-or-create client.rgw.{{ ansible_hostname }} osd 'allow rwx' mon 'allow rw' -o /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring

View File

@ -8,10 +8,10 @@
- name: start rgw
command: /etc/init.d/radosgw start
when:
rgwstatus.rc != 0 and
ansible_distribution != "Ubuntu" and
ansible_os_family != 'RedHat' and
not use_systemd
- rgwstatus.rc != 0
- ansible_distribution != "Ubuntu"
- ansible_os_family != 'RedHat'
- not use_systemd
- name: start and add that the rados gateway service to the init sequence (ubuntu)
command: initctl emit radosgw cluster={{ cluster }} id=rgw.{{ ansible_hostname }}
@ -25,8 +25,8 @@
state: started
enabled: yes
when:
ansible_os_family == 'RedHat' and
is_before_infernalis
- ansible_os_family == 'RedHat'
- is_before_infernalis
- name: enable systemd unit file for rgw instance (for or after infernalis)
file:
@ -36,8 +36,8 @@
changed_when: false
failed_when: false
when:
use_systemd and
is_after_hammer
- use_systemd
- is_after_hammer
- name: start rgw with systemd (for or after infernalis)
service:
@ -45,5 +45,5 @@
state: started
enabled: yes
when:
use_systemd and
is_after_hammer
- use_systemd
- is_after_hammer