Re-arrange roles

Trying to add more clarity to the role's task structure.

Signed-off-by: leseb <seb@redhat.com>
pull/336/head
leseb 2015-07-23 20:01:43 +02:00
parent b26e35a70e
commit f1e6b0216d
33 changed files with 98 additions and 98 deletions

View File

@ -10,46 +10,46 @@
- ntp
- hdparm
- name: install the Ceph repository stable key
- name: install the ceph repository stable key
apt_key: >
data="{{ lookup('file', 'cephstable.asc') }}"
data="{{ lookup('file', '../../files/cephstable.asc') }}"
state=present
when: ceph_stable
- name: install the Ceph development repository key
- name: install the ceph development repository key
apt_key: >
data="{{ lookup('file', 'cephdev.asc') }}"
data="{{ lookup('file', '../../files/cephdev.asc') }}"
state=present
when: ceph_dev
- name: install Intank Ceph Enterprise repository key
- name: install inktank ceph enterprise repository key
apt_key: >
data="{{ lookup('file', 'cephstableice.asc') }}"
data="{{ lookup('file', '../../files/cephstableice.asc') }}"
state=present
when: ceph_stable_ice
- name: add Ceph stable repository
- name: add ceph stable repository
apt_repository: >
repo="deb http://ceph.com/debian-{{ ceph_stable_release }}/ {{ ceph_stable_distro_source | default(ansible_lsb.codename) }} main"
state=present
changed_when: false
when: ceph_stable
- name: add Ceph development repository
- name: add ceph development repository
apt_repository: >
repo="deb http://gitbuilder.ceph.com/ceph-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/{{ ceph_dev_branch }} {{ ansible_lsb.codename }} main"
state=present
changed_when: false
when: ceph_dev
- name: add Inktank Ceph Enterprise repository
- name: add inktank ceph enterprise repository
apt_repository: >
repo="deb file://{{ ceph_stable_ice_temp_path }} {{ ansible_lsb.codename }} main"
state=present
changed_when: false
when: ceph_stable_ice
- name: install Ceph
- name: install ceph
apt: >
pkg={{ item }}
state=present

View File

@ -10,19 +10,19 @@
- yum-plugin-priorities.noarch
- epel-release
- name: install the Ceph stable repository key
- name: install the ceph stable repository key
rpm_key: >
key={{ ceph_stable_key }}
state=present
when: ceph_stable
- name: install the Ceph development repository key
- name: install the ceph development repository key
rpm_key: >
key={{ ceph_dev_key }}
state=present
when: ceph_dev
- name: install Inktank Ceph Enterprise repository key
- name: install inktank ceph enterprise repository key
rpm_key: >
key={{ ceph_stable_ice_temp_path }}/release.asc
state=present
@ -36,17 +36,17 @@
ceph_stable_rh_storage and
ceph_stable_rh_storage_iso_install
- name: add Ceph stable repository
- name: add ceph stable repository
yum: name=http://ceph.com/rpm-{{ ceph_stable_release }}/{{ ceph_stable_redhat_distro }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro|replace('rhel', 'el') }}.noarch.rpm
changed_when: false
when: ceph_stable
- name: add Ceph development repository
- name: add ceph development repository
yum: name=http://gitbuilder.ceph.com/ceph-rpm-{{ ceph_dev_redhat_distro }}-x86_64-basic/ref/{{ ceph_dev_branch }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro }}.noarch.rpm
changed_when: false
when: ceph_dev
- name: add Inktank Ceph Enterprise repository
- name: add inktank ceph enterprise repository
template: >
src=redhat_ice_repo.j2
dest=/etc/yum.repos.d/ice.repo
@ -66,7 +66,7 @@
ceph_stable_rh_storage and
ceph_stable_rh_storage_iso_install
- name: install Ceph
- name: install ceph
yum: >
name=ceph
state=present

View File

@ -1,31 +1,31 @@
---
- include: os_check.yml
- include: ./checks/check_system.yml
- include: check_mandatory_vars.yml
- include: ./checks/check_mandatory_vars.yml
- include: os_tuning.yml
- include: ./misc/system_tuning.yml
when: osd_group_name in group_names
- include: prerequisite_ice.yml
- include: ./pre_requisites/prerequisite_ice.yml
when: ceph_stable_ice
- include: prerequisite_rh_storage_iso_install.yml
- include: ./pre_requisites/prerequisite_rh_storage_iso_install.yml
when:
ceph_stable_rh_storage and
ceph_stable_rh_storage_iso_install
- include: prerequisite_rh_storage_cdn_install.yml
- include: ./pre_requisites/prerequisite_rh_storage_cdn_install.yml
when:
ceph_stable_rh_storage and
ceph_stable_rh_storage_cdn_install
- include: install_on_redhat.yml
- include: ./installs/install_on_redhat.yml
when: ansible_os_family == 'RedHat'
- include: install_on_debian.yml
- include: ./installs/install_on_debian.yml
when: ansible_os_family == 'Debian'
- name: check for a Ceph socket
- name: check for a ceph socket
shell: "stat /var/run/ceph/*.asok > /dev/null 2>&1"
changed_when: false
ignore_errors: true
@ -37,14 +37,14 @@
ignore_errors: true
register: socketrgw
- name: generate cluster UUID
- name: generate cluster uuid
local_action: shell uuidgen | tee fetch/ceph_cluster_uuid.conf
creates=fetch/ceph_cluster_uuid.conf
register: cluster_uuid
sudo: false
when: fsid != '4a158d27-f750-41d5-9e7f-26ce4c9d2d45'
- name: read cluster UUID if it already exists
- name: read cluster uuid if it already exists
local_action: command cat fetch/ceph_cluster_uuid.conf
removes=fetch/ceph_cluster_uuid.conf
changed_when: false
@ -52,7 +52,7 @@
sudo: false
when: fsid != '4a158d27-f750-41d5-9e7f-26ce4c9d2d45'
- name: generate Ceph configuration file
- name: generate ceph configuration file
template: >
src=ceph.conf.j2
dest=/etc/ceph/ceph.conf

View File

@ -1,5 +1,5 @@
---
- name: disable OSD directory parsing by updatedb
- name: disable osd directory parsing by updatedb
command: updatedb -e /var/lib/ceph
changed_when: false
ignore_errors: true
@ -16,7 +16,7 @@
ignore_errors: true
when: disable_swap
- name: apply OS tuning
- name: apply operating system tuning
sysctl: >
name={{ item.name }}
value={{ item.value }}

View File

@ -1,5 +1,5 @@
---
- name: create ICE package directory
- name: create ice package directory
file: >
path={{ ceph_stable_ice_temp_path }}
state=directory
@ -8,7 +8,7 @@
mode=0644
when: ceph_stable_ice
- name: get ICE packages
- name: get ice packages
get_url: >
url_username={{ ceph_stable_ice_user }}
url_password={{ ceph_stable_ice_password }}
@ -16,7 +16,7 @@
dest={{ ceph_stable_ice_temp_path }}/ICE-{{ ceph_stable_ice_version }}-{{ ceph_stable_ice_distro }}.tar.gz
when: ceph_stable_ice
- name: get ICE Kernel Modules
- name: get ice kernel modules
get_url: >
url_username={{ ceph_stable_ice_user }}
url_password={{ ceph_stable_ice_password }}
@ -29,13 +29,13 @@
ceph_stable_ice and
ansible_os_family == 'RedHat'
- name: stat extracted ICE repo files
- name: stat extracted ice repo files
stat: >
path={{ ceph_stable_ice_temp_path }}/ice_setup.py
register: repo_exist
when: ceph_stable_ice
- name: extract ICE packages
- name: extract ice packages
shell: >
tar -xzf ICE-{{ ceph_stable_ice_version }}-{{ ceph_stable_ice_distro }}.tar.gz
chdir={{ ceph_stable_ice_temp_path }}
@ -44,7 +44,7 @@
ceph_stable_ice and
repo_exist.stat.exists == False
- name: move ICE extracted packages
- name: move ice extracted packages
shell: "mv {{ ceph_stable_ice_temp_path }}/ceph/*/* {{ ceph_stable_ice_temp_path }}"
changed_when: false
when:

View File

@ -13,14 +13,14 @@
name=docker-py
version=1.1.0 # https://github.com/ansible/ansible-modules-core/issues/1227
- name: stat for Ceph config and keys
- name: stat for ceph config and keys
stat: >
path={{ item }}
with_items: ceph_config_keys
ignore_errors: true
register: statconfig
- name: try to fetch Ceph config and keys
- name: try to fetch ceph config and keys
copy: >
src=fetch/docker_mon_files/"{{ item }}"
dest=/etc/ceph/

View File

@ -1,7 +1,7 @@
---
# Deploy Ceph metadata server(s)
- name: copy MDS bootstrap key
- name: copy mds bootstrap key
copy: >
src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-mds/ceph.keyring
dest=/var/lib/ceph/bootstrap-mds/ceph.keyring
@ -10,7 +10,7 @@
mode=600
when: cephx
- name: create MDS directory
- name: create mds directory
file: >
path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}
state=directory
@ -19,14 +19,14 @@
mode=0644
when: cephx
- name: create MDS keyring
- name: create mds keyring
command: >
ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.{{ ansible_hostname }} osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring
creates=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring
changed_when: false
when: cephx
- name: set MDS key permissions
- name: set mds key permissions
file: >
path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring
mode=0600
@ -60,7 +60,7 @@
changed_when: false
when: ansible_distribution != "Ubuntu"
- name: start and add that the metadata service to the init sequence (Ubuntu)
- name: start and add that the metadata service to the init sequence (ubuntu)
service: >
name=ceph-mds
state=started

View File

@ -4,7 +4,7 @@
- name: wait for client.admin key exists
wait_for: path=/etc/ceph/ceph.client.admin.keyring
- name: create Ceph REST API keyring
- name: create ceph rest api keyring
command: >
ceph auth get-or-create client.restapi osd 'allow *' mon 'allow *' -o /etc/ceph/ceph.client.restapi.keyring
creates=/etc/ceph/ceph.client.restapi.keyring
@ -18,7 +18,7 @@
openstack_config and
cephx
- name: find Ceph keys
- name: find ceph keys
shell: ls -1 /etc/ceph/*.keyring
changed_when: false
register: ceph_keys

View File

@ -11,7 +11,7 @@
changed_when: false
when: not {{ ceph_version.stdout | version_compare('0.84', '<') }}
- name: create Ceph Filesystem
- name: create ceph filesystem
command: ceph fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}
changed_when: false
when: not {{ ceph_version.stdout | version_compare('0.84', '<') }}

View File

@ -12,14 +12,14 @@
name=docker-py
version=1.1.0 # https://github.com/ansible/ansible-modules-core/issues/1227
- name: stat for Ceph config and keys
- name: stat for ceph config and keys
stat: >
path={{ item }}
with_items: ceph_config_keys
ignore_errors: true
register: statconfig
- name: try to fetch Ceph config and keys
- name: try to fetch ceph config and keys
copy: >
src=fetch/docker_mon_files/"{{ item }}"
dest=/etc/ceph/
@ -31,7 +31,7 @@
- statconfig.results
when: item.1.stat.exists == False
- name: run the Ceph Monitor docker image
- name: run the ceph monitor docker image
docker: >
image="{{ ceph_mon_docker_username }}/{{ ceph_mon_docker_imagename }}"
name=ceph-{{ ansible_hostname }}
@ -48,7 +48,7 @@
detach=yes
state=running
- name: collect Ceph files to the Ansible server
- name: collect ceph files to the ansible server
fetch: >
src={{ item }}
dest=fetch/docker_mon_files/{{ item }}

View File

@ -1,5 +1,5 @@
---
- name: create OpenStack pool
- name: create openstack pool
command: rados mkpool {{ item }}
with_items:
- "{{ openstack_glance_pool }}"
@ -9,7 +9,7 @@
changed_when: false
ignore_errors: true
- name: create OpenStack keys
- name: create openstack keys
command: >
ceph auth get-or-create {{ item.name }} {{ item.value }} -o /etc/ceph/ceph.{{ item.name }}.keyring
creates=/etc/ceph/ceph.{{ item.name }}.keyring

View File

@ -1,5 +1,5 @@
---
- name: collect all the pool
- name: collect all the pools
command: rados lspools
register: ceph_pools
when: "{{ ceph_version.stdout | version_compare('0.94', '>=') }}"

View File

@ -12,7 +12,7 @@
when: ansible_distribution == "Ubuntu"
changed_when: false
- name: start and add that the monitor service to the init sequence (Ubuntu)
- name: start and add that the monitor service to the init sequence (ubuntu)
service: >
name=ceph-mon
state=started
@ -33,7 +33,7 @@
failed_when: false
when: ansible_os_family == 'RedHat'
- name: get Ceph monitor version
- name: get ceph monitor version
shell: ceph daemon mon."{{ ansible_hostname }}" version | cut -d '"' -f 4 | cut -f 1,2 -d '.'
changed_when: false
register: ceph_version

View File

@ -1,7 +1,7 @@
---
# NOTE (leseb) : this task is for disk devices only because of the explicit use of the first
# partition.
- name: activate OSD(s) when device is a disk
- name: activate osd(s) when device is a disk
command: |
ceph-disk activate {{ item.2 | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\\1p') }}1
with_together:
@ -15,7 +15,7 @@
item.1.rc != 0
# NOTE (leseb): this task is for partitions because we don't explicitly use a partition.
- name: activate OSD(s) when device is a partition
- name: activate osd(s) when device is a partition
command: "ceph-disk activate {{ item.1 }}"
with_together:
- ispartition.results
@ -27,7 +27,7 @@
- include: osd_fragment.yml
when: crush_location
- name: start and add that the OSD service to the init sequence
- name: start and add that the osd service(s) to the init sequence
service: >
name=ceph
state=started

View File

@ -3,14 +3,14 @@
- include: pre_requisite.yml
when: not ceph_containerized_deployment
- include: journal_collocation.yml
- include: ./scenarios/journal_collocation.yml
when: journal_collocation and not ceph_containerized_deployment
- include: raw_multi_journal.yml
- include: ./scenarios/raw_multi_journal.yml
when: raw_multi_journal and not ceph_containerized_deployment
- include: osd_directory.yml
- include: ./scenarios/osd_directory.yml
when: osd_directory and not ceph_containerized_deployment
- include: docker.yml
- include: ./scenarios/docker.yml
when: ceph_containerized_deployment

View File

@ -1,19 +1,19 @@
---
- name: get OSD path
- name: get osd path
shell: "df | grep {{ item }} | awk '{print $6}'"
with_items: devices
changed_when: false
ignore_errors: true
register: osd_path
- name: get OSD id
- name: get osd id
command: cat {{ item.stdout }}/whoami
with_items: osd_path.results
changed_when: false
ignore_errors: true
register: osd_id
- name: create a Ceph fragment and assemble directory
- name: create a ceph fragment and assemble directory
file: >
path={{ item }}
state=directory
@ -24,7 +24,7 @@
- /etc/ceph/ceph.d/
- /etc/ceph/ceph.d/osd_fragments
- name: create the OSD fragment
- name: create the osd fragment
template: >
src=osd.conf.j2
dest=/etc/ceph/ceph.d/osd_fragments/osd.{{ item.stdout }}.conf
@ -34,7 +34,7 @@
command: cp /etc/ceph/ceph.conf /etc/ceph/ceph.d/
changed_when: false
- name: assemble OSD sections
- name: assemble osd sections
assemble: >
src=/etc/ceph/ceph.d/osd_fragments/
dest=/etc/ceph/ceph.d/osd.conf
@ -42,7 +42,7 @@
group=root
mode=0644
- name: assemble Ceph conf and OSD fragments
- name: assemble ceph conf and osd fragments
assemble: >
src=/etc/ceph/ceph.d/
dest=/etc/ceph/ceph.conf

View File

@ -11,7 +11,7 @@
state=present
when: ansible_os_family == 'RedHat'
- name: copy OSD bootstrap key
- name: copy osd bootstrap key
copy: >
src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring
dest=/var/lib/ceph/bootstrap-osd/ceph.keyring

View File

@ -13,14 +13,14 @@
name=docker-py
version=1.1.0 # https://github.com/ansible/ansible-modules-core/issues/1227
- name: stat for Ceph config and keys
- name: stat for ceph config and keys
stat: >
path={{ item }}
with_items: ceph_config_keys
ignore_errors: true
register: statconfig
- name: try to fetch Ceph config and keys
- name: try to fetch ceph config and keys
copy: >
src=fetch/docker_mon_files/"{{ item }}"
dest=/etc/ceph/
@ -32,7 +32,7 @@
- statconfig.results
when: item.1.stat.exists == False
- name: run the Ceph OSD docker image
- name: run the ceph osd docker image
docker: >
image="{{ ceph_osd_docker_username }}/{{ ceph_osd_docker_imagename }}"
name={{ ansible_hostname }}-osd-{{ item | regex_replace('/', '') }}

View File

@ -1,8 +1,8 @@
---
## SCENARIO 1: JOURNAL AND OSD_DATA ON THE SAME DEVICE
- include: check_devices.yml
- include: zap_devices.yml
- include: ../check_devices.yml
- include: ../zap_devices.yml
# NOTE (leseb): the prepare process must be parallelized somehow...
# if you have 64 disks with 4TB each, this will take a while
@ -10,7 +10,7 @@
# NOTE (alahouze): if the device is a partition, the parted command below has
# failed, this is why we check if the device is a partition too.
- name: automatic prepare OSD disk(s) without partitions
- name: automatic prepare osd disk(s) without partitions
command: ceph-disk prepare "/dev/{{ item.key }}"
ignore_errors: true
register: prepared_osds
@ -22,7 +22,7 @@
journal_collocation and
osd_auto_discovery
- name: manually Prepare OSD disk(s)
- name: manually prepare osd disk(s)
command: "ceph-disk prepare {{ item.2 }}"
ignore_errors: true
with_together:
@ -35,4 +35,4 @@
journal_collocation and not
osd_auto_discovery
- include: activate_osds.yml
- include: ../activate_osds.yml

View File

@ -4,7 +4,7 @@
# NOTE (leseb): we do not check the filesystem underneath the directory
# so it is really up to you to configure this properly.
# Declaring more than one directory on the same filesystem will confuse Ceph.
- name: create OSD directories
- name: create osd directories
file: >
path={{ item }}
state=directory

View File

@ -1,8 +1,8 @@
---
## SCENARIO 3: N JOURNAL DEVICES FOR N OSDS
- include: check_devices.yml
- include: zap_devices.yml
- include: ../check_devices.yml
- include: ../zap_devices.yml
# NOTE (leseb): the prepare process must be parallelized somehow...
# if you have 64 disks with 4TB each, this will take a while
@ -10,7 +10,7 @@
# NOTE (alahouze): if the device is a partition, the parted command below has
# failed, this is why we check if the device is a partition too.
- name: prepare OSD disk(s)
- name: prepare osd disk(s)
command: "ceph-disk prepare {{ item.2 }} {{ item.3 }}"
with_together:
- parted.results
@ -24,4 +24,4 @@
item.1.rc != 0 and
raw_multi_journal
- include: activate_osds.yml
- include: ../activate_osds.yml

View File

@ -2,7 +2,7 @@
# NOTE (leseb): some devices might miss partition label which which will result
# in ceph-disk failing to prepare OSD. Thus zapping them prior to prepare the OSD
# ensures that the device will get successfully prepared.
- name: erasing partitions and labels from OSD disk(s)
- name: erasing partitions and labels from osd disk(s)
command: ceph-disk zap {{ item.2 }}
changed_when: false
with_together:

View File

@ -13,14 +13,14 @@
name=docker-py
version=1.1.0 # https://github.com/ansible/ansible-modules-core/issues/1227
- name: stat for Ceph config and keys
- name: stat for ceph config and keys
stat: >
path={{ item }}
with_items: ceph_config_keys
ignore_errors: true
register: statconfig
- name: try to fetch Ceph config and keys
- name: try to fetch ceph config and keys
copy: >
src=fetch/docker_mon_files/"{{ item }}"
dest=/etc/ceph/

View File

@ -2,13 +2,13 @@
- include: pre_requisite.yml
when: not ceph_containerized_deployment
- include: install_redhat.yml
- include: ./installs/install_redhat.yml
when:
ansible_os_family == 'RedHat' and
radosgw_frontend == 'apache' and not
ceph_containerized_deployment
- include: install_debian.yml
- include: ./installs/install_debian.yml
when:
ansible_os_family == 'Debian' and
radosgw_frontend == 'apache' and not

View File

@ -10,14 +10,14 @@
name=docker-py
version=1.1.0 # https://github.com/ansible/ansible-modules-core/issues/1227
- name: stat for Ceph config and keys
- name: stat for ceph config and keys
stat: >
path={{ item }}
with_items: ceph_config_keys
ignore_errors: true
register: statconfig
- name: try to fetch Ceph config and keys
- name: try to fetch ceph config and keys
copy: >
src=fetch/docker_mon_files/"{{ item }}"
dest=/etc/ceph/
@ -29,7 +29,7 @@
- statconfig.results
when: item.1.stat.exists == False
- name: run the Ceph REST API docker image
- name: run the ceph rest api docker image
docker: >
image="{{ ceph_restapi_docker_username }}/{{ ceph_restapi_docker_imagename }}"
name={{ ansible_hostname }}-ceph-restapi
@ -39,7 +39,7 @@
env="RESTAPI_IP={{ hostvars[inventory_hostname]['ansible_' + ceph_restapi_docker_interface]['ipv4']['address'] }},CEPH_DAEMON=RESTAPI"
volumes="/etc/ceph:/etc/ceph"
- name: ensure Ceph REST API service is running
- name: ensure ceph rest api service is running
docker: >
image="{{ ceph_restapi_docker_username }}/{{ ceph_restapi_docker_imagename }}"
name="ceph-{{ ansible_hostname }}"

View File

@ -1,5 +1,5 @@
---
- name: create Ceph REST API directory
- name: create ceph rest api directory
file: >
path=/var/lib/ceph/restapi/ceph-restapi
state=directory
@ -7,7 +7,7 @@
group=root
mode=0755
- name: copy Ceph REST API keyring
- name: copy ceph rest api keyring
copy: >
src=fetch/{{ fsid }}/etc/ceph/ceph.client.restapi.keyring
dest=/var/lib/ceph/restapi/ceph-restapi/keyring
@ -16,7 +16,7 @@
mode=600
when: cephx
- name: activate Ceph REST API with upstart
- name: activate ceph rest api with upstart
file: >
path=/var/lib/ceph/restapi/{{ item }}
state=touch
@ -29,7 +29,7 @@
changed_when: false
when: ansible_distribution == "Ubuntu"
- name: activate Ceph REST API with sysvinit
- name: activate ceph rest api with sysvinit
file: >
path=/var/lib/ceph/restapi/{{ item }}
state=touch

View File

@ -1,11 +1,11 @@
---
- name: check if Ceph REST API is already started
- name: check if ceph rest api is already started
shell: "pgrep ceph-rest-api"
changed_when: false
ignore_errors: true
register: restapi_status
- name: start Ceph REST API
- name: start ceph rest api
shell: "nohup ceph-rest-api &"
changed_when: false
when: restapi_status.rc != 0