Cosmetic changes

This branch has been sitting on my local repo for a while. I guess I had
time to spend on a plane :).

Signed-off-by: Sébastien Han <sebastien.han@enovance.com>
pull/263/head
Sébastien Han 2015-05-15 18:27:41 +02:00
parent c030786db1
commit 8b39214ab8
32 changed files with 152 additions and 220 deletions

View File

@ -1,16 +1,16 @@
---
- name: Make sure journal_size configured
- name: make sure journal_size configured
fail: msg="journal_size must be configured. See http://ceph.com/docs/master/rados/configuration/osd-config-ref/"
when: journal_size|int == 0
- name: Make sure monitor_interface configured
- name: make sure monitor_interface configured
fail: msg="monitor_interface must be configured. Interface for the monitor to listen on"
when: monitor_interface == 'interface'
- name: Make sure cluster_network configured
- name: make sure cluster_network configured
fail: msg="cluster_network must be configured. Ceph replication network"
when: cluster_network == '0.0.0.0/0'
- name: Make sure public_network configured
- name: make sure public_network configured
fail: msg="public_network must be configured. Ceph public network"
when: public_network == '0.0.0.0/0'

View File

@ -1,5 +1,5 @@
---
- name: Install dependencies
- name: install dependencies
apt: >
pkg={{ item }}
state=present
@ -10,43 +10,43 @@
- ntp
- hdparm
- name: Install the Ceph repository stable key
- name: install the Ceph repository stable key
apt_key: >
data="{{ lookup('file', 'cephstable.asc') }}"
state=present
when: ceph_stable
- name: Install the Ceph development repository key
- name: install the Ceph development repository key
apt_key: >
data="{{ lookup('file', 'cephdev.asc') }}"
state=present
when: ceph_dev
- name: Install Intank Ceph Enterprise repository key
- name: install Intank Ceph Enterprise repository key
apt_key: >
data="{{ lookup('file', 'cephstableice.asc') }}"
state=present
when: ceph_stable_ice
- name: Add Ceph stable repository
- name: add Ceph stable repository
apt_repository: >
repo="deb http://ceph.com/debian-{{ ceph_stable_release }}/ {{ ansible_lsb.codename }} main"
state=present
when: ceph_stable
- name: Add Ceph development repository
- name: add Ceph development repository
apt_repository: >
repo="deb http://gitbuilder.ceph.com/ceph-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/{{ ceph_dev_branch }} {{ ansible_lsb.codename }} main"
state=present
when: ceph_dev
- name: Add Inktank Ceph Enterprise repository
- name: add Inktank Ceph Enterprise repository
apt_repository: >
repo="deb file://{{ ceph_stable_ice_temp_path }} {{ ansible_lsb.codename }} main"
state=present
when: ceph_stable_ice
- name: Install Ceph
- name: install Ceph
apt: >
pkg={{ item }}
state=latest

View File

@ -1,5 +1,5 @@
---
- name: Install dependencies
- name: install dependencies
yum: >
name={{ item }}
state=present
@ -10,39 +10,39 @@
- yum-plugin-priorities.noarch
- epel-release
- name: Install the Ceph stable repository key
- name: install the Ceph stable repository key
rpm_key: >
key={{ ceph_stable_key }}
state=present
when: ceph_stable
- name: Install the Ceph development repository key
- name: install the Ceph development repository key
rpm_key: >
key={{ ceph_dev_key }}
state=present
when: ceph_dev
- name: Install Inktank Ceph Enterprise repository key
- name: install Inktank Ceph Enterprise repository key
rpm_key: >
key={{ ceph_stable_ice_temp_path }}/release.asc
state=present
when: ceph_stable_ice
- name: Add Ceph stable repository
- name: add Ceph stable repository
yum: name=http://ceph.com/rpm-{{ ceph_stable_release }}/{{ ceph_stable_redhat_distro }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro|replace('rhel', 'el') }}.noarch.rpm
register: repo_result
when: ceph_stable
failed_when: repo_result.rc > 1
changed_when: repo_result.rc == 0
- name: Add Ceph development repository
- name: add Ceph development repository
yum: name=http://gitbuilder.ceph.com/ceph-rpm-{{ ceph_dev_redhat_distro }}-x86_64-basic/ref/{{ ceph_dev_branch }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro }}.noarch.rpm
register: repo_result
when: ceph_dev
failed_when: repo_result.rc > 1
changed_when: repo_result.rc == 0
- name: Add Inktank Ceph Enterprise repository
- name: add Inktank Ceph Enterprise repository
template: >
src=redhat_ice_repo.j2
dest=/etc/yum.repos.d/ice.repo
@ -51,12 +51,12 @@
mode=0644
when: ceph_stable_ice
- name: Install Ceph
- name: install Ceph
yum: >
name=ceph
state=latest
- name: Install Inktank Ceph Enterprise RBD Kernel modules
- name: install Inktank Ceph Enterprise RBD Kernel modules
yum: >
name={{ item }}
with_items:

View File

@ -12,13 +12,13 @@
- include: install_on_debian.yml
when: ansible_os_family == 'Debian'
- name: Check for a Ceph socket
- name: check for a Ceph socket
shell: "stat /var/run/ceph/*.asok > /dev/null 2>&1"
ignore_errors: true
register: socket
changed_when: False
- name: Generate cluster UUID
- name: generate cluster UUID
shell: >
uuidgen | tee fetch/ceph_cluster_uuid.conf
creates=fetch/ceph_cluster_uuid.conf
@ -26,7 +26,7 @@
sudo: false
register: cluster_uuid
- name: Read cluster UUID if it already exists
- name: read cluster UUID if it already exists
command: >
cat fetch/ceph_cluster_uuid.conf
removes=fetch/ceph_cluster_uuid.conf
@ -37,7 +37,7 @@
- include: check-mandatory-vars.yml
- name: Generate Ceph configuration file
- name: generate Ceph configuration file
template: >
src=ceph.conf.j2
dest=/etc/ceph/ceph.conf

View File

@ -1,12 +1,12 @@
---
- name: Fail on unsupported system
- name: fail on unsupported system
fail: "msg=System not supported {{ ansible_system }}"
when: "ansible_system not in ['Linux']"
- name: Fail on unsupported architecture
- name: fail on unsupported architecture
fail: "msg=Architecture not supported {{ ansible_architecture }}"
when: "ansible_architecture not in ['x86_64']"
- name: Fail on unsupported distribution
- name: fail on unsupported distribution
fail: "msg=Distribution not supported {{ ansible_os_family }}"
when: "ansible_os_family not in ['Debian', 'RedHat']"

View File

@ -1,22 +1,22 @@
---
- name: Disable OSD directory parsing by updatedb
- name: disable OSD directory parsing by updatedb
command: updatedb -e /var/lib/ceph
ignore_errors: true
changed_when: False
- name: Disable transparent hugepage
- name: disable transparent hugepage
command: "echo never > /sys/kernel/mm/transparent_hugepage/enabled"
when: disable_transparent_hugepage
ignore_errors: true
changed_when: False
- name: Disable swap
- name: disable swap
command: swapoff -a
when: disable_swap
ignore_errors: true
changed_when: False
- name: Apply OS tuning
- name: apply OS tuning
sysctl: >
name={{ item.name }}
value={{ item.value }}

View File

@ -1,5 +1,5 @@
---
- name: Create ICE package directory
- name: create ICE package directory
file: >
path={{ ceph_stable_ice_temp_path }}
state=directory
@ -8,7 +8,7 @@
mode=0644
when: ceph_stable_ice
- name: Get ICE packages
- name: get ICE packages
get_url: >
url_username={{ ceph_stable_ice_user }}
url_password={{ ceph_stable_ice_password }}
@ -16,7 +16,7 @@
dest={{ ceph_stable_ice_temp_path }}/ICE-{{ ceph_stable_ice_version }}-{{ ceph_stable_ice_distro }}.tar.gz
when: ceph_stable_ice
- name: Get ICE Kernel Modules
- name: get ICE Kernel Modules
get_url: >
url_username={{ ceph_stable_ice_user }}
url_password={{ ceph_stable_ice_password }}
@ -27,20 +27,20 @@
- kmod-rbd-{{ ceph_stable_ice_kmod }}.rpm
when: ceph_stable_ice and ansible_os_family == 'RedHat'
- name: Stat extracted ICE repo files
- name: stat extracted ICE repo files
stat: >
path={{ ceph_stable_ice_temp_path }}/ice_setup.py
register: repo_exist
when: ceph_stable_ice
- name: Extract ICE packages
- name: extract ICE packages
shell: >
tar -xzf ICE-{{ ceph_stable_ice_version }}-{{ ceph_stable_ice_distro }}.tar.gz
chdir={{ ceph_stable_ice_temp_path }}
when: ceph_stable_ice and repo_exist.stat.exists == False
changed_when: False
- name: Move ICE extracted packages
- name: move ICE extracted packages
shell: "mv {{ ceph_stable_ice_temp_path }}/ceph/*/* {{ ceph_stable_ice_temp_path }}"
when: ceph_stable_ice and repo_exist.stat.exists == False
changed_when: False

View File

@ -1,5 +1,5 @@
---
- name: Fetch Ceph config and keys
- name: fetch Ceph config and keys
copy: >
src=fetch/docker_mon_files/{{ item }}
dest=/etc/ceph/
@ -12,7 +12,7 @@
- /etc/ceph/monmap
- /etc/ceph/ceph.mon.keyring
- name: Run the Ceph Medata docker image
- name: run the Ceph Medata docker image
docker: >
image="{{ ceph_mds_docker_username }}/{{ ceph_mds_docker_imagename }}"
name=ceph-mds-{{ ansible_hostname }}
@ -21,7 +21,7 @@
env="MDS_NAME=ceph-mds-{{ ansible_hostname }}, MDS_CIVETWEB_PORT={{ ceph_mds_civetweb_port }}"
volumes="/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph"
- name: Ensure ceph_mds service is running
- name: ensure ceph_mds service is running
docker: >
image="{{ ceph_mds_docker_username }}/{{ ceph_mds_docker_imagename }}"
name="ceph-{{ ansible_hostname }}"

View File

@ -1,7 +1,7 @@
---
# Deploy Ceph metadata server(s)
- name: Copy MDS bootstrap key
- name: copy MDS bootstrap key
copy: >
src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-mds/ceph.keyring
dest=/var/lib/ceph/bootstrap-mds/ceph.keyring
@ -10,7 +10,7 @@
mode=600
when: cephx
- name: Create MDS directory
- name: create MDS directory
file: >
path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}
state=directory
@ -19,14 +19,14 @@
mode=0644
when: cephx
- name: Create MDS keyring
- name: create MDS keyring
command: >
ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.{{ ansible_hostname }} osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring
creates=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring
when: cephx
changed_when: False
- name: Set MDS key permissions
- name: set MDS key permissions
file: >
path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring
mode=0600
@ -34,7 +34,7 @@
group=root
when: cephx
- name: Activate metadata server with upstart
- name: activate metadata server with upstart
file: >
path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}/{{ item }}
state=touch
@ -46,7 +46,7 @@
- upstart
when: ansible_distribution == "Ubuntu"
- name: Activate metadata server with sysvinit
- name: activate metadata server with sysvinit
file: >
path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}/{{ item }}
state=touch
@ -58,7 +58,7 @@
- sysvinit
when: ansible_distribution != "Ubuntu"
- name: Start and add that the metadata service to the init sequence (Ubuntu)
- name: start and add that the metadata service to the init sequence (Ubuntu)
service: >
name=ceph-mds
state=started
@ -66,7 +66,7 @@
args="id={{ ansible_hostname }}"
when: ansible_distribution == "Ubuntu"
- name: Start and add that the metadata service to the init sequence
- name: start and add that the metadata service to the init sequence
service: >
name=ceph
state=started

View File

@ -1,11 +1,10 @@
---
# Wait for mon discovery and quorum resolution
# NOTE (leseb): wait for mon discovery and quorum resolution
# the admin key is not instantanely created so we have to wait a bit
- name: wait for client.admin key exists
wait_for: path=/etc/ceph/ceph.client.admin.keyring
- name: Create RGW keyring
- name: create RGW keyring
command: >
ceph auth get-or-create client.radosgw.{{ hostvars[item]['ansible_hostname'] }} osd 'allow rwx' mon 'allow rw' -o /etc/ceph/ceph.client.radosgw.{{ hostvars[item]['ansible_hostname'] }}.keyring
creates=/etc/ceph/ceph.client.radosgw.{{ hostvars[item]['ansible_hostname'] }}.keyring
@ -13,7 +12,7 @@
with_items: groups.rgws
changed_when: False
- name: Create Ceph REST API keyring
- name: create Ceph REST API keyring
command: >
ceph auth get-or-create client.restapi osd 'allow *' mon 'allow *' -o /etc/ceph/ceph.client.restapi.keyring
creates=/etc/ceph/ceph.client.restapi.keyring
@ -23,13 +22,13 @@
- include: openstack_config.yml
when: openstack_config and cephx
- name: Find Ceph keys
- name: find Ceph keys
shell: ls -1 /etc/ceph/*.keyring
register: ceph_keys
when: cephx
changed_when: False
- name: Set keys permissions
- name: set keys permissions
file: >
path={{ item }}
mode=0600
@ -38,7 +37,7 @@
with_items:
- "{{ ceph_keys.stdout_lines }}"
- name: Copy keys to the ansible server
- name: copy keys to the ansible server
fetch: >
src={{ item }}
dest=fetch/{{ fsid }}/{{ item }}
@ -49,7 +48,7 @@
- /var/lib/ceph/bootstrap-osd/ceph.keyring
- /var/lib/ceph/bootstrap-mds/ceph.keyring
- name: Drop in a motd script to report status when logging in
- name: drop in a motd script to report status when logging in
copy: >
src=precise/92-ceph
dest=/etc/update-motd.d/92-ceph

View File

@ -1,11 +1,11 @@
---
- name: Create filesystem pools
- name: create filesystem pools
command: ceph osd pool create {{ item }} {{ pool_default_pg_num }}
with_items:
- cephfs_data
- cephfs_metadata
changed_when: False
- name: Create Ceph Filesystem
- name: create Ceph Filesystem
command: ceph fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}
changed_when: False

View File

@ -1,17 +1,17 @@
---
- name: Create monitor initial keyring
- name: create monitor initial keyring
command: >
ceph-authtool /var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }} --create-keyring --name=mon. --add-key={{ monitor_secret | mandatory }} --cap mon 'allow *'
creates=/var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }}
- name: Set initial monitor key permissions
- name: set initial monitor key permissions
file: >
path=/var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }}
mode=0600
owner=root
group=root
- name: Create monitor directory
- name: create monitor directory
file: >
path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}
state=directory
@ -19,7 +19,7 @@
group=root
mode=0644
- name: Ceph monitor mkfs
- name: ceph monitor mkfs
command: >
ceph-mon --mkfs -i {{ ansible_hostname }} --fsid {{ fsid }} --keyring /var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }}
creates=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/keyring

View File

@ -1,5 +1,5 @@
---
- name: Try to fetch Ceph config and keys
- name: try to fetch Ceph config and keys
copy: >
src=fetch/docker_mon_files/{{ item }}
dest=/etc/ceph/
@ -13,7 +13,7 @@
- /etc/ceph/ceph.mon.keyring
ignore_errors: true
- name: Run the Ceph Monitor docker image
- name: run the Ceph Monitor docker image
docker: >
image="{{ ceph_mon_docker_username }}/{{ ceph_mon_docker_imagename }}"
name=ceph-{{ ansible_hostname }}
@ -22,7 +22,7 @@
env="MON_NAME=ceph-{{ ansible_hostname }},MON_IP={{ hostvars[inventory_hostname]['ansible_' + ceph_mon_docker_interface]['ipv4']['address'] }}"
volumes="/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph"
- name: Ensure ceph_mon service is running
- name: ensure ceph_mon service is running
docker: >
image="{{ ceph_mon_docker_username }}/{{ ceph_mon_docker_imagename }}"
name="ceph-{{ ansible_hostname }}"
@ -30,7 +30,7 @@
detach=yes
state=running
- name: Collect Ceph files to the Ansible server
- name: collect Ceph files to the Ansible server
fetch: >
src={{ item }}
dest=fetch/docker_mon_files/{{ item }}

View File

@ -1,5 +1,5 @@
---
- name: Create OpenStack pool
- name: create OpenStack pool
command: rados mkpool {{ item }}
with_items:
- "{{ openstack_glance_pool }}"
@ -9,7 +9,7 @@
ignore_errors: True
changed_when: False
- name: Create OpenStack keys
- name: create OpenStack keys
command: >
ceph auth get-or-create {{ item.name }} {{ item.value }} -o /etc/ceph/ceph.{{ item.name }}.keyring
creates=/etc/ceph/ceph.{{ item.name }}.keyring

View File

@ -1,5 +1,5 @@
---
- name: Activate monitor with upstart
- name: activate monitor with upstart
file: >
path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/{{ item }}
state=touch
@ -12,7 +12,7 @@
when: ansible_distribution == "Ubuntu"
changed_when: False
- name: Activate monitor with sysvinit
- name: activate monitor with sysvinit
file: >
path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/{{ item }}
state=touch
@ -24,7 +24,7 @@
- sysvinit
when: ansible_distribution != "Ubuntu"
- name: Start and add that the monitor service to the init sequence (Ubuntu)
- name: start and add that the monitor service to the init sequence (Ubuntu)
service: >
name=ceph-mon
state=started
@ -32,7 +32,7 @@
args="id={{ ansible_hostname }}"
when: ansible_distribution == "Ubuntu"
- name: Start and add that the monitor service to the init sequence
- name: start and add that the monitor service to the init sequence
service: >
name=ceph
state=started
@ -40,7 +40,7 @@
args=mon
when: ansible_distribution != "Ubuntu"
- name: Get Ceph monitor version
- name: get Ceph monitor version
shell: ceph daemon mon."{{ ansible_hostname }}" version | cut -d '"' -f 4 | cut -f 1,2 -d '.'
register: ceph_version
changed_when: False

View File

@ -1,15 +1,7 @@
---
# Activate means:
# - mount the volume in a temp location
# - allocate an osd id (if needed)
# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
# - start ceph-osd
#
# This task is for disk devices only because of the explicit use of the first
# NOTE (leseb) : this task is for disk devices only because of the explicit use of the first
# partition.
- name: Activate OSD(s) when device is a disk
- name: activate OSD(s) when device is a disk
command: |
ceph-disk activate {{ item.2 | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\\1p') }}1
with_together:
@ -20,9 +12,8 @@
ignore_errors: True
changed_when: False
# This task is for partitions because we don't explicitly use a partition.
- name: Activate OSD(s) when device is a partition
# NOTE (leseb): this task is for partitions because we don't explicitly use a partition.
- name: activate OSD(s) when device is a partition
command: "ceph-disk activate {{ item.1 }}"
with_together:
- ispartition.results
@ -34,7 +25,7 @@
- include: osd_fragment.yml
when: crush_location
- name: Start and add that the OSD service to the init sequence
- name: start and add that the OSD service to the init sequence
service: >
name=ceph
state=started

View File

@ -4,14 +4,14 @@
# it should exist we rc=0 and don't do anything unless we do something like --force
# As as a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "ignore_errors: True"
# I believe it's safer
- name: Check if the device is a partition or a disk
- name: check if the device is a partition or a disk
shell: "echo '{{ item }}' | egrep '/dev/(([a-z]{3,4}[0-9]$)|(cciss/c[0-9]{1}d[0-9]{1}p[0-9]$))'"
ignore_errors: true
with_items: devices
register: ispartition
changed_when: False
- name: If partition named 'ceph' exists
- name: if partition named 'ceph' exists
shell: "parted --script {{ item }} print | egrep -sq '^ 1.*ceph'"
ignore_errors: True
with_items: devices

View File

@ -1,5 +1,5 @@
---
- name: Fetch Ceph config and keys
- name: fetch Ceph config and keys
copy: >
src=fetch/docker_mon_files/{{ item }}
dest=/etc/ceph/
@ -12,7 +12,7 @@
- /etc/ceph/monmap
- /etc/ceph/ceph.mon.keyring
- name: Run the Ceph OSD docker image
- name: run the Ceph OSD docker image
docker: >
image="{{ ceph_osd_docker_username }}/{{ ceph_osd_docker_imagename }}"
name=ceph-{{ ansible_hostname }}
@ -21,7 +21,7 @@
env="OSD_NAME=ceph-{{ ansible_hostname }}"
volumes="/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph"
- name: Ensure ceph_osd service is running
- name: ensure ceph_osd service is running
docker: >
image="{{ ceph_osd_docker_username }}/{{ ceph_osd_docker_imagename }}"
name="ceph-{{ ansible_hostname }}"

View File

@ -4,29 +4,20 @@
- include: zap_devices.yml
- include: check_devices.yml
# Prepare means
# - create GPT partition for a disk, or a loop label for a partition
# - mark the partition with the ceph type uuid
# - create a file system
# - mark the fs as ready for ceph consumption
# - entire data disk is used (one big partition)
# - a new partition is added to the journal disk (so it can be easily shared)
#
# NOTE (leseb): the prepare process must be parallelized somehow...
# if you have 64 disks with 4TB each, this will take a while
# since Ansible will sequential process the loop
# NOTE (alahouze): if the device is a partition, the parted command below has
# failed, this is why we check if the device is a partition too.
- name: Automatic prepare OSD disk(s) without partitions
- name: automatic prepare OSD disk(s) without partitions
command: ceph-disk prepare "/dev/{{ item.key }}"
when: ansible_devices is defined and item.value.removable == "0" and item.value.partitions|count == 0 and journal_collocation and osd_auto_discovery
ignore_errors: True
with_dict: ansible_devices
register: prepared_osds
- name: Manually Prepare OSD disk(s)
- name: manually Prepare OSD disk(s)
command: "ceph-disk prepare {{ item.2 }}"
when: (item.0.rc != 0 or item.1.rc != 0) and journal_collocation and not osd_auto_discovery
ignore_errors: True

View File

@ -4,8 +4,7 @@
# NOTE (leseb): we do not check the filesystem underneath the directory
# so it is really up to you to configure this properly.
# Declaring more than one directory on the same filesystem will confuse Ceph.
- name: Create OSD directories
- name: create OSD directories
file: >
path={{ item }}
state=directory
@ -13,38 +12,21 @@
group=root
with_items: osd_directories
# Prepare means
# - create GPT partition
# - mark the partition with the ceph type uuid
# - create a file system
# - mark the fs as ready for ceph consumption
# - entire data disk is used (one big partition)
# - a new partition is added to the journal disk (so it can be easily shared)
#
# NOTE (leseb): the prepare process must be parallelized somehow...
# if you have 64 disks with 4TB each, this will take a while
# since Ansible will sequential process the loop
- name: Prepare OSD disk(s)
- name: prepare OSD disk(s)
command: "ceph-disk prepare {{ item }}"
when: osd_directory
with_items: osd_directories
changed_when: False
# Activate means:
# - mount the volume in a temp location
# - allocate an osd id (if needed)
# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
# - start ceph-osd
#
- name: Activate OSD(s)
- name: activate OSD(s)
command: "ceph-disk activate {{ item }}"
with_items: osd_directories
changed_when: False
- name: Start and add that the OSD service to the init sequence
- name: start and add that the OSD service to the init sequence
service: >
name=ceph
state=started

View File

@ -1,19 +1,19 @@
---
- name: Get OSD path
- name: get OSD path
shell: "df | grep {{ item }} | awk '{print $6}'"
with_items: devices
register: osd_path
ignore_errors: true
changed_when: False
- name: Get OSD id
- name: get OSD id
command: cat {{ item.stdout }}/whoami
register: osd_id
with_items: osd_path.results
ignore_errors: true
changed_when: False
- name: Create a Ceph fragment and assemble directory
- name: create a Ceph fragment and assemble directory
file: >
path={{ item }}
state=directory
@ -24,17 +24,17 @@
- /etc/ceph/ceph.d/
- /etc/ceph/ceph.d/osd_fragments
- name: Create the OSD fragment
- name: create the OSD fragment
template: >
src=osd.conf.j2
dest=/etc/ceph/ceph.d/osd_fragments/osd.{{ item.stdout }}.conf
with_items: osd_id.results
- name: Copy ceph.conf for assembling
- name: copy ceph.conf for assembling
command: cp /etc/ceph/ceph.conf /etc/ceph/ceph.d/
changed_when: False
- name: Assemble OSD sections
- name: assemble OSD sections
assemble: >
src=/etc/ceph/ceph.d/osd_fragments/
dest=/etc/ceph/ceph.d/osd.conf
@ -42,7 +42,7 @@
group=root
mode=0644
- name: Assemble Ceph conf and OSD fragments
- name: assemble Ceph conf and OSD fragments
assemble: >
src=/etc/ceph/ceph.d/
dest=/etc/ceph/ceph.conf

View File

@ -1,19 +1,17 @@
---
## Deploy Ceph Oject Storage Daemon(s)
- name: Install dependencies
- name: install dependencies
apt: >
pkg=parted
state=present
when: ansible_os_family == 'Debian'
- name: Install dependencies
- name: install dependencies
yum: >
name=parted
state=present
when: ansible_os_family == 'RedHat'
- name: Copy OSD bootstrap key
- name: copy OSD bootstrap key
copy: >
src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring
dest=/var/lib/ceph/bootstrap-osd/ceph.keyring

View File

@ -4,23 +4,13 @@
- include: zap_devices.yml
- include: check_devices.yml
# Prepare means
# - create GPT partition for a disk, or a loop label for a partition
# - mark the partition with the ceph type uuid
# - create a file system
# - mark the fs as ready for ceph consumption
# - entire data disk is used (one big partition)
# - a new partition is added to the journal disk (so it can be easily shared)
#
# NOTE (leseb): the prepare process must be parallelized somehow...
# if you have 64 disks with 4TB each, this will take a while
# since Ansible will sequential process the loop
# NOTE (alahouze): if the device is a partition, the parted command below has
# failed, this is why we check if the device is a partition too.
- name: Prepare OSD disk(s)
- name: prepare OSD disk(s)
command: "ceph-disk prepare {{ item.2 }} {{ item.3 }}"
when: (item.0.rc != 0 or item.1.rc != 0) and raw_multi_journal
ignore_errors: True

View File

@ -2,13 +2,13 @@
# NOTE (leseb): some devices might miss partition label which which will result
# in ceph-disk failing to prepare OSD. Thus zapping them prior to prepare the OSD
# ensures that the device will get successfully prepared.
- name: Erasing partitions and labels from OSD disk(s)
- name: erasing partitions and labels from OSD disk(s)
command: ceph-disk zap {{ item }}
when: zap_devices and (journal_collocation or raw_multi_journal)
with_items: devices
changed_when: False
- name: Erasing partitions and labels from the journal device(s)
- name: erasing partitions and labels from the journal device(s)
command: ceph-disk zap {{ item }}
when: zap_devices and raw_multi_journal
with_items: raw_journal_devices

View File

@ -1,5 +1,5 @@
---
- name: Fetch Ceph config and keys
- name: fetch Ceph config and keys
copy: >
src=fetch/docker_mon_files/{{ item }}
dest=/etc/ceph/
@ -12,7 +12,7 @@
- /etc/ceph/monmap
- /etc/ceph/ceph.mon.keyring
- name: Run the Ceph Monitor docker image
- name: run the Ceph Monitor docker image
docker: >
image="{{ ceph_rgw_docker_username }}/{{ ceph_rgw_docker_imagename }}"
name=ceph-rgw-{{ ansible_hostname }}
@ -21,7 +21,7 @@
env="RGW_NAME=ceph-rgw-{{ ansible_hostname }}, RGW_CIVETWEB_PORT={{ ceph_rgw_civetweb_port }}"
volumes="/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph"
- name: Ensure ceph_rgw service is running
- name: ensure ceph_rgw service is running
docker: >
image="{{ ceph_rgw_docker_username }}/{{ ceph_rgw_docker_imagename }}"
name="ceph-{{ ansible_hostname }}"

View File

@ -1,15 +1,12 @@
---
## Deploy RADOS Gateway
#
- name: Add Ceph extra
- name: add Ceph extra
apt_repository: >
repo="deb http://ceph.com/packages/ceph-extras/debian {{ ansible_lsb.codename }} main"
state=present
when: ansible_lsb.codename in ['natty', 'oneiric', 'precise', 'quantal', 'raring', 'sid', 'squeeze', 'wheezy']
# Needed for Ubuntu 12.04 to have access to libapache2-mod-fastcgi if 100-continue isn't being used
- name: Enable multiverse repo for Precise
# NOTE (leseb): needed for Ubuntu 12.04 to have access to libapache2-mod-fastcgi if 100-continue isn't being used
- name: enable multiverse repo for Precise
apt_repository: >
repo="{{ item }}"
state=present
@ -19,8 +16,8 @@
- deb http://security.ubuntu.com/ubuntu {{ ansible_lsb.codename }}-security multiverse
when: ansible_lsb.codename in ['precise'] and not http_100_continue
# Disable the repo when we are using the Ceph repo for 100-continue packages
- name: Disable multiverse repo for Precise
# NOTE (leseb): disable the repo when we are using the Ceph repo for 100-continue packages
- name: disable multiverse repo for Precise
apt_repository: >
repo="{{ item }}"
state=absent
@ -30,27 +27,27 @@
- deb http://security.ubuntu.com/ubuntu {{ ansible_lsb.codename }}-security multiverse
when: ansible_lsb.codename in ['precise'] and http_100_continue
# Needed for Ubuntu 14.04 to have access to libapache2-mod-fastcgi if 100-continue isn't being used
- name: Enable multiverse repo for Trusty
# NOTE (leseb): needed for Ubuntu 14.04 to have access to libapache2-mod-fastcgi if 100-continue isn't being used
- name: enable multiverse repo for Trusty
command: "apt-add-repository multiverse"
when: ansible_lsb.codename in ['trusty'] and not http_100_continue
changed_when: False
# Disable the repo when we are using the Ceph repo for 100-continue packages
- name: Disable multiverse repo for Trusty
# NOTE (leseb): disable the repo when we are using the Ceph repo for 100-continue packages
- name: disable multiverse repo for Trusty
command: "apt-add-repository -r multiverse"
when: ansible_lsb.codename in ['trusty'] and http_100_continue
changed_when: False
# If using 100-continue, add Ceph dev key
- name: Install the Ceph development repository key
# NOTE (leseb): if using 100-continue, add Ceph dev key
- name: install the Ceph development repository key
apt_key: >
data="{{ lookup('file', 'cephdev.asc') }}"
state=present
when: http_100_continue
# If using 100-continue, add Ceph sources and update
- name: Add Ceph Apache and FastCGI sources
# NOTE (leseb): if using 100-continue, add Ceph sources and update
- name: add Ceph Apache and FastCGI sources
apt_repository: >
repo="{{ item }}"
state=present
@ -60,8 +57,8 @@
register: purge_default_apache
when: http_100_continue
# Else remove them to ensure you use the default packages
- name: Remove Ceph Apache and FastCGI sources
# NOTE (leseb): else remove them to ensure you use the default packages
- name: remove Ceph Apache and FastCGI sources
apt_repository: >
repo="{{ item }}"
state=absent
@ -71,8 +68,8 @@
register: purge_ceph_apache
when: not http_100_continue
# Purge Ceph Apache and FastCGI packages if needed
- name: "Purge Ceph Apache and FastCGI packages"
# NOTE (leseb): purge Ceph Apache and FastCGI packages if needed
- name: purge Ceph Apache and FastCGI packages
apt: >
pkg="{{ item }}"
state=absent
@ -88,7 +85,7 @@
- libapache2-mod-fastcgi
when: purge_default_apache.changed or purge_ceph_apache.changed
- name: "Install Apache and fastcgi"
- name: install Apache and fastcgi
apt: >
pkg={{ item }}
state=present
@ -97,34 +94,28 @@
- apache2
- libapache2-mod-fastcgi
## Prepare Apache
#
- name: Install default httpd.conf
- name: install default httpd.conf
template: >
src=httpd.conf
dest=/etc/apache2/httpd.conf
owner=root
group=root
- name: Enable some apache mod rewrite and fastcgi
- name: enable some apache mod rewrite and fastcgi
command: "{{ item }}"
with_items:
- a2enmod rewrite
- a2enmod fastcgi
changed_when: False
- name: Install Rados Gateway vhost
- name: install Rados Gateway vhost
template: >
src=rgw.conf
dest=/etc/apache2/sites-available/rgw.conf
owner=root
group=root
## Prepare RGW
#
- name: Enable Rados Gateway vhost and disable default site
- name: enable Rados Gateway vhost and disable default site
command: "{{ item }}"
with_items:
- a2ensite rgw.conf
@ -134,7 +125,7 @@
- restart apache2
changed_when: False
- name: Install s3gw.fcgi script
- name: install s3gw.fcgi script
template: >
src=s3gw.fcgi.j2
dest=/var/www/s3gw.fcgi

View File

@ -1,22 +1,19 @@
---
## Deploy RADOS Gateway
#
- name: Add Ceph extra
- name: add Ceph extra
template: >
src=ceph-extra.repo
dest=/etc/yum.repos.d
owner=root
group=root
- name: Add special fastcgi repository key
- name: add special fastcgi repository key
rpm_key: key=http://dag.wieers.com/rpm/packages/RPM-GPG-KEY.dag.txt
- name: Add special fastcgi repository
- name: add special fastcgi repository
command: rpm -ivh http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm
changed_when: False
- name: "Install Apache and fastcgi"
- name: install Apache and fastcgi
yum: >
name={{ item }}
state=present
@ -25,20 +22,14 @@
- mod_fastcgi
- mod_fcgid
## Prepare Apache
#
- name: Install Rados Gateway vhost
- name: install Rados Gateway vhost
template: >
src=rgw.conf
dest=/etc/httpd/conf.d/rgw.conf
owner=root
group=root
## Prepare RGW
#
- name: Install s3gw.fcgi script
- name: install s3gw.fcgi script
template: >
src=s3gw.fcgi.j2
dest=/var/www/s3gw.fcgi
@ -46,7 +37,7 @@
owner=root
group=root
- name: Disable default site
- name: disable default site
shell: sed -i "s/^[^+#]/#/g" /etc/httpd/conf.d/welcome.conf
changed_when: False
notify:

View File

@ -1,5 +1,4 @@
---
- include: pre_requisite.yml
when: not ceph_containerized_deployment
@ -9,14 +8,14 @@
- include: install_debian.yml
when: ansible_os_family == 'Debian' and radosgw_frontend == 'apache' and not ceph_containerized_deployment
- name: "Install Rados Gateway"
- name: install Rados Gateway
apt: >
pkg=radosgw
state=present
update_cache=yes
when: ansible_os_family == 'Debian' and not ceph_containerized_deployment
- name: "Install Rados Gateway"
- name: install Rados Gateway
yum: >
name=ceph-radosgw
state=present

View File

@ -1,5 +1,5 @@
---
- name: Create RGW directory
- name: create RGW directory
file: >
path=/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_hostname }}
state=directory
@ -7,7 +7,7 @@
group=root
mode=0644
- name: Copy RGW bootstrap key
- name: copy RGW bootstrap key
copy: >
src=fetch/{{ fsid }}/etc/ceph/ceph.client.radosgw.{{ ansible_hostname }}.keyring
dest=/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_hostname }}/keyring
@ -16,7 +16,7 @@
mode=600
when: cephx
- name: Activate RGW with upstart
- name: activate RGW with upstart
file: >
path=/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_hostname }}/{{ item }}
state=touch

View File

@ -1,14 +1,14 @@
---
## If we don't perform this check Ansible will start multiple instance of radosgw
- name: Check if RGW is started
# NOTE (leseb): if we don't perform this check Ansible will start multiple instance of radosgw
- name: check if RGW is started
command: /etc/init.d/radosgw status
register: rgwstatus
ignore_errors: True
- name: Start RGW
- name: start RGW
service: name=radosgw-all state=started
when: ansible_distribution == "Ubuntu"
- name: Start RGW
- name: start RGW
command: /etc/init.d/radosgw start
when: rgwstatus.rc != 0 and ansible_distribution != "Ubuntu"

View File

@ -1,12 +1,12 @@
---
- include: pre_requisite.yml
- name: Check if Ceph REST API is already started
- name: check if Ceph REST API is already started
shell: "ps aux|grep [c]eph-rest-api"
register: restapi_status
ignore_errors: True
- name: Start Ceph REST API
- name: start Ceph REST API
shell: "nohup ceph-rest-api &"
when: restapi_status.rc != 0
changed_when: False

View File

@ -1,5 +1,5 @@
---
- name: Create Ceph REST API directory
- name: create Ceph REST API directory
file: >
path=/var/lib/ceph/restapi/ceph-restapi
state=directory
@ -7,7 +7,7 @@
group=root
mode=0644
- name: Copy Ceph REST API keyring
- name: copy Ceph REST API keyring
copy: >
src=fetch/{{ fsid }}/etc/ceph/ceph.client.restapi.keyring
dest=/var/lib/ceph/restapi/ceph-restapi/keyring
@ -16,7 +16,7 @@
mode=600
when: cephx
- name: Activate Ceph REST API with upstart
- name: activate Ceph REST API with upstart
file: >
path=/var/lib/ceph/restapi/{{ item }}
state=touch
@ -29,7 +29,7 @@
when: ansible_distribution == "Ubuntu"
changed_when: False
- name: Activate Ceph REST API with sysvinit
- name: activate Ceph REST API with sysvinit
file: >
path=/var/lib/ceph/restapi/{{ item }}
state=touch
@ -42,7 +42,7 @@
when: ansible_distribution != "Ubuntu"
# NOTE (leseb): will uncomment this when this https://github.com/ceph/ceph/pull/4144 lands
#- name: Start and add that the Ceph REST API service to the init sequence (Ubuntu)
#- name: start and add that the Ceph REST API service to the init sequence (Ubuntu)
# service: >
# name=ceph-restapi
# state=started
@ -50,7 +50,7 @@
# args="id={{ ansible_hostname }}"
# when: ansible_distribution == "Ubuntu"
#
#- name: Start and add that the Ceph REST API service to the init sequence
#- name: start and add that the Ceph REST API service to the init sequence
# service: >
# name=ceph
# state=started