Merge branch 'master' of https://github.com/jjoos/ceph-ansible into jjoos-master

Signed-off-by: Sébastien Han <sebastien.han@enovance.com>

Conflicts:
	roles/ceph-common/defaults/main.yml
	roles/ceph-common/tasks/Debian.yml
	roles/ceph-osd/tasks/journal_collocation.yml
	roles/ceph-osd/tasks/osd_directory.yml
	roles/ceph-osd/tasks/raw_journal.yml
	roles/ceph-osd/tasks/raw_multi_journal.yml
pull/133/merge
Sébastien Han 2014-11-05 17:57:28 +01:00
commit 74ede6e166
35 changed files with 617 additions and 614 deletions

View File

@ -1,89 +0,0 @@
---
# Variables here are applicable to the current role
## Setup options
#
# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
#fsid:
## Packages branch
ceph_key: https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
ceph_stable: true # use ceph stable branch
ceph_stable_release: firefly # ceph stable release
# This option is needed for _both_ stable and dev version, so please always fill the right version
# # supported distros are el6, rhel6, f18, f19, opensuse12.2, sles11, centos7 (see http://ceph.com/rpm-firefly/)
ceph_stable_redhat_distro: el7
ceph_dev: false # use ceph developement branch
ceph_dev_branch: master # developement branch you would like to use e.g: master, wip-hack
# supported distros are centos6, centos7, fc17, fc18, fc19, fc20, fedora17, fedora18,
# fedora19, fedora20, opensuse12, sles0. (see http://gitbuilder.ceph.com/).
# For rhel, please pay attention to the versions: 'rhel6 3' or 'rhel 4', the fullname is _very_ important.
ceph_dev_redhat_distro: centos7
## Ceph options
#
cephx: true
cephx_require_signatures: true
cephx_cluster_require_signatures: true
cephx_service_require_signatures: false
disable_in_memory_logs: true
## Monitor options
#
monitor_interface: eth1
mon_osd_down_out_interval: 600
mon_osd_min_down_reporters: 7 # number of OSDs per host + 1
mon_clock_drift_allowed: .15
mon_clock_drift_warn_backoff: 30
mon_osd_full_ratio: .95
mon_osd_nearfull_ratio: .85
mon_osd_report_timeout: 300
## OSD options
#
journal_size: 100
pool_default_pg_num: 128
pool_default_pgp_num: 128
pool_default_size: 2
pool_default_min_size: 1
cluster_network: 192.168.42.0/24
public_network: 192.168.42.0/24
osd_mkfs_type: xfs
osd_mkfs_options_xfs: -f -i size=2048
osd_mount_options_xfs: noatime
osd_mon_heartbeat_interval: 30
# CRUSH
pool_default_crush_rule: 0
osd_crush_update_on_start: "true"
# Object backend
osd_objectstore: filestore
# Performance tuning
filestore_merge_threshold: 40
filestore_split_multiple: 8
osd_op_threads: 8
filestore_op_threads: 8
filestore_max_sync_interval: 5
osd_max_scrubs: 1
# Recovery tuning
osd_recovery_max_active: 5
osd_max_backfills: 2
osd_recovery_op_priority: 2
osd_recovery_max_chunk: 8388608
osd_recovery_threads: 1
## MDS options
#
mds: true # disable mds configuration in ceph.conf
# Rados Gateway options
#
radosgw: true # referenced in monitor role too.
#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
## Testing mode
# enable this mode _only_ when you have a single node
# if you don't want it keep the option commented
#common_single_host_mode: true

View File

@ -1,12 +1,11 @@
 ---
-- name: "update apt cache"
-  action: apt update-cache=yes
+- name: update apt cache
+  apt: update-cache=yes
-- name: "restart ceph ubuntu"
-  shell: service ceph restart ; service ceph-osd-all restart
-  when: socket.rc == 0
-- name: "restart ceph debian redhat"
-  command: service ceph restart
+- name: restart ceph
+  command: service ceph restart
   when: socket.rc == 0
+- name: restart ceph-osd-all on ubuntu
+  shell: service ceph restart ; service ceph-osd-all restart
+  when: socket.rc == 0 and ansible_distribution == 'Ubuntu'

View File

@ -1,67 +0,0 @@
---
## Common to all the Ceph Debian nodes
#
- name: Fail on unsupported system
fail: msg="System not supported {{ ansible_system }}"
when: ansible_system not in ['Linux']
- name: Fail on unsupported architecture
fail: msg="Architecture not supported {{ ansible_architecture }}"
when: ansible_architecture not in ['x86_64']
- name: Fail on unsupported distribution
fail: msg="Distribution not supported {{ ansible_os_family }}"
when: ansible_os_family not in ['Debian', 'RedHat']
- name: Install dependencies
apt: pkg={{ item }} state=present update_cache=yes cache_valid_time=3600 # we update the cache just in case...
with_items:
- python-pycurl
- ntp
- hdparm
- name: Install the Ceph repository stable key
apt_key: data="{{ lookup('file', 'cephstable.asc') }}" state=present
when: ceph_stable
- name: Install the Ceph developement repository key
apt_key: data="{{ lookup('file', 'cephdev.asc') }}" state=present
when: ceph_dev
- name: Add Ceph stable repository
apt_repository: repo='deb http://ceph.com/debian-{{ ceph_stable_release }}/ {{ ansible_lsb.codename }} main' state=present
when: ceph_stable
- name: Add Ceph development repository
apt_repository: repo='deb http://gitbuilder.ceph.com/ceph-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/{{ ceph_dev_branch }} {{ ansible_lsb.codename }} main' state=present
when: ceph_dev
- name: Install Ceph
apt: pkg={{ item }} state=latest
with_items:
- ceph
- ceph-common #|
- ceph-fs-common #|--> yes, they are already all dependencies from 'ceph'
- ceph-fuse #|--> however while proceding to rolling upgrades and the 'ceph' package upgrade
- ceph-mds #|--> they don't get update so we need to force them
- libcephfs1 #|
- name: Check for a Ceph socket
shell: stat /var/run/ceph/*.asok > /dev/null 2>&1
ignore_errors: true
register: socket
- name: Generate Ceph configuration file
template: src=ceph.conf.j2 dest=/etc/ceph/ceph.conf owner=root group=root mode=0644
notify: restart ceph ubuntu
when: ansible_distribution == 'Ubuntu'
- name: Generate Ceph configuration file
template: src=ceph.conf.j2 dest=/etc/ceph/ceph.conf owner=root group=root mode=0644
notify: restart ceph debian redhat
when: ansible_distribution == 'Debian' or ansible_os_family == 'RedHat'
- name: Disable OSD directory parsing by updatedb
command: updatedb -e /var/lib/ceph
ignore_errors: true

View File

@ -1,54 +0,0 @@
---
## Common to all the Ceph RedHat nodes
#
- name: Fail on unsupported system
fail: msg="System not supported {{ ansible_system }}"
when: ansible_system not in ['Linux']
- name: Fail on unsupported architecture
fail: msg="Architecture not supported {{ ansible_architecture }}"
when: ansible_architecture not in ['x86_64']
- name: Fail on unsupported distribution
fail: msg="Distribution not supported {{ ansible_os_family }}"
when: ansible_os_family not in ['Debian', 'RedHat']
- name: Install dependencies
yum: name={{ item }} state=present
with_items:
- python-pycurl
- ntp
- hdparm
- name: Install the Ceph stable repository key
rpm_key: key={{ ceph_key }} state=present
when: ceph_stable
- name: Install the Ceph developement repository key
rpm_key: key={{ ceph_key }} state=present
when: ceph_dev
- name: Add Ceph stable repository
command: rpm -U http://ceph.com/rpm-{{ ceph_stable_release }}/{{ redhat_distro }}/noarch/ceph-release-1-0.el6.noarch.rpm creates=/etc/yum.repos.d/ceph.repo
when: ceph_stable
- name: Add Ceph development repository
command: rpm -U http://gitbuilder.ceph.com/ceph-rpm-{{ ceph_dev_redhat_distro }}-x86_64-basic/ref/{{ ceph_dev_branch }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro }}.noarch.rpm creates=/etc/yum.repos.d/ceph.repo
when: ceph_dev
- name: Install Ceph
yum: name=ceph state=latest
- name: Check for a Ceph socket
shell: stat /var/run/ceph/*.asok > /dev/null 2>&1
ignore_errors: true
register: socket
- name: Generate Ceph configuration file
template: src=ceph.conf.j2 dest=/etc/ceph/ceph.conf owner=root group=root mode=0644
notify: restart ceph debian redhat
- name: Disable OSD directory parsing by updatedb
command: updatedb -e /var/lib/ceph
ignore_errors: true

View File

@ -0,0 +1,47 @@
---
- name: Install dependencies
apt: >
pkg={{ item }}
state=present
update_cache=yes
cache_valid_time=3600
with_items:
- python-pycurl
- ntp
- hdparm
- name: Install the Ceph repository stable key
apt_key: >
data="{{ lookup('file', 'cephstable.asc') }}"
state=present
when: ceph_stable
- name: Install the Ceph development repository key
apt_key: >
data="{{ lookup('file', 'cephdev.asc') }}"
state=present
when: ceph_dev
- name: Add Ceph stable repository
apt_repository: >
repo="deb http://ceph.com/debian-{{ ceph_stable_release }}/ {{ ansible_lsb.codename }} main"
state=present
when: ceph_stable
- name: Add Ceph development repository
apt_repository: >
repo="deb http://gitbuilder.ceph.com/ceph-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/{{ ceph_dev_branch }} {{ ansible_lsb.codename }} main"
state=present
when: ceph_dev
- name: Install Ceph
apt: >
pkg={{ item }}
state=latest
with_items:
- ceph
- ceph-common #|
- ceph-fs-common #|--> yes, they are already all dependencies from 'ceph'
- ceph-fuse #|--> however while proceeding with rolling upgrades and the 'ceph' package upgrade
- ceph-mds #|--> they don't get updated so we need to force them
- libcephfs1 #|

View File

@ -0,0 +1,35 @@
---
- name: Install dependencies
yum: >
name={{ item }}
state=present
with_items:
- python-pycurl
- ntp
- hdparm
- name: Install the Ceph stable repository key
rpm_key: >
key={{ ceph_key }}
state=present
when: ceph_stable
- name: Install the Ceph development repository key
rpm_key: >
key={{ ceph_key }}
state=present
when: ceph_dev
- name: Add Ceph stable repository
command: "rpm -U http://ceph.com/rpm-{{ ceph_stable_release }}/{{ redhat_distro }}/noarch/ceph-release-1-0.el6.noarch.rpm creates=/etc/yum.repos.d/ceph.repo"
when: ceph_stable
- name: Add Ceph development repository
command: "rpm -U http://gitbuilder.ceph.com/ceph-rpm-{{ ceph_dev_redhat_distro }}-x86_64-basic/ref/{{ ceph_dev_branch }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro }}.noarch.rpm creates=/etc/yum.repos.d/ceph.repo"
when: ceph_dev
- name: Install Ceph
yum: >
name=ceph
state=latest

View File

@ -1,8 +1,38 @@
 ---
-## Check OS family
-#
-- include: RedHat.yml
+- name: Fail on unsupported system
+  fail: "msg=System not supported {{ ansible_system }}"
+  when: "ansible_system not in ['Linux']"
+- name: Fail on unsupported architecture
+  fail: "msg=Architecture not supported {{ ansible_architecture }}"
+  when: "ansible_architecture not in ['x86_64']"
+- name: Fail on unsupported distribution
+  fail: "msg=Distribution not supported {{ ansible_os_family }}"
+  when: "ansible_os_family not in ['Debian', 'RedHat']"
+- include: install_on_redhat.yml
   when: ansible_os_family == 'RedHat'
-- include: Debian.yml
+- include: install_on_debian.yml
   when: ansible_os_family == 'Debian'
+- name: Check for a Ceph socket
+  shell: "stat /var/run/ceph/*.asok > /dev/null 2>&1"
+  ignore_errors: true
+  register: socket
+- name: Generate Ceph configuration file
+  template: >
+    src=ceph.conf.j2
+    dest=/etc/ceph/ceph.conf
+    owner=root
+    group=root
+    mode=0644
+  notify:
+    - restart ceph
+    - restart ceph-osd-all on ubuntu
+- name: Disable OSD directory parsing by updatedb
+  command: updatedb -e /var/lib/ceph
+  ignore_errors: true

View File

@ -1,3 +1,89 @@
 ---
-# You can override default vars defined in defaults/main.yml here,
-# but I would advice to use host or group vars instead
+# You can override vars by using host or group vars
## Setup options
#
# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
#fsid:
## Packages branch
ceph_key: https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
ceph_stable: true # use ceph stable branch
ceph_stable_release: firefly # ceph stable release
# This option is needed for _both_ stable and dev version, so please always fill the right version
# # supported distros are el6, rhel6, f18, f19, opensuse12.2, sles11, centos7 (see http://ceph.com/rpm-firefly/)
ceph_stable_redhat_distro: el7
ceph_dev: false # use ceph development branch
ceph_dev_branch: master # development branch you would like to use e.g.: master, wip-hack
# supported distros are centos6, centos7, fc17, fc18, fc19, fc20, fedora17, fedora18,
# fedora19, fedora20, opensuse12, sles0. (see http://gitbuilder.ceph.com/).
# For rhel, please pay attention to the versions: 'rhel6 3' or 'rhel 4', the fullname is _very_ important.
ceph_dev_redhat_distro: centos7
## Ceph options
#
cephx: true
cephx_require_signatures: true
cephx_cluster_require_signatures: true
cephx_service_require_signatures: false
disable_in_memory_logs: true
## Monitor options
#
monitor_interface: eth1
mon_osd_down_out_interval: 600
mon_osd_min_down_reporters: 7 # number of OSDs per host + 1
mon_clock_drift_allowed: .15
mon_clock_drift_warn_backoff: 30
mon_osd_full_ratio: .95
mon_osd_nearfull_ratio: .85
mon_osd_report_timeout: 300
## OSD options
#
journal_size: 100
pool_default_pg_num: 128
pool_default_pgp_num: 128
pool_default_size: 2
pool_default_min_size: 1
cluster_network: 192.168.42.0/24
public_network: 192.168.42.0/24
osd_mkfs_type: xfs
osd_mkfs_options_xfs: -f -i size=2048
osd_mount_options_xfs: noatime
osd_mon_heartbeat_interval: 30
# CRUSH
pool_default_crush_rule: 0
osd_crush_update_on_start: "true"
# Object backend
osd_objectstore: filestore
# Performance tuning
filestore_merge_threshold: 40
filestore_split_multiple: 8
osd_op_threads: 8
filestore_op_threads: 8
filestore_max_sync_interval: 5
osd_max_scrubs: 1
# Recovery tuning
osd_recovery_max_active: 5
osd_max_backfills: 2
osd_recovery_op_priority: 2
osd_recovery_max_chunk: 1048576
osd_recovery_threads: 1
## MDS options
#
mds: true # set to false to disable mds configuration in ceph.conf
# Rados Gateway options
#
radosgw: true # referenced in monitor role too.
#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
## Testing mode
# enable this mode _only_ when you have a single node
# if you don't want it keep the option commented
#common_single_host_mode: true
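
As the header notes, these values are meant to be overridden through host or group vars rather than edited in place. A minimal sketch of such an override, assuming the conventional group_vars/all location (the file name and values below are illustrative, not part of this commit):

    # group_vars/all
    ceph_stable_release: firefly      # stable release to deploy
    monitor_interface: eth0           # interface the monitors listen on
    public_network: 10.0.0.0/24       # adapt to your environment
    cluster_network: 10.0.1.0/24
    journal_size: 5120                # journal size in MB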

View File

@ -1,7 +0,0 @@
---
# Variables here are applicable to the current role
## Ceph options
#
cephx: true

View File

@ -1,13 +1,22 @@
 ---
 ## Deploy Ceph metadata server(s)
-#
 - name: Copy MDS bootstrap key
-  copy: src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-mds/ceph.keyring dest=/var/lib/ceph/bootstrap-mds/ceph.keyring owner=root group=root mode=600
+  copy: >
+    src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-mds/ceph.keyring
+    dest=/var/lib/ceph/bootstrap-mds/ceph.keyring
+    owner=root
+    group=root
+    mode=600
   when: cephx
 - name: Create MDS directory
-  action: file path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }} state=directory owner=root group=root mode=0644
+  file: >
+    path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}
+    state=directory
+    owner=root
+    group=root
+    mode=0644
   when: cephx
 - name: Create MDS keyring

@ -16,8 +25,16 @
   changed_when: False
 - name: Set MDS key permissions
-  file: path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring mode=0600 owner=root group=root
+  file: >
+    path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring
+    mode=0600
+    owner=root
+    group=root
   when: cephx
 - name: Start and add that the MDS service to the init sequence
-  service: name=ceph state=started enabled=yes args=mds
+  service: >
+    name=ceph
+    state=started
+    enabled=yes
+    args=mds

View File

@ -1,3 +1,4 @@
 ---
-# You can override default vars defined in defaults/main.yml here,
-# but I would advice to use host or group vars instead
+# You can override vars by using host or group vars
+cephx: true

View File

@ -1,14 +0,0 @@
---
# Variables here are applicable to the current role
## Ceph options
#
# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
# fsid:
# monitor_secret:
cephx: true
# Rados Gateway options
# referenced in common role too.
radosgw: true

View File

@ -0,0 +1,28 @@
---
- name: Create monitor initial keyring
command: "ceph-authtool /var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }} --create-keyring --name=mon. --add-key={{ monitor_secret }} --cap mon 'allow *' creates=/var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }}"
- name: Set initial monitor key permissions
file: >
path=/var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }}
mode=0600
owner=root
group=root
- name: Create monitor directory
file: >
path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}
state=directory
owner=root
group=root
mode=0644
- name: Ceph monitor mkfs
command: "ceph-mon --mkfs -i {{ ansible_hostname }} --keyring /var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }} creates=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/keyring"
- name: Start and add the monitor service to the init sequence
service: >
name=ceph
state=started
enabled=yes
args=mon
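
The {{ monitor_secret }} consumed by the ceph-authtool call above must already be defined, typically alongside fsid in group vars. One way to produce a value ahead of time, shown only as an illustrative sketch (the task below is not part of this commit):

    - name: Generate a monitor secret (run once, store the output in your group vars)
      command: ceph-authtool --gen-print-key
      register: monitor_secret_out
      changed_when: False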

View File

@ -1,26 +1,8 @@
 ---
-## Deploy Ceph monitor(s)
-#
-- name: Create monitor initial keyring
-  command: ceph-authtool /var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }} --create-keyring --name=mon. --add-key={{ monitor_secret }} --cap mon 'allow *' creates=/var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }}
-- name: Set initial monitor key permissions
-  file: path=/var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }} mode=0600 owner=root group=root
-- name: Create monitor directory
-  file: path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }} state=directory owner=root group=root mode=0644
-- name: Ceph monitor mkfs
-  command: ceph-mon --mkfs -i {{ ansible_hostname }} --keyring /var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }} creates=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/keyring
-- name: Start and add that the monitor service to the init sequence
-  service: name=ceph state=started enabled=yes args=mon
+- include: deploy_monitors.yml
 # Wait for mon discovery and quorum resolution
 # the admin key is not instantanely created so we have to wait a bit
-#
 - name: If client.admin key exists
   command: stat /etc/ceph/ceph.client.admin.keyring
   register: result

@ -33,7 +15,10 @
   changed_when: False
 - name: Copy keys to the ansible server
-  fetch: src={{ item }} dest=fetch/{{ fsid }}/{{ item }} flat=yes
+  fetch: >
+    src={{ item }}
+    dest=fetch/{{ fsid }}/{{ item }}
+    flat=yes
   when: cephx
   with_items:
     - /etc/ceph/ceph.client.admin.keyring # just in case another application needs it

@ -42,5 +27,10 @
     - /etc/ceph/keyring.radosgw.gateway
 - name: Drop in a motd script to report status when logging in
-  copy: src=precise/92-ceph dest=/etc/update-motd.d/92-ceph owner=root group=root mode=0755
+  copy: >
+    src=precise/92-ceph
+    dest=/etc/update-motd.d/92-ceph
+    owner=root
+    group=root
+    mode=0755
   when: ansible_distribution_release == 'precise'

View File

@ -1,3 +1,11 @@
 ---
-# You can override default vars defined in defaults/main.yml here,
-# but I would advice to use host or group vars instead
+# You can override vars by using host or group vars
+# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
+# fsid:
+# monitor_secret:
+cephx: true
+# Rados Gateway options
+# referenced in common role too.
+radosgw: true

View File

@ -1,65 +0,0 @@
---
# Variables here are applicable to the current role
#
## Ceph options
#
# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
# fsid:
cephx: true
# Devices to be used as OSDs
# You can pre-provision disks that are not present yet.
# Ansible will just skip them. Newly added disk will be
# automatically configured during the next run.
#
# !! WARNING !!
#
# /!\ ENABLE ONLY ONE SCENARIO AT A TIME /!\
#
# !! WARNING !!
# Declare devices
# All the scenarii inherit from the following device declaration
#
devices: [ '/dev/sdb', '/dev/sdc', '/dev/sdd', '/dev/sde']
# I. First scenario: journal and osd_data on the same device
# Use 'true' to enable this scenario
# This will collocate both journal and data on the same disk
# creating a partition at the beginning of the device
#
journal_collocation: true
# II. Second scenario: single journal device for N OSDs
# Use 'true' to enable this scenario
#
raw_journal: false
raw_journal_device: /dev/sdb
# III. Third scenario: N journal devices for N OSDs
# Use 'true' to enable this scenario
#
# In the following example:
# * sdd and sde will get sdb as a journal
# * sdf and sdg will get sdc as a journal
# While starting you have 2 options:
# 1. Pre-allocate all the devices
# 2. Progressively add new devices
raw_multi_journal: false
raw_journal_devices: [ '/dev/sdb', '/dev/sdb', '/dev/sdc', '/dev/sdc' ]
# IV. Fourth scenario: use directory instead of disk for OSDs
# Use 'true' to enable this scenario
osd_directory: false
osd_directories: [ '/var/lib/ceph/osd/mydir1', '/var/lib/ceph/osd/mydir2', '/var/lib/ceph/osd/mydir3', '/var/lib/ceph/osd/mydir4']

View File

@ -0,0 +1,38 @@
---
# Activate means:
# - mount the volume in a temp location
# - allocate an osd id (if needed)
# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
# - start ceph-osd
#
# This task is for disk devices only because of the explicit use of the first
# partition.
- name: Activate OSD(s) when device is a disk
command: |
ceph-disk activate {{ item.2 | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\\1p') }}1
with_together:
- parted.results
- ispartition.results
- devices
when: item.0.rc == 0 and item.1.rc != 0
ignore_errors: True
changed_when: False
# This task is for partitions because we don't explicitly use a partition.
- name: Activate OSD(s) when device is a partition
command: "ceph-disk activate {{ item.1 }}"
with_together:
- ispartition.results
- devices
when: item.0.rc == 0
ignore_errors: True
changed_when: False
- name: Start and add the OSD service to the init sequence
service: >
name=ceph
state=started
enabled=yes
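
Both activation tasks above rely on with_together to keep each registered check lined up with its own device, so item.0, item.1 and item.2 always describe the same disk. An illustrative debug task (not part of this commit) makes the pairing visible:

    - name: Show how with_together pairs the checks with each device (illustrative only)
      debug: msg="{{ item.2 }} -> parted rc {{ item.0.rc }}, ispartition rc {{ item.1.rc }}"
      with_together:
        - parted.results
        - ispartition.results
        - devices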

View File

@ -0,0 +1,19 @@
---
# NOTE (leseb): the current behavior of ceph-disk is to fail when the device is mounted: "stderr: ceph-disk: Error: Device is mounted: /dev/sdb1"
# the return code is 1, which makes sense, however ideally if ceph-disk detects a ceph partition
# it should exit with rc=0 and do nothing unless we pass something like --force
# As a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "ignore_errors: True"
# I believe it's safer
- name: Check if the device is a partition or a disk
shell: "echo '{{ item }}' | egrep '/dev/(([a-z]{3,4}[0-9]$)|(cciss/c[0-9]{1}d[0-9]{1}p[0-9]$))'"
ignore_errors: true
with_items: devices
register: ispartition
changed_when: False
- name: If partition named 'ceph' exists
shell: "parted --script {{ item }} print | egrep -sq '^ 1.*ceph'"
ignore_errors: True
with_items: devices
register: parted
changed_when: False
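
For reference, the egrep pattern above classifies a path as a partition when it ends in a digit (plain sd-style names) or matches the cciss pN form; a few illustrative examples (device names are examples only):

    # /dev/sdb          -> no match: whole disk, will go through ceph-disk prepare
    # /dev/sdb1         -> match: already a partition
    # /dev/cciss/c0d0   -> no match: whole HP Smart Array disk
    # /dev/cciss/c0d0p1 -> match: partition on a cciss device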

View File

@ -1,39 +1,7 @@
 ---
 ## SCENARIO 1: JOURNAL AND OSD_DATA ON THE SAME DEVICE
-#
-- name: Install dependencies
-  apt: pkg=parted state=present
-  when: ansible_os_family == 'Debian'
-- name: Install dependencies
-  yum: name=parted state=present
-  when: ansible_os_family == 'RedHat'
-- name: Copy OSD bootstrap key
-  copy: src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600
-  when: cephx
-# NOTE (leseb): current behavior of ceph-disk is to fail when the device is mounted "stderr: ceph-disk: Error: Device is mounted: /dev/sdb1"
-# the return code is 1, which makes sense, however ideally if ceph-disk will detect a ceph partition
-# it should exist we rc=0 and don't do anything unless we do something like --force
-# As as a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "ignore_errors: True"
-# I believe it's safer
-#
-- name: Check if the device is a partition or a disk
-  shell: echo '{{ item }}' | egrep '/dev/(([a-z]{3,4}[0-9]$)|(cciss/c[0-9]{1}d[0-9]{1}p[0-9]$))'
-  ignore_errors: true
-  with_items: devices
-  register: ispartition
-  changed_when: False
-- name: If partition named 'ceph' exists
-  shell: parted --script {{ item }} print | egrep -sq '^ 1.*ceph'
-  ignore_errors: True
-  with_items: devices
-  register: parted
-  changed_when: False
+- include: check_devices.yml
 # Prepare means
 # - create GPT partition for a disk, or a loop label for a partition

@ -52,7 +20,7 @
 # failed, this is why we check if the device is a partition too.
 - name: Prepare OSD disk(s)
-  command: ceph-disk prepare {{ item.2 }}
+  command: "ceph-disk prepare {{ item.2 }}"
   when: (item.0.rc != 0 or item.1.rc != 0) and journal_collocation
   ignore_errors: True
   with_together:

@ -60,36 +28,4 @
     - ispartition.results
     - devices
-# Activate means:
-# - mount the volume in a temp location
-# - allocate an osd id (if needed)
-# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
-# - start ceph-osd
-#
-# This task is for disk devices only because of the explicit use of the first
-# partition.
-- name: Activate OSD(s) when device is a disk
-  command: ceph-disk activate {{ item.2 | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\\1p') }}1
-  with_together:
-    - parted.results
-    - ispartition.results
-    - devices
-  when: item.0.rc == 0 and item.1.rc != 0
-  ignore_errors: True
-  changed_when: False
-# This task is for partitions because we don't explicitly use a partition.
-- name: Activate OSD(s) when device is a partition
-  command: ceph-disk activate {{ item.1 }}
-  with_together:
-    - ispartition.results
-    - devices
-  when: item.0.rc == 0
-  ignore_errors: True
-  changed_when: False
-- name: Start and add that the OSD service to the init sequence
-  service: name=ceph state=started enabled=yes args=osd
+- include: activate_osds.yml

View File

@ -1,13 +1,30 @@
 ---
 ## Deploy Ceph Oject Storage Daemon(s)
-#
+- name: Install dependencies
+  apt: >
+    pkg=parted
+    state=present
+  when: ansible_os_family == 'Debian'
+- name: Install dependencies
+  yum: >
+    name=parted
+    state=present
+  when: ansible_os_family == 'RedHat'
+- name: Copy OSD bootstrap key
+  copy: >
+    src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring
+    dest=/var/lib/ceph/bootstrap-osd/ceph.keyring
+    owner=root
+    group=root
+    mode=600
+  when: cephx
 - include: journal_collocation.yml
   when: journal_collocation
-- include: raw_journal.yml
-  when: raw_journal
 - include: raw_multi_journal.yml
   when: raw_multi_journal

View File

@ -1,25 +1,16 @@
 ---
 ## SCENARIO 4: USE A DIRECTORY INSTEAD OF A DISK FOR OSD
-#
-- name: Install dependencies
-  apt: pkg=parted state=present
-  when: ansible_os_family == 'Debian'
-- name: Install dependencies
-  yum: name=parted state=present
-  when: ansible_os_family == 'RedHat'
-- name: Copy OSD bootstrap key
-  copy: src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600
-  when: cephx
 # NOTE (leseb): we do not check the filesystem underneath the directory
 # so it is really up to you to configure this properly.
 # Declaring more than one directory on the same filesystem will confuse Ceph.
 - name: Create OSD directories
-  file: path={{ item }} state=directory owner=root group=root
+  file: >
+    path={{ item }}
+    state=directory
+    owner=root
+    group=root
   with_items: osd_directories
 # Prepare means

@ -36,7 +27,7 @
 # since Ansible will sequential process the loop
 - name: Prepare OSD disk(s)
-  command: ceph-disk prepare {{ item }}
+  command: "ceph-disk prepare {{ item }}"
   when: osd_directory
   with_items: osd_directories

@ -48,9 +39,12 @
 #
 - name: Activate OSD(s)
-  command: ceph-disk activate {{ item }}
+  command: "ceph-disk activate {{ item }}"
   with_items: osd_directories
   changed_when: False
 - name: Start and add that the OSD service to the init sequence
-  service: name=ceph state=started enabled=yes args=osd
+  service: >
+    name=ceph
+    state=started
+    enabled=yes

View File

@ -1,39 +1,7 @@
 ---
 ## SCENARIO 3: N JOURNAL DEVICES FOR N OSDS
-#
-- name: Install dependencies
-  apt: pkg=parted state=present
-  when: ansible_os_family == 'Debian'
-- name: Install dependencies
-  yum: name=parted state=present
-  when: ansible_os_family == 'RedHat'
-- name: Copy OSD bootstrap key
-  copy: src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600
-  when: cephx
-# NOTE (leseb): current behavior of ceph-disk is to fail when the device is mounted "stderr: ceph-disk: Error: Device is mounted: /dev/sdb1"
-# the return code is 1, which makes sense, however ideally if ceph-disk will detect a ceph partition
-# it should exist we rc=0 and don't do anything unless we do something like --force
-# As as a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "ignore_errors: True"
-# I believe it's safer
-#
-- name: Check if the device is a partition or a disk
-  shell: echo '{{ item }}' | egrep '/dev/[a-z]{3}[0-9]$'
-  ignore_errors: true
-  with_items: devices
-  register: ispartition
-  changed_when: False
-- name: If partition named 'ceph' exists
-  shell: parted --script {{ item }} print | egrep -sq '^ 1.*ceph'
-  ignore_errors: True
-  with_items: devices
-  register: parted
-  changed_when: False
+- include: check_devices.yml
 # Prepare means
 # - create GPT partition for a disk, or a loop label for a partition

@ -52,7 +20,7 @
 # failed, this is why we check if the device is a partition too.
 - name: Prepare OSD disk(s)
-  command: ceph-disk prepare {{ item.2 }} {{ item.3 }}
+  command: "ceph-disk prepare {{ item.2 }} {{ item.3 }}"
   when: (item.0.rc != 0 or item.1.rc != 0) and raw_multi_journal
   ignore_errors: True
   with_together:

@ -61,36 +29,4 @
     - devices
     - raw_journal_devices
-# Activate means:
-# - mount the volume in a temp location
-# - allocate an osd id (if needed)
-# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
-# - start ceph-osd
-#
-# This task is for disk devices only because of the explicit use of the first
-# partition.
-- name: Activate OSD(s) when device is a disk
-  command: ceph-disk activate {{ item.2 }}1
-  with_together:
-    - parted.results
-    - ispartition.results
-    - devices
-  when: item.0.rc == 0 and item.1.rc != 0
-  ignore_errors: True
-  changed_when: False
-# This task is for partitions because we don't explicitly use a partition.
-- name: Activate OSD(s) when device is a partition
-  command: ceph-disk activate {{ item.1 }}
-  with_together:
-    - ispartition.results
-    - devices
-  when: item.0.rc == 0
-  ignore_errors: True
-  changed_when: False
-- name: Start and add that the OSD service to the init sequence
-  service: name=ceph state=started enabled=yes args=osd
+- include: activate_osds.yml

View File

@ -1,3 +1,76 @@
 ---
 # You can override default vars defined in defaults/main.yml here,
 # but I would advice to use host or group vars instead
## Ceph options
#
# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
# fsid:
cephx: true
# Devices to be used as OSDs
# You can pre-provision disks that are not present yet.
# Ansible will just skip them. Newly added disk will be
# automatically configured during the next run.
#
# !! WARNING !!
#
# /!\ ENABLE ONLY ONE SCENARIO AT A TIME /!\
#
# !! WARNING !!
# Declare devices
# All the scenarios inherit from the following device declaration
#
devices:
- /dev/sdb
- /dev/sdc
- /dev/sdd
- /dev/sde
# I. First scenario: journal and osd_data on the same device
# Use 'true' to enable this scenario
# This will collocate both journal and data on the same disk
# creating a partition at the beginning of the device
journal_collocation: true
# II. Second scenario: single journal device for N OSDs
# Use 'true' to enable this scenario
# deprecated, please use scenario III with a single raw_journal_device
# III. Third scenario: N journal devices for N OSDs
# Use 'true' to enable this scenario
#
# In the following example:
# * sdd and sde will get sdb as a journal
# * sdf and sdg will get sdc as a journal
# While starting you have 2 options:
# 1. Pre-allocate all the devices
# 2. Progressively add new devices
raw_multi_journal: false
raw_journal_devices:
- /dev/sdb
- /dev/sdb
- /dev/sdc
- /dev/sdc
# IV. Fourth scenario: use directory instead of disk for OSDs
# Use 'true' to enable this scenario
osd_directory: false
osd_directories:
- /var/lib/ceph/osd/mydir1
- /var/lib/ceph/osd/mydir2
- /var/lib/ceph/osd/mydir3
- /var/lib/ceph/osd/mydir4
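
When raw_multi_journal is enabled, the two lists above are consumed positionally by the "Prepare OSD disk(s)" task in raw_multi_journal.yml (ceph-disk prepare {{ item.2 }} {{ item.3 }}): devices[n] is prepared with raw_journal_devices[n] as its journal. With the example values above that pairing works out to (illustrative only):

    # ceph-disk prepare /dev/sdb /dev/sdb
    # ceph-disk prepare /dev/sdc /dev/sdb
    # ceph-disk prepare /dev/sdd /dev/sdc
    # ceph-disk prepare /dev/sde /dev/sdc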

View File

@ -1,12 +0,0 @@
---
# Variables here are applicable to the current role
## Ceph options
#
cephx: true
# Rados Gateway options
redhat_distro_ceph_extra: centos6.4 # supported distros are centos6.3, centos6.4, centos6, fedora18, fedora19, opensuse12.2, rhel6.3, rhel6.4, rhel6.5, rhel6, sles11sp2
email_address: foo@bar.com

View File

@ -1,8 +1,14 @@
 ---
 - name: restart apache2
-  service: name=apache2 state=restarted enabled=yes
+  service: >
+    name=apache2
+    state=restarted
+    enabled=yes
   when: ansible_os_family == 'Debian'
 - name: restart apache2
-  service: name=httpd state=restarted enabled=yes
+  service: >
+    name=httpd
+    state=restarted
+    enabled=yes
   when: ansible_os_family == 'RedHat'

View File

@ -1,70 +0,0 @@
---
## Deploy RADOS Gateway
#
- name: Copy RGW bootstrap key
copy: src=fetch/{{ fsid }}/etc/ceph/keyring.radosgw.gateway dest=/etc/ceph/keyring.radosgw.gateway owner=root group=root mode=600
when: cephx
- name: Set RGW bootstrap key permissions
file: path=/etc/ceph/keyring.radosgw.gateway mode=0600 owner=root group=root
when: cephx
#- name: Add optimized version of the apache2 package repository
# apt_repository: repo='deb http://gitbuilder.ceph.com/apache2-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/master {{ ansible_lsb.codename }} main' state=present
#
#- name: Add optimized version of the fastcgi package repository
# apt_repository: repo='deb http://gitbuilder.ceph.com/libapache-mod-fastcgi-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/master {{ ansible_lsb.codename }} main' state=present
#
- name: Add Ceph extra
apt_repository: repo='deb http://ceph.com/packages/ceph-extras/debian {{ ansible_lsb.codename }} main' state=present
- name: Install Apache, fastcgi and Rados Gateway
apt: pkg={{ item }} state=present
with_items:
- apache2
- libapache2-mod-fastcgi
- radosgw
## Prepare Apache
#
- name: Install default httpd.conf
template: src=httpd.conf dest=/etc/apache2/httpd.conf owner=root group=root
- name: Enable some apache mod rewrite and fastcgi
command: "{{ item }}"
with_items:
- a2enmod rewrite
- a2enmod fastcgi
- name: Install Rados Gateway vhost
template: src=rgw.conf dest=/etc/apache2/sites-available/rgw.conf owner=root group=root
## Prepare RGW
#
- name: Create RGW directory
file: path=/var/lib/ceph/radosgw/{{ ansible_fqdn }} state=directory owner=root group=root mode=0644
- name: Enable Rados Gateway vhost and disable default site
command: "{{ item }}"
with_items:
- a2ensite rgw.conf
- a2dissite default
notify:
- restart apache2
- name: Install s3gw.fcgi script
copy: src=s3gw.fcgi dest=/var/www/s3gw.fcgi mode=0555 owner=root group=root
## If we don't perform this check Ansible will start multiple instance of radosgw
- name: Check if RGW is started
command: /etc/init.d/radosgw status
register: rgwstatus
ignore_errors: True
- name: Start RGW
command: /etc/init.d/radosgw start
when: rgwstatus.rc != 0

View File

@ -0,0 +1,73 @@
---
## Deploy RADOS Gateway
#
- name: Add Ceph extra
apt_repository: >
repo="deb http://ceph.com/packages/ceph-extras/debian {{ ansible_lsb.codename }} main"
state=present
- name: "Install Apache, fastcgi and Rados Gateway"
apt: >
pkg={{ item }}
state=present
with_items:
- apache2
- libapache2-mod-fastcgi
- radosgw
## Prepare Apache
#
- name: Install default httpd.conf
template: src=httpd.conf dest=/etc/apache2/httpd.conf owner=root group=root
- name: Enable some apache mod rewrite and fastcgi
command: "{{ item }}"
with_items:
- a2enmod rewrite
- a2enmod fastcgi
- name: Install Rados Gateway vhost
template: >
src=rgw.conf
dest=/etc/apache2/sites-available/rgw.conf
owner=root
group=root
## Prepare RGW
#
- name: Create RGW directory
file: >
path=/var/lib/ceph/radosgw/{{ ansible_fqdn }}
state=directory
owner=root
group=root
mode=0644
- name: Enable Rados Gateway vhost and disable default site
command: "{{ item }}"
with_items:
- a2ensite rgw.conf
- a2dissite default
notify:
- restart apache2
- name: Install s3gw.fcgi script
copy: >
src=s3gw.fcgi
dest=/var/www/s3gw.fcgi
mode=0555
owner=root
group=root
## If we don't perform this check Ansible will start multiple instances of radosgw
- name: Check if RGW is started
command: /etc/init.d/radosgw status
register: rgwstatus
ignore_errors: True
- name: Start RGW
command: /etc/init.d/radosgw start
when: rgwstatus.rc != 0

View File

@ -2,16 +2,12 @@
 ## Deploy RADOS Gateway
 #
-- name: Copy RGW bootstrap key
-  copy: src=fetch/{{ fsid }}/etc/ceph/keyring.radosgw.gateway dest=/etc/ceph/keyring.radosgw.gateway owner=root group=root mode=600
-  when: cephx
-- name: Set RGW bootstrap key permissions
-  file: path=/etc/ceph/keyring.radosgw.gateway mode=0644 owner=root group=root
-  when: cephx
 - name: Add Ceph extra
-  template: src=ceph-extra.repo dest=/etc/yum.repos.d owner=root group=root
+  template: >
+    src=ceph-extra.repo
+    dest=/etc/yum.repos.d
+    owner=root
+    group=root
 - name: Add special fastcgi repository key
   rpm_key: key=http://dag.wieers.com/rpm/packages/RPM-GPG-KEY.dag.txt

@ -19,8 +15,10 @
 - name: Add special fastcgi repository
   command: rpm -ivh http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm
-- name: Install Apache, fastcgi, and Rados Gateway
-  yum: name={{ item }} state=present
+- name: "Install Apache, fastcgi, and Rados Gateway"
+  yum: >
+    name={{ item }}
+    state=present
   with_items:
     - httpd
     - mod_fastcgi

@ -31,16 +29,30 @
 #
 - name: Install Rados Gateway vhost
-  template: src=rgw.conf dest=/etc/httpd/conf.d/rgw.conf owner=root group=root
+  template: >
+    src=rgw.conf
+    dest=/etc/httpd/conf.d/rgw.conf
+    owner=root
+    group=root
 ## Prepare RGW
 #
 - name: Create RGW directory
-  file: path=/var/lib/ceph/radosgw/{{ ansible_fqdn }} state=directory owner=root group=root mode=0644
+  file: >
+    path=/var/lib/ceph/radosgw/{{ ansible_fqdn }}
+    state=directory
+    owner=root
+    group=root
+    mode=0644
 - name: Install s3gw.fcgi script
-  copy: src=s3gw.fcgi dest=/var/www/s3gw.fcgi mode=0555 owner=root group=root
+  copy: >
+    src=s3gw.fcgi
+    dest=/var/www/s3gw.fcgi
+    mode=0555
+    owner=root
+    group=root
 - name: Disable default site
   shell: sed -i "s/^[^+#]/#/g" /etc/httpd/conf.d/welcome.conf

View File

@ -1,9 +1,17 @@
 ---
+- name: Copy RGW bootstrap key
+  copy: src=fetch/{{ fsid }}/etc/ceph/keyring.radosgw.gateway dest=/etc/ceph/keyring.radosgw.gateway owner=root group=root mode=600
+  when: cephx
+- name: Set RGW bootstrap key permissions
+  file: path=/etc/ceph/keyring.radosgw.gateway mode=0600 owner=root group=root
+  when: cephx
 ## Check OS family
 #
-- include: RedHat.yml
+- include: install_redhat.yml
   when: ansible_os_family == 'RedHat'
-- include: Debian.yml
+- include: install_debian.yml
   when: ansible_os_family == 'Debian'

View File

@ -1,3 +1,10 @@
 ---
-# You can override default vars defined in defaults/main.yml here,
-# but I would advice to use host or group vars instead
+# You can override vars by using host or group vars
+## Ceph options
+#
+cephx: true
+# Rados Gateway options
+redhat_distro_ceph_extra: centos6.4 # supported distros are centos6.3, centos6.4, centos6, fedora18, fedora19, opensuse12.2, rhel6.3, rhel6.4, rhel6.5, rhel6, sles11sp2
+email_address: foo@bar.com

View File

@ -1,6 +0,0 @@
---
# Variables here are applicable to the current role
# Rados Gateway options
radosgw_interface: eth1 # the public interface which the radosgw talks to the world with, this variable is used in the haproxy role, this does not need to be set if haproxy is not used.

View File

@ -1,3 +1,5 @@
 ---
 - name: restart haproxy
-  service: name=haproxy state=restarted
+  service: >
+    name=haproxy
+    state=restarted

View File

@ -1,15 +1,32 @@
 ---
-- apt_repository: repo=ppa:vbernat/haproxy-1.5 state=present
+- name: Add repository
+  apt_repository: >
+    repo=ppa:vbernat/haproxy-1.5
+    state=present
-- apt: name={{ item }} state=present
+- name: Install haproxy
+  apt: >
+    name={{ item }}
+    state=present
   with_items:
     - haproxy
     - socat
-- copy: src=precise/haproxy dest=/etc/default/haproxy
+- name: Copy default configuration
+  copy: >
+    src=precise/haproxy
+    dest=/etc/default/haproxy
   notify: restart haproxy
-- template: src=precise/haproxy.cfg dest=/etc/haproxy/haproxy.cfg backup=yes
+- name: Create configuration
+  template: >
+    src=precise/haproxy.cfg
+    dest=/etc/haproxy/haproxy.cfg
+    backup=yes
   notify: restart haproxy
-- service: name=haproxy state=started enabled=yes
+- name: Start and enable haproxy
+  service: >
+    name=haproxy
+    state=started
+    enabled=yes

View File

@ -1,3 +1,5 @@
 ---
-# You can override default vars defined in defaults/main.yml here,
-# but I would advice to use host or group vars instead
+# You can override vars by using host or group vars
+# Rados Gateway options
+radosgw_interface: eth1 # the public interface on which radosgw talks to the world; used by the haproxy role, so it does not need to be set if haproxy is not used.

View File

@ -25,7 +25,10 @@
     - ceph-mon
   post_tasks:
     - name: restart monitor(s)
-      service: name=ceph state=restarted args=mon
+      service: >
+        name=ceph
+        state=restarted
+        args=mon
 - hosts: osds
   serial: 1

@ -47,4 +50,7 @
     - ceph-mds
   post_tasks:
     - name: restart metadata server(s)
-      service: name=ceph state=restarted args=mds
+      service: >
+        name=ceph
+        state=restarted
+        args=mds