Merge pull request #648 from ceph/docker

Docker
pull/649/head
Leseb 2016-03-24 18:15:33 +01:00
commit a8a2151ca8
42 changed files with 856 additions and 37 deletions


@ -185,6 +185,57 @@ $ vagrant provision
If you want to use "backports", you can set "true" to `ceph_use_distro_backports`. If you want to use "backports", you can set "true" to `ceph_use_distro_backports`.
Attention, ceph-common doesn't manage backports repository, you must add it yourself. Attention, ceph-common doesn't manage backports repository, you must add it yourself.
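For example, in `group_vars/all` (a minimal sketch):

```yaml
ceph_use_distro_backports: true
```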
### For Atomic systems
If you want to run a containerized deployment on Atomic systems (RHEL/CentOS Atomic), please copy
[vagrant_variables.yml.atomic](vagrant_variables.yml.atomic) to `vagrant_variables.yml`, and copy [group_vars/all.docker](group_vars/all.docker) to `group_vars/all`.
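In other words, from the repository root:

```console
$ cp vagrant_variables.yml.atomic vagrant_variables.yml
$ cp group_vars/all.docker group_vars/all
```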
Since the `centos/atomic-host` box has no spare storage controller for attaching extra disks, the first run of `vagrant up --provider=virtualbox` will likely fail to attach to a storage controller. In that case, run the following command:
```console
VBoxManage storagectl `VBoxManage list vms |grep ceph-ansible_osd0|awk '{print $1}'|tr \" ' '` --name "SATA" --add sata
```
then run `vagrant up --provider=virtualbox` again.
## Setup for Vagrant using OpenStack provider
Install the Vagrant plugin for the openstack provider: `vagrant plugin install vagrant-openstack-provider`.
```bash
$ cp site.yml.sample site.yml
$ cp group_vars/all.docker.sample group_vars/all
$ cp vagrant_variables.yml.openstack vagrant_variables.yml
```
* Edit `vagrant_variables.yml` (a filled-in example follows this list):
  * Set `mon_vms` and `osd_vms` to the numbers you want.
  * If you are using an Atomic image, uncomment the `skip_tags` line.
  * Uncomment the `os_` lines.
  * Set `os_ssh_username` to 'centos' for CentOS images and 'cloud-user' for RHEL images.
  * Set `os_ssh_private_key_path` to '~/.ssh/id_rsa'.
  * Set `os_openstack_auth_url` to the auth URL of your OpenStack cloud.
  * Set `os_username` and `os_password` to what you provided for OpenStack registration, or leave them as ENV vars if you have set the corresponding environment variables for your user.
  * Set `os_tenant_name` to your OpenStack cloud project name.
  * Set `os_region` to your OpenStack cloud region name.
  * Set `os_flavor` to 'm3.medium'. This size has ephemeral storage that will be used by the OSD for the /dev/vdb disk.
  * Set `os_image` to an image found in the Images list in the OpenStack cloud dashboard (e.g. 'centos-atomic-host').
  * Set `os_keypair_name` to the keypair name you used when you did the OpenStack registration.
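Putting it together, the edited portion of `vagrant_variables.yml` might look like the sketch below (all values are illustrative placeholders; substitute your own):

```yaml
mon_vms: 1
osd_vms: 1
#skip_tags: 'with_pkg'    # uncomment for Atomic images
os_ssh_username: 'centos'
os_ssh_private_key_path: '~/.ssh/id_rsa'
os_openstack_auth_url: 'http://keystone.example.com:5000/v2.0/tokens'  # hypothetical endpoint
os_username: ENV['OS_USERNAME']
os_password: ENV['OS_PASSWORD']
os_tenant_name: 'my-project'
os_region: 'RegionOne'
os_flavor: 'm3.medium'
os_image: 'centos-atomic-host'
os_keypair_name: 'my-keypair'
```

Then bring the VMs up: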
```
$ vagrant up --provider=openstack
```
Once the playbook is finished, you should be able to do `vagrant ssh mon0` or
`vagrant ssh osd0` to get to the VMs.
`sudo docker ps` should show the running containers.
When you are done, use `vagrant destroy` to get rid of the VMs. You should
also remove the associated entries in `.ssh/known_hosts`, so that if the IP
addresses get reused by future OpenStack cloud instances there will not be
stale known_hosts entries, as shown below.
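For example, if an instance was reachable at 172.17.72.10 (a hypothetical address), the stale entry can be dropped with:

```console
$ ssh-keygen -R 172.17.72.10
```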
# Want to contribute?

Vagrantfile

@ -19,9 +19,20 @@ STORAGECTL = settings['vagrant_storagectl']
ETH = settings['eth']
DOCKER = settings['docker']
if BOX == 'openstack'
  require 'vagrant-openstack-provider'
  OSVM = true
  USER = settings['os_ssh_username']
else
  OSVM = false
end
ansible_provision = proc do |ansible|
  if DOCKER then
    ansible.playbook = 'site-docker.yml'
    if settings['skip_tags']
      ansible.skip_tags = settings['skip_tags']
    end
  else
    ansible.playbook = 'site.yml'
  end
@ -83,11 +94,32 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
    override.vm.synced_folder '.', '/home/vagrant/sync', disabled: true
  end

  if BOX == 'openstack'
    # OpenStack VMs
    config.vm.provider :openstack do |os|
      config.vm.synced_folder ".", "/home/#{USER}/vagrant", disabled: true
      config.ssh.username = USER
      config.ssh.private_key_path = settings['os_ssh_private_key_path']
      config.ssh.pty = true
      os.openstack_auth_url = settings['os_openstack_auth_url']
      os.username = settings['os_username']
      os.password = settings['os_password']
      os.tenant_name = settings['os_tenant_name']
      os.region = settings['os_region']
      os.flavor = settings['os_flavor']
      os.image = settings['os_image']
      os.keypair_name = settings['os_keypair_name']
      os.security_groups = ['default']
      config.vm.provision "shell", inline: "true", upload_path: "/home/#{USER}/vagrant-shell"
    end
  end
  (0..CLIENTS - 1).each do |i|
    config.vm.define "client#{i}" do |client|
      client.vm.hostname = "ceph-client#{i}"
      if !OSVM
        client.vm.network :private_network, ip: "#{SUBNET}.4#{i}"
      end
      # Virtualbox
      client.vm.provider :virtualbox do |vb|
        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
@ -114,7 +146,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  (0..NRGWS - 1).each do |i|
    config.vm.define "rgw#{i}" do |rgw|
      rgw.vm.hostname = "ceph-rgw#{i}"
      if !OSVM
        rgw.vm.network :private_network, ip: "#{SUBNET}.5#{i}"
      end
      # Virtualbox
      rgw.vm.provider :virtualbox do |vb|
@ -142,8 +176,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  (0..NMDSS - 1).each do |i|
    config.vm.define "mds#{i}" do |mds|
      mds.vm.hostname = "ceph-mds#{i}"
      if !OSVM
        mds.vm.network :private_network, ip: "#{SUBNET}.7#{i}"
      end
      # Virtualbox
      mds.vm.provider :virtualbox do |vb|
        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
@ -158,7 +193,6 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
      mds.vm.provider :libvirt do |lv|
        lv.memory = MEMORY
      end
      # Parallels
      mds.vm.provider "parallels" do |prl|
        prl.name = "ceph-mds#{i}"
@ -170,8 +204,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  (0..NMONS - 1).each do |i|
    config.vm.define "mon#{i}" do |mon|
      mon.vm.hostname = "ceph-mon#{i}"
      if !OSVM
        mon.vm.network :private_network, ip: "#{SUBNET}.1#{i}"
      end
      # Virtualbox
      mon.vm.provider :virtualbox do |vb|
        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
@ -198,9 +233,10 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  (0..NOSDS - 1).each do |i|
    config.vm.define "osd#{i}" do |osd|
      osd.vm.hostname = "ceph-osd#{i}"
      if !OSVM
        osd.vm.network :private_network, ip: "#{SUBNET}.10#{i}"
        osd.vm.network :private_network, ip: "#{SUBNET}.20#{i}"
      end
      # Virtualbox
      osd.vm.provider :virtualbox do |vb|
        (0..1).each do |d|


@ -0,0 +1,57 @@
---
dummy:
#######
# MON #
#######
#mon_containerized_deployment: true
#mon_containerized_deployment_with_kv: false
#mon_containerized_default_ceph_conf_with_kv: true
#kv_type: etcd
#kv_endpoint: 127.0.0.1
#kv_port: 4001
#mon_docker_privileged: true
#ceph_mon_docker_username: ceph
#ceph_mon_docker_imagename: daemon
#ceph_mon_docker_interface: "{{ monitor_interface }}"
#ceph_mon_docker_subnet: "{{ public_network }}" # subnet of the ceph_mon_docker_interface
#ceph_mon_extra_envs: "MON_NAME={{ ansible_hostname }}" # comma separated variables
#######
# OSD #
#######
#osd_containerized_deployment: true
#ceph_osd_docker_username: ceph
#ceph_osd_docker_imagename: daemon
#ceph_osd_docker_extra_env: "CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE" # comma separated variables
#ceph_osd_docker_prepare_env: "CEPH_DAEMON=OSD_CEPH_DISK_PREPARE,OSD_FORCE_ZAP=1" # comma separated variables
#ceph_osd_docker_devices:
# - /dev/sdb
# - /dev/sdc
#######
# MDS #
#######
#mds_containerized_deployment: false
#ceph_mds_docker_username: ceph
#ceph_mds_docker_imagename: daemon
#ceph_mds_docker_extra_env: "MDS_NAME={{ ansible_hostname }}" # comma separated variables
#######
# RGW #
#######
#rgw_containerized_deployment: true
#ceph_rgw_docker_username: ceph
#ceph_rgw_docker_imagename: daemon
#ceph_rgw_civetweb_port: 80
#ceph_rgw_docker_extra_env: "RGW_CIVETWEB_PORT={{ ceph_rgw_civetweb_port }}" # comma separated variables
###########
# RESTAPI #
###########
#restapi_containerized_deployment: false
#ceph_restapi_docker_interface: eth0
#ceph_restapi_port: 5000
#ceph_restapi_docker_username: ceph
#ceph_restapi_docker_imagename: daemon
#ceph_restapi_docker_extra_env: "RESTAPI_IP=0.0.0.0" # comma separated variables


@ -0,0 +1,17 @@
---
# Variables here are applicable to all host groups NOT roles
# This sample file generated by generate_group_vars_sample.sh
# Dummy variable to avoid error because ansible does not recognize the
# file as a good configuration file when no variable in it.
dummy:
#coreos_pypy_version: 4.0.1
#coreos_pypy_arch: linux64
#coreos_pypy_url: https://bitbucket.org/pypy/pypy/downloads/pypy-{{coreos_pypy_version}}-{{coreos_pypy_arch}}.tar.bz2
#pypy_directory: /opt/pypy
#pypy_binary_directory: /opt/bin
#pip_url: https://bootstrap.pypa.io/get-pip.py
#local_temp_directory: /tmp


@ -32,4 +32,5 @@ dummy:
#ceph_mds_docker_username: ceph
#ceph_mds_docker_imagename: daemon
#ceph_mds_docker_extra_env: "MDS_NAME={{ ansible_hostname }}" # comma separated variables
#ceph_docker_on_openstack: false


@ -71,9 +71,13 @@ dummy:
##########
#mon_containerized_deployment: false
#mon_containerized_deployment_with_kv: false
#mon_containerized_default_ceph_conf_with_kv: false
#ceph_mon_docker_interface: eth0
#ceph_mon_docker_subnet: # subnet of the ceph_mon_docker_interface
#ceph_mon_docker_username: ceph
#ceph_mon_docker_imagename: daemon
#ceph_mon_extra_envs: "MON_NAME={{ ansible_hostname }}" # comma separated variables
#ceph_docker_on_openstack: false
#mon_docker_privileged: true


@ -128,4 +128,5 @@ dummy:
#ceph_osd_docker_extra_env: "CEPH_DAEMON=OSD_CEPH_DISK" # comma separated variables
#ceph_osd_docker_devices:
# - /dev/sdb
#ceph_docker_on_openstack: false


@ -24,4 +24,5 @@ dummy:
#ceph_restapi_docker_username: ceph
#ceph_restapi_docker_imagename: daemon
#ceph_restapi_docker_extra_env: "RESTAPI_IP=0.0.0.0" # comma separated variables
#ceph_docker_on_openstack: false


@ -39,4 +39,5 @@ dummy:
#ceph_rgw_docker_username: ceph
#ceph_rgw_docker_imagename: daemon
#ceph_rgw_docker_extra_env: "RGW_CIVETWEB_PORT={{ ceph_rgw_civetweb_port }}" # comma separated variables
#ceph_docker_on_openstack: false


@ -0,0 +1,44 @@
# Ansible role: Ceph Storage Agent
This role installs python and pip on CoreOS.
# Requirements
This role has to be run without gathering facts and with the sudo attribute.
# Role variables
Have a look at: `defaults/main.yml`.
## Mandatory variables
None.
# Dependencies
New CoreOS releases support pypy in versions 2.4 and above. Unfortunately, the CoreOS stable channel
has to be used with pypy version 2.4 or below due to some dependency issues.
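If you need to pin a particular pypy build, the role's defaults (see `defaults/main.yml` in this PR) can be overridden from your group variables; a sketch with illustrative values:

```yaml
coreos_pypy_version: 2.4.0
coreos_pypy_arch: linux64
```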
# Example Playbook
```yaml
- hosts: servers
  remote_user: core
  become: True
  gather_facts: false
  roles:
    - { role: ceph-common-coreos }
```
# Contribution
**THIS REPOSITORY DOES NOT ACCEPT PULL REQUESTS**
**PULL REQUESTS MUST GO THROUGH [CEPH-ANSIBLE](https://github.com/ceph/ceph-ansible)**
# License
Apache
# Author Information
This role was created by Piotr Prokop.


@ -0,0 +1,7 @@
coreos_pypy_version: 4.0.1
coreos_pypy_arch: linux64
coreos_pypy_url: https://bitbucket.org/pypy/pypy/downloads/pypy-{{coreos_pypy_version}}-{{coreos_pypy_arch}}.tar.bz2
pypy_directory: /opt/pypy
pypy_binary_directory: /opt/bin
pip_url: https://bootstrap.pypa.io/get-pip.py
local_temp_directory: /tmp


@ -0,0 +1,30 @@
---
- name: download get_pip.py
  raw: cd $HOME && wget {{pip_url}}

- name: run get-pip.py
  raw: "{{pypy_binary_directory}}/python $HOME/get-pip.py"

- name: create local temp directory
  local_action: raw mkdir -p {{local_temp_directory}}
  become: no

- name: prepare install_pip.sh
  local_action: template src=install_pip.sh.j2 dest={{local_temp_directory}}/install_pip.sh
  become: no

- name: run pip.sh
  script: "{{local_temp_directory}}/install_pip.sh"

- name: add execute permission
  raw: chmod a+x {{pypy_directory}}/pip

- name: move pip to binary directory
  raw: mv {{pypy_directory}}/pip {{pypy_binary_directory}}/pip

- name: create .pip
  raw: touch $HOME/.pip

- name: remove pip.sh
  local_action: file path="{{local_temp_directory}}/pip.sh" state=absent
  become: no


@ -0,0 +1,33 @@
---
- name: download python
  raw: cd $HOME && wget -O - {{coreos_pypy_url}} |tar -xjf -

- name: move pypy to pypy_install_directory
  raw: mv $HOME/pypy-{{coreos_pypy_version}}-{{coreos_pypy_arch}} {{pypy_directory}}

- name: create local temp directory
  local_action: raw mkdir -p {{local_temp_directory}}
  become: no

- name: prepare python executable
  local_action: template src=install_python.sh.j2 dest={{local_temp_directory}}/install_python.sh
  become: no

- name: fix library
  raw: ln -s /lib64/libncurses.so.5.9 {{pypy_directory}}/lib_pypy/libtinfo.so.5

- name: run install_python.sh
  script: "{{local_temp_directory}}/install_python.sh"

- name: add execute permission
  raw: chmod a+x {{pypy_directory}}/python

- name: move python to binary directory
  raw: mv {{pypy_directory}}/python {{pypy_binary_directory}}/python

- name: create .python
  raw: touch $HOME/.python

- name: remove install_python.sh
  local_action: file path="{{local_temp_directory}}/install_python.sh" state=absent
  become: no


@ -0,0 +1,16 @@
---
- name: check if there is python
  raw: stat $HOME/.python
  register: need_python
  ignore_errors: true

- include: install_pypy.yml
  when: need_python | failed

- name: check if there is pip
  raw: stat $HOME/.pip
  register: need_pip
  ignore_errors: true

- include: install_pip.yml
  when: need_pip | failed and need_python | failed


@ -0,0 +1,6 @@
#!/bin/bash
cat > {{pypy_directory}}/pip << EOF
#!/bin/bash
LD_LIBRARY_PATH={{pypy_directory}}/lib_pypy:$LD_LIBRARY_PATH exec {{pypy_directory}}/bin/pip "\$@"
EOF


@ -0,0 +1,5 @@
#!/bin/bash
cat > {{pypy_directory}}/python << EOF
#!/bin/bash
LD_LIBRARY_PATH={{pypy_directory}}/lib_pypy:$LD_LIBRARY_PATH exec {{pypy_directory}}/bin/pypy "\$@"
EOF


@ -15,7 +15,7 @@ auth service required = none
auth client required = none
auth supported = none
{% endif %}
{% if not mon_containerized_deployment_with_kv %}fsid = {{ fsid }}{% endif %}
max open files = {{ max_open_files }}
osd pool default pg num = {{ pool_default_pg_num }}
osd pool default pgp num = {{ pool_default_pgp_num }}
@ -88,6 +88,7 @@ debug mon = {{ debug_mon_level }}
debug paxos = {{ debug_mon_level }}
debug auth = {{ debug_mon_level }}
{% endif %}
{% if not mon_containerized_deployment_with_kv %}
{% for host in groups[mon_group_name] %}
{% if hostvars[host]['ansible_fqdn'] is defined and mon_use_fqdn %}
[mon.{{ hostvars[host]['ansible_fqdn'] }}]
@ -102,6 +103,7 @@ host = {{ hostvars[host]['ansible_hostname'] }}
{% include 'mon_addr_address.j2' %}
{% endif %}
{% endfor %}
{% endif %}

[osd]
osd mkfs type = {{ osd_mkfs_type }}


@ -24,3 +24,4 @@ mds_containerized_deployment: false
ceph_mds_docker_username: ceph
ceph_mds_docker_imagename: daemon
ceph_mds_docker_extra_env: "MDS_NAME={{ ansible_hostname }}" # comma separated variables
ceph_docker_on_openstack: false


@ -50,6 +50,13 @@
    ansible_os_family == 'RedHat' and
    ansible_pkg_mgr == "dnf"

# NOTE (jimcurtis): need at least version 1.9.0 of six or we get:
# re:NameError: global name 'DEFAULT_DOCKER_API_VERSION' is not defined
- name: install six
  pip:
    name: six
    version: 1.9.0

# NOTE (leseb): for version 1.1.0 because https://github.com/ansible/ansible-modules-core/issues/1227
- name: install docker-py
  pip:


@ -63,8 +63,12 @@ openstack_keys:
##########
mon_containerized_deployment: false
mon_containerized_deployment_with_kv: false
mon_containerized_default_ceph_conf_with_kv: false
ceph_mon_docker_interface: eth0
#ceph_mon_docker_subnet: # subnet of the ceph_mon_docker_interface
ceph_mon_docker_username: ceph
ceph_mon_docker_imagename: daemon
ceph_mon_extra_envs: "MON_NAME={{ ansible_hostname }}" # comma separated variables
ceph_docker_on_openstack: false
mon_docker_privileged: true


@ -5,7 +5,7 @@
  wait_for:
    path: /etc/ceph/ceph.client.admin.keyring

- name: create ceph rest api keyring when mon is not containerized
  command: ceph auth get-or-create client.restapi osd 'allow *' mon 'allow *' -o /etc/ceph/ceph.client.restapi.keyring
  args:
    creates: /etc/ceph/ceph.client.restapi.keyring


@ -0,0 +1,18 @@
---
- name: create ceph conf directory
  file:
    path: /etc/ceph
    state: directory
    owner: root
    group: root
    mode: 0644

- name: generate ceph configuration file
  config_template:
    src: "{{ playbook_dir }}/roles/ceph-common/templates/ceph.conf.j2"
    dest: /etc/ceph/ceph.conf
    owner: "root"
    group: "root"
    mode: "0644"
    config_overrides: "{{ ceph_conf_overrides }}"
    config_type: ini


@ -2,8 +2,8 @@
- name: set config and keys paths
  set_fact:
    ceph_config_keys:
      - /etc/ceph/ceph.conf
      - /etc/ceph/ceph.client.admin.keyring
      - /etc/ceph/monmap
      - /etc/ceph/ceph.mon.keyring
      - /var/lib/ceph/bootstrap-osd/ceph.keyring
@ -20,7 +20,7 @@
- name: try to fetch ceph config and keys
  copy:
    src: "{{ playbook_dir }}/{{ fetch_directory }}/docker_mon_files/{{ item.0 }}"
    dest: "{{ item.0 }}"
    owner: root
    group: root


@ -5,13 +5,42 @@
  changed_when: false
  failed_when: false

- name: check if it is Atomic host
  stat: path=/run/ostree-booted
  register: stat_ostree

- name: set fact for using Atomic host
  set_fact:
    is_atomic='{{ stat_ostree.stat.exists }}'

- include: checks.yml
  when: ceph_health.rc != 0 and not mon_containerized_deployment_with_kv

- include: pre_requisite.yml

- include: selinux.yml
  when: ansible_os_family == 'RedHat'

# let the first mon create configs and keyrings
- include: create_configs.yml
  when: inventory_hostname == groups.mons[0] and mon_containerized_default_ceph_conf_with_kv

- include: fetch_configs.yml
  when: not mon_containerized_deployment_with_kv

- include: start_docker_monitor.yml

- include: copy_configs.yml
  when: not mon_containerized_deployment_with_kv

- name: create ceph rest api keyring when mon is containerized
  command: docker exec {{ ansible_hostname }} ceph auth get-or-create client.restapi osd 'allow *' mon 'allow *' -o /etc/ceph/ceph.client.restapi.keyring
  args:
    creates: /etc/ceph/ceph.client.restapi.keyring
  changed_when: false
  when:
    cephx and
    mon_containerized_deployment and
    groups[restapi_group_name] is defined and
    inventory_hostname == groups.mons|last and
    not mon_containerized_deployment_with_kv


@ -19,6 +19,8 @@
    - docker
    - docker.io
  when: ansible_distribution == 'Ubuntu'
  tags:
    with_pkg
- name: install pip and docker on debian
  apt:
@ -29,6 +31,8 @@
    - python-pip
    - docker-engine
  when: ansible_distribution == 'Debian'
  tags:
    with_pkg
- name: install pip and docker on redhat
  yum:
@ -40,6 +44,8 @@
  when:
    ansible_os_family == 'RedHat' and
    ansible_pkg_mgr == "yum"
  tags:
    with_pkg
- name: install pip and docker on redhat
  dnf:
@ -51,9 +57,44 @@
  when:
    ansible_os_family == 'RedHat' and
    ansible_pkg_mgr == "dnf"
  tags:
    with_pkg

- name: install epel-release on redhat
  yum:
    name: epel-release
    state: present
  when: ansible_os_family == 'RedHat'
  tags:
    with_pkg

# NOTE (jimcurtis): need at least version 1.9.0 of six or we get:
# re:NameError: global name 'DEFAULT_DOCKER_API_VERSION' is not defined
- name: install six
  pip:
    name: six
    version: 1.9.0
  tags:
    with_pkg

- name: pause after docker install before starting (on openstack vms)
  pause: seconds=5
  when: ceph_docker_on_openstack
  tags:
    with_pkg

- name: start docker service
  service:
    name: docker
    state: started
    enabled: yes
  tags:
    with_pkg

# NOTE (leseb): for version 1.1.0 because https://github.com/ansible/ansible-modules-core/issues/1227
- name: install docker-py
  pip:
    name: docker-py
    version: 1.1.0
  tags:
    with_pkg


@ -1,9 +1,104 @@
---
- name: pull ceph daemon image
  shell: "docker pull {{ ceph_mon_docker_username }}/{{ ceph_mon_docker_imagename }}"

- name: populate kv_store with default ceph.conf
  docker:
    name: populate-kv-store
    image: ceph/daemon
    command: populate_kvstore
    net: host
    env:
      KV_TYPE: "{{kv_type}}"
      KV_IP: "{{kv_endpoint}}"
      KV_PORT: "{{kv_port}}"
  run_once: true
  when:
    inventory_hostname == groups.mons[0] and
    mon_containerized_deployment_with_kv and
    mon_containerized_default_ceph_conf_with_kv

- name: populate kv_store with custom ceph.conf
  docker:
    name: populate-kv-store
    image: ceph/daemon
    command: populate_kvstore
    net: host
    env:
      KV_TYPE: "{{kv_type}}"
      KV_IP: "{{kv_endpoint}}"
      KV_PORT: "{{kv_port}}"
    volumes:
      - /etc/ceph/ceph.conf:/etc/ceph/ceph.defaults
  run_once: true
  when:
    inventory_hostname == groups.mons[0] and
    mon_containerized_deployment_with_kv and
    not mon_containerized_default_ceph_conf_with_kv

- name: delete populate-kv-store docker
  docker:
    name: populate-kv-store
    state: absent
    image: ceph/daemon

# Use systemd to manage container on Atomic host and CoreOS
- name: generate systemd unit file
  become: true
  template:
    src: ceph-mon.service.j2
    dest: /var/lib/ceph/ceph-mon@.service
    owner: "root"
    group: "root"
    mode: "0644"

- name: link systemd unit file for mon instance
  file:
    src: /var/lib/ceph/ceph-mon@.service
    dest: /etc/systemd/system/multi-user.target.wants/ceph-mon@{{ ansible_hostname }}.service
    state: link
  when:
    is_atomic or
    ansible_os_family == 'CoreOS'

- name: enable systemd unit file for mon instance
  shell: systemctl enable /etc/systemd/system/multi-user.target.wants/ceph-mon@{{ ansible_hostname }}.service
  failed_when: false
  changed_when: false
  when:
    is_atomic or
    ansible_os_family == 'CoreOS'

- name: reload systemd unit files
  shell: systemctl daemon-reload
  when:
    is_atomic or
    ansible_os_family == 'CoreOS'

- name: systemd start mon container
  service:
    name: ceph-mon@{{ ansible_hostname }}
    state: started
    enabled: yes
  changed_when: false
  when:
    is_atomic or
    ansible_os_family == 'CoreOS'

- name: wait for ceph.conf exists
  wait_for:
    path: /etc/ceph/ceph.conf
  when: is_atomic

- name: run the ceph monitor docker image
  docker:
    image: "{{ ceph_mon_docker_username }}/{{ ceph_mon_docker_imagename }}"
    name: "{{ ansible_hostname }}"
    net: "host"
    state: "running"
    privileged: "{{ mon_docker_privileged }}"
    env: "MON_IP={{ hostvars[inventory_hostname]['ansible_' + ceph_mon_docker_interface]['ipv4']['address'] }},CEPH_DAEMON=MON,CEPH_PUBLIC_NETWORK={{ ceph_mon_docker_subnet }},{{ ceph_mon_extra_envs }}"
    volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph"
  when:
    not is_atomic and
    ansible_os_family != 'CoreOS'


@ -0,0 +1,31 @@
[Unit]
Description=Ceph Monitor
After=docker.service
[Service]
EnvironmentFile=-/etc/environment
ExecStartPre=-/usr/bin/docker rm %i
ExecStartPre=/usr/bin/mkdir -p /etc/ceph /var/lib/ceph/mon
ExecStart=/usr/bin/docker run --rm --name %i --net=host \
{% if not mon_containerized_deployment_with_kv -%}
-v /var/lib/ceph:/var/lib/ceph \
-v /etc/ceph:/etc/ceph \
{% else -%}
-e KV_TYPE={{kv_type}} \
-e KV_IP={{kv_endpoint}} \
{% endif -%}
--privileged \
-e CEPH_DAEMON=MON \
-e MON_IP={{ hostvars[inventory_hostname]['ansible_' + ceph_mon_docker_interface]['ipv4']['address'] }} \
-e CEPH_PUBLIC_NETWORK={{ ceph_mon_docker_subnet }} \
-e MON_NAME={{ ansible_hostname }} \
--name={{ ansible_hostname }} \
{{ ceph_mon_docker_username }}/{{ ceph_mon_docker_imagename }}
ExecStopPost=-/usr/bin/docker stop %i
Restart=always
RestartSec=10s
TimeoutStartSec=120
TimeoutStopSec=15
[Install]
WantedBy=multi-user.target


@ -120,3 +120,4 @@ ceph_osd_docker_imagename: daemon
ceph_osd_docker_extra_env: "CEPH_DAEMON=OSD_CEPH_DISK" # comma separated variables
#ceph_osd_docker_devices:
# - /dev/sdb
ceph_docker_on_openstack: false


@ -5,6 +5,14 @@
    - /etc/ceph/ceph.conf
    - /var/lib/ceph/bootstrap-osd/ceph.keyring

- name: wait for ceph.conf and keys
  local_action: >
    wait_for
    path="{{ playbook_dir }}/{{ fetch_directory }}/docker_mon_files/{{ item.0 }}"
  become: false
  with_together:
    - ceph_config_keys

- name: stat for ceph config and keys
  local_action: stat path={{ fetch_directory }}/docker_mon_files/{{ item }}
  with_items: ceph_config_keys
@ -13,9 +21,9 @@
  failed_when: false
  register: statconfig

- name: try to copy ceph config and keys
  copy:
    src: "{{ playbook_dir }}/{{ fetch_directory }}/docker_mon_files/{{ item.0 }}"
    dest: "{{ item.0 }}"
    owner: root
    group: root
@ -23,5 +31,3 @@
  changed_when: false
  with_together:
    - ceph_config_keys


@ -8,6 +8,14 @@
- include: checks.yml
  when: ceph_health.rc != 0

- name: check if it is Atomic host
  stat: path=/run/ostree-booted
  register: stat_ostree

- name: set fact for using Atomic host
  set_fact:
    is_atomic='{{ stat_ostree.stat.exists }}'

- include: pre_requisite.yml

- include: selinux.yml
  when: ansible_os_family == 'RedHat'


@ -17,6 +17,8 @@
    - docker
    - docker.io
  when: ansible_distribution == 'Ubuntu'
  tags:
    with_pkg
- name: install pip and docker on debian
  apt:
@ -27,6 +29,8 @@
    - python-pip
    - docker-engine
  when: ansible_distribution == 'Debian'
  tags:
    with_pkg
- name: install pip and docker on redhat
  yum:
@ -38,6 +42,8 @@
  when:
    ansible_os_family == 'RedHat' and
    ansible_pkg_mgr == "yum"
  tags:
    with_pkg
- name: install pip and docker on redhat
  dnf:
@ -49,9 +55,44 @@
  when:
    ansible_os_family == 'RedHat' and
    ansible_pkg_mgr == "dnf"
  tags:
    with_pkg

- name: install epel-release on redhat
  yum:
    name: epel-release
    state: present
  when: ansible_os_family == 'RedHat'
  tags:
    with_pkg

# NOTE (jimcurtis): need at least version 1.9.0 of six or we get:
# re:NameError: global name 'DEFAULT_DOCKER_API_VERSION' is not defined
- name: install six
  pip:
    name: six
    version: 1.9.0
  tags:
    with_pkg

- name: pause after docker install before starting (on openstack vms)
  pause: seconds=5
  when: ceph_docker_on_openstack
  tags:
    with_pkg

- name: start docker service
  service:
    name: docker
    state: started
    enabled: yes
  tags:
    with_pkg

# NOTE (leseb): for version 1.1.0 because https://github.com/ansible/ansible-modules-core/issues/1227
- name: install docker-py
  pip:
    name: docker-py
    version: 1.1.0
  tags:
    with_pkg


@ -1,4 +1,70 @@
---
# For openstack VMs modify the mount point below depending on if the Openstack
# VM deploy tool defaults to mounting ephemeral disks
- name: umount ceph disk (if on openstack)
  mount:
    name: /mnt
    src: /dev/vdb
    fstype: ext3
    state: unmounted
  when: ceph_docker_on_openstack

# (rootfs) for reasons I haven't figured out, docker pull and run will fail.
- name: pull ceph daemon image
  shell: "docker pull {{ ceph_osd_docker_username }}/{{ ceph_osd_docker_imagename }}"

- name: prepare ceph osd disk
  docker:
    image: "{{ ceph_osd_docker_username }}/{{ ceph_osd_docker_imagename }}"
    name: "{{ ansible_hostname }}-osd-prepare-{{ item | regex_replace('/', '') }}"
    net: host
    pid: host
    state: running
    privileged: yes
    env: "OSD_DEVICE={{ item }},{{ ceph_osd_docker_prepare_env }}"
    volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/dev/:/dev/"
  with_items: ceph_osd_docker_devices
  when: ceph_osd_docker_prepare_env is defined

# Use systemd to manage container on Atomic host
- name: generate systemd unit file
  become: true
  template:
    src: ceph-osd.service.j2
    dest: /var/lib/ceph/ceph-osd@.service
    owner: "root"
    group: "root"
    mode: "0644"
  failed_when: false

- name: link systemd unit file for osd instance
  file:
    src: /var/lib/ceph/ceph-osd@.service
    dest: /etc/systemd/system/multi-user.target.wants/ceph-osd@{{ item | basename }}.service
    state: link
  with_items: ceph_osd_docker_devices
  when: is_atomic

- name: enable systemd unit file for osd instance
  shell: systemctl enable /etc/systemd/system/multi-user.target.wants/ceph-osd@{{ item | basename }}.service
  failed_when: false
  changed_when: false
  with_items: ceph_osd_docker_devices
  when: is_atomic

- name: reload systemd unit files
  shell: systemctl daemon-reload
  when: is_atomic

- name: systemd start osd container
  service:
    name: ceph-osd@{{ item | basename }}
    state: started
    enabled: yes
  changed_when: false
  with_items: ceph_osd_docker_devices
  when: is_atomic

- name: run the ceph osd docker image
  docker:
    image: "{{ ceph_osd_docker_username }}/{{ ceph_osd_docker_imagename }}"
@ -10,3 +76,4 @@
env: "OSD_DEVICE={{ item }},{{ ceph_osd_docker_extra_env }}" env: "OSD_DEVICE={{ item }},{{ ceph_osd_docker_extra_env }}"
volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/dev/:/dev/" volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/dev/:/dev/"
with_items: ceph_osd_docker_devices with_items: ceph_osd_docker_devices
when: not is_atomic


@ -0,0 +1,25 @@
[Unit]
Description=Ceph OSD
After=docker.service
[Service]
EnvironmentFile=/etc/environment
ExecStartPre=-/usr/bin/docker stop {{ ansible_hostname }}-osd-dev%i
ExecStartPre=-/usr/bin/docker rm {{ ansible_hostname }}-osd-dev%i
ExecStart=/usr/bin/docker run --rm --net=host --pid=host \
-v /var/lib/ceph:/var/lib/ceph \
-v /etc/ceph:/etc/ceph \
-v /dev:/dev \
--privileged \
-e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
-e OSD_DEVICE=/dev/%i \
--name={{ ansible_hostname }}-osd-dev%i \
{{ ceph_osd_docker_username }}/{{ ceph_osd_docker_imagename }}
ExecStop=-/usr/bin/docker stop {{ ansible_hostname }}-osd-dev%i
Restart=always
RestartSec=10s
TimeoutStartSec=120
TimeoutStopSec=15
[Install]
WantedBy=multi-user.target


@ -16,3 +16,4 @@ ceph_restapi_port: 5000
ceph_restapi_docker_username: ceph
ceph_restapi_docker_imagename: daemon
ceph_restapi_docker_extra_env: "RESTAPI_IP=0.0.0.0" # comma separated variables
ceph_docker_on_openstack: false


@ -15,7 +15,7 @@
- name: try to fetch ceph config and keys
  copy:
    src: "{{ playbook_dir }}/{{ fetch_directory }}/docker_mon_files/{{ item.0 }}"
    dest: "{{ item.0 }}"
    owner: root
    group: root


@ -9,6 +9,8 @@
    - docker
    - docker.io
  when: ansible_distribution == 'Ubuntu'
  tags:
    with_pkg
- name: install pip and docker on debian
  apt:
@ -19,6 +21,8 @@
    - python-pip
    - docker-engine
  when: ansible_distribution == 'Debian'
  tags:
    with_pkg
- name: install pip and docker on redhat
  yum:
@ -30,6 +34,8 @@
  when:
    ansible_os_family == 'RedHat' and
    ansible_pkg_mgr == "yum"
  tags:
    with_pkg
- name: install pip and docker on redhat
  dnf:
@ -41,9 +47,36 @@
  when:
    ansible_os_family == 'RedHat' and
    ansible_pkg_mgr == "dnf"
  tags:
    with_pkg

- name: install epel-release on redhat
  yum:
    name: epel-release
    state: present
  when: ansible_os_family == 'RedHat'
  tags:
    with_pkg

# NOTE (jimcurtis): need at least version 1.9.0 of six or we get:
# re:NameError: global name 'DEFAULT_DOCKER_API_VERSION' is not defined
- name: install six
  pip:
    name: six
    version: 1.9.0
  tags:
    with_pkg

- name: pause after docker install before starting (on openstack vms)
  pause: seconds=5
  when: ceph_docker_on_openstack
  tags:
    with_pkg

- name: start docker service
  service:
    name: docker
    state: started
    enabled: yes
  tags:
    with_pkg


@ -31,3 +31,4 @@ ceph_rgw_civetweb_port: 80
ceph_rgw_docker_username: ceph
ceph_rgw_docker_imagename: daemon
ceph_rgw_docker_extra_env: "RGW_CIVETWEB_PORT={{ ceph_rgw_civetweb_port }}" # comma separated variables
ceph_docker_on_openstack: false


@ -15,7 +15,7 @@
- name: try to fetch ceph config and keys
  copy:
    src: "{{ playbook_dir }}/{{ fetch_directory }}/docker_mon_files/{{ item.0 }}"
    dest: "{{ item.0 }}"
    owner: root
    group: root


@ -17,6 +17,8 @@
    - docker
    - docker.io
  when: ansible_distribution == 'Ubuntu'
  tags:
    with_pkg
- name: install pip and docker on debian
  apt:
@ -27,6 +29,8 @@
    - python-pip
    - docker-engine
  when: ansible_distribution == 'Debian'
  tags:
    with_pkg
- name: install pip and docker on redhat
  yum:
@ -38,6 +42,8 @@
  when:
    ansible_os_family == 'RedHat' and
    ansible_pkg_mgr == "yum"
  tags:
    with_pkg
- name: install pip and docker on redhat
  dnf:
@ -49,9 +55,36 @@
  when:
    ansible_os_family == 'RedHat' and
    ansible_pkg_mgr == "dnf"
  tags:
    with_pkg

- name: install epel-release on redhat
  yum:
    name: epel-release
    state: present
  when: ansible_os_family == 'RedHat'
  tags:
    with_pkg

# NOTE (jimcurtis): need at least version 1.9.0 of six or we get:
# re:NameError: global name 'DEFAULT_DOCKER_API_VERSION' is not defined
- name: install six
  pip:
    name: six
    version: 1.9.0
  tags:
    with_pkg

- name: pause after docker install before starting (on openstack vms)
  pause: seconds=5
  when: ceph_docker_on_openstack
  tags:
    with_pkg

- name: start docker service
  service:
    name: docker
    state: started
    enabled: yes
  tags:
    with_pkg


@ -1,10 +1,13 @@
---
- name: pull ceph daemon image
  shell: "docker pull {{ ceph_rgw_docker_username }}/{{ ceph_rgw_docker_imagename }}"

- name: run the rados gateway docker image
  docker:
    image: "{{ ceph_rgw_docker_username }}/{{ ceph_rgw_docker_imagename }}"
    name: ceph-{{ ansible_hostname }}-rgw
    expose: "{{ ceph_rgw_civetweb_port }}"
    ports: "{{ ceph_rgw_civetweb_port }}:{{ ceph_rgw_civetweb_port }}"
    state: running
    env: "CEPH_DAEMON=RGW,{{ ceph_rgw_docker_extra_env }}"
    volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph"


@ -0,0 +1,25 @@
---
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
client_vms: 0
# SUBNET TO USE FOR THE VMS
subnet: 192.168.0
# MEMORY
memory: 1024
disks: "[ '/dev/sdb', '/dev/sdc' ]"
eth: 'enp0s8'
vagrant_box: centos/atomic-host
# if vagrant fails to attach storage controller, add the storage controller name by:
# VBoxManage storagectl `VBoxManage list vms |grep ceph-ansible_osd0|awk '{print $1}'|tr \" ' '` --name "SATA" --add sata
# and "vagrant up" again
vagrant_storagectl: 'SATA'
skip_tags: 'with_pkg'


@ -0,0 +1,37 @@
---
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
client_vms: 0
# SUBNET TO USE FOR THE VMS
# Use whatever private subnet your OpenStack VMs are given
subnet: 172.17.72
# For OpenStack VMs, the disk will depend on what you are allocated
disks: "[ '/dev/vdb' ]"
# For OpenStack VMs, the LAN is usually eth0
eth: 'eth0'
# For OpenStack VMs, choose the following box instead
vagrant_box: 'openstack'
# For Atomic (RHEL or CentOS) uncomment the line below
#skip_tags: 'with_pkg'
# For deploying on OpenStack VMs uncomment these vars and assign values.
# You can use env vars for the values if it makes sense.
#os_ssh_username :
#os_ssh_private_key_path :
#os_openstack_auth_url :
#os_username : ENV['OS_USERNAME']
#os_password : ENV['OS_PASSWORD']
#os_tenant_name :
#os_region :
#os_flavor :
#os_image :
#os_keypair_name :