Merge pull request #1287 from ceph/no-priviledge-mode

No privilege mode
pull/1317/head
Andrew Schoen 2017-02-22 15:24:18 -06:00 committed by GitHub
commit e874c7e8ac
21 changed files with 1321 additions and 67 deletions

View File

@ -88,4 +88,5 @@ dummy:
#mon_docker_privileged: false
#mon_docker_net_host: true
#ceph_config_keys: [] # DON'T TOUCH ME
#ceph_mon_docker_enable_centos_extra_repo: false

View File

@ -134,6 +134,11 @@ dummy:
# - /dev/sdf
# - /dev/sdg
# - /dev/sdg
#
# NOTE(leseb):
# In a containerized scenario we only support A SINGLE journal
# for all the OSDs on a given machine. If you use more than one, bad things will happen.
# This is a limitation we plan to fix at some point.
#raw_journal_devices: []
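#
# For the containerized case this therefore normally means listing a single
# journal device shared by every OSD on the host, for example (illustrative
# device name, kept commented like the rest of this sample):
#raw_journal_devices:
#  - /dev/sdf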
@ -176,11 +181,31 @@ dummy:
#kv_type: etcd
#kv_endpoint: 127.0.0.1
#kv_port: 4001
#ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
#ceph_docker_image: "ceph/daemon"
#ceph_docker_image_tag: latest
#ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }}
#ceph_osd_docker_devices: "{{ devices }}"
#ceph_docker_on_openstack: false
#ceph_config_keys: [] # DON'T TOUCH ME
#ceph_docker_on_openstack: false
# PREPARE DEVICE
# Make sure you only pass a single device to raw_journal_devices, otherwise this will fail horribly.
# This is why we use [0] in the examples below.
#
# WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
#
# Examples:
# Journal collocated: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
# Dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
# Encrypted OSD: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1
# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
#
#ceph_osd_docker_devices: "{{ devices }}"
#ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
# ACTIVATE DEVICE
# Examples:
# Journal collocated or Dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }}
# Encrypted OSD or Encrypted OSD with dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1
#
#ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }}
#ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command
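Putting the PREPARE and ACTIVATE settings together, a group_vars override for the "encrypted OSD with dedicated journal" scenario could look roughly like the sketch below (device names are illustrative; the env strings are the ones shown in the examples above):

devices:
  - /dev/sda
raw_journal_devices:
  - /dev/sdb
ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1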

View File

@ -283,44 +283,36 @@
with_items: "{{ ceph_osd_docker_devices }}"
ignore_errors: true
- name: zap ceph osd disk
docker:
image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
name: "{{ ansible_hostname }}-osd-zap-{{ item | regex_replace('/', '') }}"
net: host
pid: host
state: started
privileged: yes
env: "CEPH_DAEMON=zap_device,OSD_DEVICE={{ item }}"
volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/dev:/dev,/run:/run"
with_items: "{{ ceph_osd_docker_devices }}"
- name: zap ceph osd disks
shell: |
docker run \
--privileged=true \
--name {{ ansible_hostname }}-osd-zap-{{ item | regex_replace('/', '') }} \
-v /dev/:/dev/ \
-e OSD_DEVICE={{ item }} \
{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
zap_device
with_items:
- "{{ ceph_osd_docker_devices }}"
- "{{ raw_journal_devices }}"
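For a single disk, the loop above reduces to a plain docker invocation; an illustrative expansion, assuming the default ceph/daemon:latest image and /dev/sdb as the disk to wipe:

docker run --privileged=true \
  --name $(hostname)-osd-zap-devsdb \
  -v /dev/:/dev/ \
  -e OSD_DEVICE=/dev/sdb \
  ceph/daemon:latest \
  zap_device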
- name: wait until the zap containers die
shell: |
docker ps | grep -sq {{ ansible_hostname }}-osd-zap
register: zap_alive
failed_when: false
until: zap_alive.rc != 0
retries: 5
delay: 10
- name: remove ceph osd zap disk container
docker:
image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
name: "{{ ansible_hostname }}-osd-zap-{{ item | regex_replace('/', '') }}"
state: absent
with_items: "{{ ceph_osd_docker_devices }}"
# zap twice
- name: zap ceph osd disk
docker:
image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
name: "{{ ansible_hostname }}-osd-zap-{{ item | regex_replace('/', '') }}"
net: host
pid: host
state: started
privileged: yes
env: "CEPH_DAEMON=zap_device,OSD_DEVICE={{ item }}"
volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/dev:/dev,/run:/run"
with_items: "{{ ceph_osd_docker_devices }}"
- name: remove ceph osd zap disk container
docker:
image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
name: "{{ ansible_hostname }}-osd-zap-{{ item | regex_replace('/', '') }}"
state: absent
with_items: "{{ ceph_osd_docker_devices }}"
with_items:
- "{{ ceph_osd_docker_devices }}"
- "{{ raw_journal_devices }}"
- name: remove ceph osd service
file:

View File

@ -80,3 +80,4 @@ ceph_docker_on_openstack: false
mon_docker_privileged: false
mon_docker_net_host: true
ceph_config_keys: [] # DON'T TOUCH ME
ceph_mon_docker_enable_centos_extra_repo: false

View File

@ -41,6 +41,7 @@
enabled: yes
when:
- ansible_distribution == 'CentOS'
- ceph_mon_docker_enable_centos_extra_repo
tags:
with_pkg
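With this guard in place the extras repository is no longer enabled by default on CentOS monitor hosts; if it is still wanted, it can be re-enabled per group or host with the new flag:

ceph_mon_docker_enable_centos_extra_repo: true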

View File

@ -126,6 +126,11 @@ raw_multi_journal: false
# - /dev/sdf
# - /dev/sdg
# - /dev/sdg
#
# NOTE(leseb):
# In a containerized scenario we only support A SINGLE journal
# for all the OSDs on a given machine. If you use more than one, bad things will happen.
# This is a limitation we plan to fix at some point.
raw_journal_devices: []
@ -168,10 +173,30 @@ osd_containerized_deployment_with_kv: false
kv_type: etcd
kv_endpoint: 127.0.0.1
kv_port: 4001
ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
ceph_docker_image: "ceph/daemon"
ceph_docker_image_tag: latest
ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }}
ceph_osd_docker_devices: "{{ devices }}"
ceph_docker_on_openstack: false
ceph_config_keys: [] # DON'T TOUCH ME
ceph_docker_on_openstack: false
# PREPARE DEVICE
# Make sure you only pass a single device to raw_journal_devices, otherwise this will fail horribly.
# This is why we use [0] in the examples below.
#
# WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
#
# Examples:
# Journal collocated: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
# Dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
# Encrypted OSD: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1
# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
#
ceph_osd_docker_devices: "{{ devices }}"
ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
# ACTIVATE DEVICE
# Examples:
# Journal collocated or Dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }}
# Encrypted OSD or Encrypted OSD with dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1
#
ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }}
ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command
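As a quick pre-flight check for the DMCRYPT warning above, the Docker engine version on each OSD host can be confirmed before deploying (an illustrative command, not part of the playbooks):

docker --version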

View File

@ -24,13 +24,13 @@
docker run --net=host \
--pid=host \
--privileged=true \
--name="{{ ansible_hostname }}-osd-prepare-{{ item.0 |
regex_replace('/', '') }}" \
--name="{{ ansible_hostname }}-osd-prepare-{{ item.0 | regex_replace('/', '') }}" \
-v /etc/ceph:/etc/ceph \
-v /var/lib/ceph/:/var/lib/ceph/ \
-v /dev:/dev \
-v /etc/localtime:/etc/localtime:ro \
-e "OSD_DEVICE={{ item.0 }}" \
-e "OSD_JOURNAL_UUID=$(python -c "import uuid; print uuid.uuid5(uuid.NAMESPACE_DNS, '{{ ansible_machine_id }}{{ item.0 }}')")" \
-e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
{{ ceph_osd_docker_prepare_env }} \
"{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
@ -47,11 +47,11 @@
docker run --net=host \
--pid=host \
--privileged=true \
--name="{{ ansible_hostname }}-osd-prepare-{{ item.0 |
regex_replace('/', '') }}" \
--name="{{ ansible_hostname }}-osd-prepare-{{ item.0 | regex_replace('/', '') }}" \
-v /dev:/dev \
-v /etc/localtime:/etc/localtime:ro \
-e "OSD_DEVICE={{ item.0 }}" \
-e "OSD_JOURNAL_UUID=$(python -c "import uuid; print uuid.uuid5(uuid.NAMESPACE_DNS, '{{ ansible_machine_id }}{{ item.0 }}')")" \
-e "{{ ceph_osd_docker_prepare_env }}" \
-e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
-e KV_TYPE={{kv_type}} \
@ -67,6 +67,15 @@
- ceph_osd_docker_prepare_env is defined
- osd_containerized_deployment_with_kv
- name: generate ceph osd docker run script
become: true
template:
src: "{{ role_path }}/templates/ceph-osd-run.sh.j2"
dest: "{{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh"
owner: "root"
group: "root"
mode: "0744"
- name: generate systemd unit file
become: true
template:
@ -75,18 +84,15 @@
owner: "root"
group: "root"
mode: "0644"
failed_when: false
- name: enable systemd unit file for osd instance
shell: systemctl enable ceph-osd@{{ item | basename }}.service
failed_when: false
changed_when: false
with_items: "{{ ceph_osd_docker_devices }}"
- name: reload systemd unit files
shell: systemctl daemon-reload
changed_when: false
failed_when: false
- name: systemd start osd container
service:

View File

@ -0,0 +1,50 @@
#!/bin/bash
# {{ ansible_managed }}
if [[ "$(blkid -t TYPE=crypto_LUKS -o value -s PARTUUID /dev/${1}* | wc -l)" -gt 0 ]] ; then
for part in /dev/${1}*; do
if [[ "$(blkid -t TYPE=crypto_LUKS -o value -s PARTUUID ${part} | wc -l)" -gt 0 ]]; then
DEVICES="${DEVICES} --device=/dev/disk/by-partuuid/$(blkid -t TYPE=crypto_LUKS -o value -s PARTUUID ${part}) "
fi
done
# we test if the dm exists; if it does we add it to the --device list
# if not we don't add it and the first activation will fail,
# however the dm will be created, so on the second run it'll be added to the device list
# and the second run will succeed
blkid -t TYPE=crypto_LUKS -o value -s PARTUUID /dev/${1}1
# make sure blkid returns 0 otherwise we will test /dev/mapper/ which always exists
if [[ -e /dev/mapper/$(blkid -t TYPE=crypto_LUKS -o value -s PARTUUID /dev/${1}1) && "$?" -eq 0 ]]; then
DEVICES="${DEVICES} --device=/dev/disk/by-partuuid/$(blkid -t PARTLABEL="ceph lockbox" -o value -s PARTUUID /dev/${1}3) --device=/dev/${1}3 --device=/dev/mapper/control --device=/dev/mapper/$(blkid -t TYPE=crypto_LUKS -o value -s PARTUUID /dev/${1}2) --device=/dev/mapper/$(blkid -t TYPE=crypto_LUKS -o value -s PARTUUID /dev/${1}1)"
else
DEVICES="${DEVICES} --device=/dev/disk/by-partuuid/$(blkid -t PARTLABEL="ceph lockbox" -o value -s PARTUUID /dev/${1}3) --device=/dev/${1}3 --device=/dev/mapper/control --device=/dev/mapper/$(blkid -t TYPE=crypto_LUKS -o value -s PARTUUID /dev/${1}2)"
fi
fi
/usr/bin/docker run \
--rm \
--net=host \
--cap-add SYS_ADMIN \
--pid=host \
{% if not osd_containerized_deployment_with_kv -%}
-v /var/lib/ceph:/var/lib/ceph \
-v /etc/ceph:/etc/ceph \
{% else -%}
-e KV_TYPE={{kv_type}} \
-e KV_IP={{kv_endpoint}} \
-e KV_PORT={{kv_port}} \
{% endif -%}
-v /etc/localtime:/etc/localtime:ro \
--device=/dev/${1} \
--device=/dev/${1}1 \
{% if raw_journal_devices|length > 0 -%}
-e OSD_JOURNAL={{ raw_journal_devices[0] }} \
--device={{ raw_journal_devices[0] }} \
{% else -%}
--device=/dev/${1}2 \
{% endif -%}
--device=/dev/disk/by-partuuid/$(python -c "import uuid; f = open('/etc/machine-id', 'r').read(); print uuid.uuid5(uuid.NAMESPACE_DNS, f.strip() + '/dev/$1')") ${DEVICES} \
-e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
-e OSD_DEVICE=/dev/${1} \
{{ ceph_osd_docker_extra_env }} \
--name={{ ansible_hostname }}-osd-dev${1} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
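For the simplest case (no KV store, no dmcrypt, journal collocated on the data disk) the rendered script, invoked as "ceph-osd-run.sh sdb", boils down to something like the following hand-expanded sketch (angle brackets mark placeholders resolved at render or run time):

/usr/bin/docker run --rm --net=host --cap-add SYS_ADMIN --pid=host \
  -v /var/lib/ceph:/var/lib/ceph \
  -v /etc/ceph:/etc/ceph \
  -v /etc/localtime:/etc/localtime:ro \
  --device=/dev/sdb \
  --device=/dev/sdb1 \
  --device=/dev/sdb2 \
  --device=/dev/disk/by-partuuid/<uuid5 of machine-id + /dev/sdb> \
  -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
  -e OSD_DEVICE=/dev/sdb \
  <ceph_osd_docker_extra_env> \
  --name=<hostname>-osd-devsdb \
  <ceph_docker_registry>/ceph/daemon:latest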

View File

@ -1,3 +1,4 @@
# {{ ansible_managed }}
[Unit]
Description=Ceph OSD
After=docker.service
@ -5,24 +6,8 @@ After=docker.service
[Service]
EnvironmentFile=-/etc/environment
ExecStartPre=-/usr/bin/docker stop {{ ansible_hostname }}-osd-dev%i
ExecStartPre=-/usr/bin/docker rm -f {{ ansible_hostname }}-osd-dev%i
ExecStart=/usr/bin/docker run --rm --net=host --pid=host\
{% if not osd_containerized_deployment_with_kv -%}
-v /var/lib/ceph:/var/lib/ceph \
-v /etc/ceph:/etc/ceph \
{% else -%}
-e KV_TYPE={{kv_type}} \
-e KV_IP={{kv_endpoint}} \
-e KV_PORT={{kv_port}} \
{% endif -%}
-v /etc/localtime:/etc/localtime:ro \
-v /dev:/dev \
--privileged \
-e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
-e OSD_DEVICE=/dev/%i \
{{ ceph_osd_docker_extra_env }} \
--name={{ ansible_hostname }}-osd-dev%i \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
ExecStartPre=-/usr/bin/docker rm -f {{ ansible_hostname }}-osd-dev%i
ExecStart={{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh %i
ExecStop=-/usr/bin/docker stop {{ ansible_hostname }}-osd-dev%i
Restart=always
RestartSec=10s
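Each OSD data disk now gets a templated unit instance whose ExecStart is the generated wrapper script rather than an inline docker run. For a disk like /dev/sdb the play effectively does the following (illustrative commands; the play itself uses the service module and the shell tasks shown earlier):

systemctl daemon-reload
systemctl enable ceph-osd@sdb.service
systemctl start ceph-osd@sdb.service
# ExecStart then resolves to /usr/share/ceph-osd-run.sh sdb with the default run script path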

View File

@ -0,0 +1,498 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
require 'yaml'
require 'time'
VAGRANTFILE_API_VERSION = '2'
DEBUG = false
config_file=File.expand_path(File.join(File.dirname(__FILE__), 'vagrant_variables.yml'))
settings=YAML.load_file(config_file)
LABEL_PREFIX = settings['label_prefix'] ? settings['label_prefix'] + "-" : ""
NMONS = settings['mon_vms']
NOSDS = settings['osd_vms']
NMDSS = settings['mds_vms']
NRGWS = settings['rgw_vms']
NNFSS = settings['nfs_vms']
RESTAPI = settings['restapi']
NRBD_MIRRORS = settings['rbd_mirror_vms']
CLIENTS = settings['client_vms']
NISCSI_GWS = settings['iscsi_gw_vms']
PUBLIC_SUBNET = settings['public_subnet']
CLUSTER_SUBNET = settings['cluster_subnet']
BOX = settings['vagrant_box']
BOX_URL = settings['vagrant_box_url']
SYNC_DIR = settings['vagrant_sync_dir']
MEMORY = settings['memory']
ETH = settings['eth']
DOCKER = settings['docker']
USER = settings['ssh_username']
ASSIGN_STATIC_IP = !(BOX == 'openstack' or BOX == 'linode')
DISABLE_SYNCED_FOLDER = settings.fetch('vagrant_disable_synced_folder', false)
DISK_UUID = Time.now.utc.to_i
ansible_provision = proc do |ansible|
if DOCKER then
ansible.playbook = 'site-docker.yml'
if settings['skip_tags']
ansible.skip_tags = settings['skip_tags']
end
else
ansible.playbook = 'site.yml'
end
# Note: Can't do ranges like mon[0-2] in groups because
# these aren't supported by Vagrant, see
# https://github.com/mitchellh/vagrant/issues/3539
ansible.groups = {
'mons' => (0..NMONS - 1).map { |j| "#{LABEL_PREFIX}mon#{j}" },
'osds' => (0..NOSDS - 1).map { |j| "#{LABEL_PREFIX}osd#{j}" },
'mdss' => (0..NMDSS - 1).map { |j| "#{LABEL_PREFIX}mds#{j}" },
'rgws' => (0..NRGWS - 1).map { |j| "#{LABEL_PREFIX}rgw#{j}" },
'nfss' => (0..NNFSS - 1).map { |j| "#{LABEL_PREFIX}nfs#{j}" },
'rbd_mirrors' => (0..NRBD_MIRRORS - 1).map { |j| "#{LABEL_PREFIX}rbd_mirror#{j}" },
'clients' => (0..CLIENTS - 1).map { |j| "#{LABEL_PREFIX}client#{j}" },
'iscsi_gw' => (0..NISCSI_GWS - 1).map { |j| "#{LABEL_PREFIX}iscsi_gw#{j}" }
}
if RESTAPI then
ansible.groups['restapis'] = (0..NMONS - 1).map { |j| "#{LABEL_PREFIX}mon#{j}" }
end
ansible.extra_vars = {
cluster_network: "#{CLUSTER_SUBNET}.0/24",
journal_size: 100,
public_network: "#{PUBLIC_SUBNET}.0/24",
}
# In a production deployment, these should be secret
if DOCKER then
ansible.extra_vars = ansible.extra_vars.merge({
mon_containerized_deployment: 'true',
osd_containerized_deployment: 'true',
mds_containerized_deployment: 'true',
rgw_containerized_deployment: 'true',
nfs_containerized_deployment: 'true',
restapi_containerized_deployment: 'true',
rbd_mirror_containerized_deployment: 'true',
ceph_mon_docker_interface: ETH,
ceph_mon_docker_subnet: "#{PUBLIC_SUBNET}.0/24",
ceph_osd_docker_devices: settings['disks'],
devices: settings['disks'],
ceph_docker_on_openstack: BOX == 'openstack',
ceph_rgw_civetweb_port: 8080,
generate_fsid: 'true',
})
else
ansible.extra_vars = ansible.extra_vars.merge({
devices: settings['disks'],
journal_collocation: 'true',
monitor_interface: ETH,
os_tuning_params: settings['os_tuning_params'],
pool_default_size: '2',
})
end
if BOX == 'linode' then
ansible.sudo = true
# Use monitor_address_block instead of monitor_interface:
ansible.extra_vars.delete(:monitor_interface)
ansible.extra_vars = ansible.extra_vars.merge({
cluster_network: "#{CLUSTER_SUBNET}.0/16",
devices: ['/dev/sdc'], # hardcode leftover disk
journal_collocation: 'true',
monitor_address_block: "#{PUBLIC_SUBNET}.0/16",
public_network: "#{PUBLIC_SUBNET}.0/16",
})
end
if DEBUG then
ansible.verbose = '-vvv'
end
ansible.limit = 'all'
end
def create_vmdk(name, size)
dir = Pathname.new(__FILE__).expand_path.dirname
path = File.join(dir, '.vagrant', name + '.vmdk')
`vmware-vdiskmanager -c -s #{size} -t 0 -a scsi #{path} \
2>&1 > /dev/null` unless File.exist?(path)
end
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.box = BOX
config.vm.box_url = BOX_URL
config.ssh.insert_key = false # workaround for https://github.com/mitchellh/vagrant/issues/5048
config.ssh.private_key_path = settings['ssh_private_key_path']
config.ssh.username = USER
# Faster bootup. Disables mounting the sync folder for libvirt and virtualbox
if DISABLE_SYNCED_FOLDER
config.vm.provider :virtualbox do |v,override|
override.vm.synced_folder '.', SYNC_DIR, disabled: true
end
config.vm.provider :libvirt do |v,override|
override.vm.synced_folder '.', SYNC_DIR, disabled: true
end
end
if BOX == 'openstack'
# OpenStack VMs
config.vm.provider :openstack do |os|
config.vm.synced_folder ".", "/home/#{USER}/vagrant", disabled: true
config.ssh.pty = true
os.openstack_auth_url = settings['os_openstack_auth_url']
os.username = settings['os_username']
os.password = settings['os_password']
os.tenant_name = settings['os_tenant_name']
os.region = settings['os_region']
os.flavor = settings['os_flavor']
os.image = settings['os_image']
os.keypair_name = settings['os_keypair_name']
os.security_groups = ['default']
if settings['os_networks'] then
os.networks = settings['os_networks']
end
if settings['os_floating_ip_pool'] then
os.floating_ip_pool = settings['os_floating_ip_pool']
end
config.vm.provision "shell", inline: "true", upload_path: "/home/#{USER}/vagrant-shell"
end
elsif BOX == 'linode'
config.vm.provider :linode do |provider, override|
provider.token = ENV['LINODE_API_KEY']
provider.distribution = settings['cloud_distribution'] # 'Ubuntu 16.04 LTS'
provider.datacenter = settings['cloud_datacenter']
provider.plan = MEMORY.to_s
provider.private_networking = true
# root install generally takes <1GB
provider.xvda_size = 4*1024
# add some swap as the Linode distros require it
provider.swap_size = 128
end
end
(0..CLIENTS - 1).each do |i|
config.vm.define "#{LABEL_PREFIX}client#{i}" do |client|
client.vm.hostname = "#{LABEL_PREFIX}ceph-client#{i}"
if ASSIGN_STATIC_IP
client.vm.network :private_network,
ip: "#{PUBLIC_SUBNET}.4#{i}"
end
# Virtualbox
client.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
end
# VMware
client.vm.provider :vmware_fusion do |v|
v.vmx['memsize'] = "#{MEMORY}"
end
# Libvirt
client.vm.provider :libvirt do |lv|
lv.memory = MEMORY
lv.random_hostname = true
end
# Parallels
client.vm.provider "parallels" do |prl|
prl.name = "ceph-client#{i}"
prl.memory = "#{MEMORY}"
end
client.vm.provider :linode do |provider|
provider.label = client.vm.hostname
end
end
end
(0..NRGWS - 1).each do |i|
config.vm.define "#{LABEL_PREFIX}rgw#{i}" do |rgw|
rgw.vm.hostname = "#{LABEL_PREFIX}ceph-rgw#{i}"
if ASSIGN_STATIC_IP
rgw.vm.network :private_network,
ip: "#{PUBLIC_SUBNET}.5#{i}"
end
# Virtualbox
rgw.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
end
# VMware
rgw.vm.provider :vmware_fusion do |v|
v.vmx['memsize'] = "#{MEMORY}"
end
# Libvirt
rgw.vm.provider :libvirt do |lv|
lv.memory = MEMORY
lv.random_hostname = true
end
# Parallels
rgw.vm.provider "parallels" do |prl|
prl.name = "ceph-rgw#{i}"
prl.memory = "#{MEMORY}"
end
rgw.vm.provider :linode do |provider|
provider.label = rgw.vm.hostname
end
end
end
(0..NNFSS - 1).each do |i|
config.vm.define "nfs#{i}" do |nfs|
nfs.vm.hostname = "ceph-nfs#{i}"
if ASSIGN_STATIC_IP
nfs.vm.network :private_network,
ip: "#{PUBLIC_SUBNET}.6#{i}"
end
# Virtualbox
nfs.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
end
# VMware
nfs.vm.provider :vmware_fusion do |v|
v.vmx['memsize'] = "#{MEMORY}"
end
# Libvirt
nfs.vm.provider :libvirt do |lv|
lv.memory = MEMORY
lv.random_hostname = true
end
# Parallels
nfs.vm.provider "parallels" do |prl|
prl.name = "ceph-nfs#{i}"
prl.memory = "#{MEMORY}"
end
nfs.vm.provider :linode do |provider|
provider.label = nfs.vm.hostname
end
end
end
(0..NMDSS - 1).each do |i|
config.vm.define "#{LABEL_PREFIX}mds#{i}" do |mds|
mds.vm.hostname = "#{LABEL_PREFIX}ceph-mds#{i}"
if ASSIGN_STATIC_IP
mds.vm.network :private_network,
ip: "#{PUBLIC_SUBNET}.7#{i}"
end
# Virtualbox
mds.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
end
# VMware
mds.vm.provider :vmware_fusion do |v|
v.vmx['memsize'] = "#{MEMORY}"
end
# Libvirt
mds.vm.provider :libvirt do |lv|
lv.memory = MEMORY
lv.random_hostname = true
end
# Parallels
mds.vm.provider "parallels" do |prl|
prl.name = "ceph-mds#{i}"
prl.memory = "#{MEMORY}"
end
mds.vm.provider :linode do |provider|
provider.label = mds.vm.hostname
end
end
end
(0..NRBD_MIRRORS - 1).each do |i|
config.vm.define "#{LABEL_PREFIX}rbd_mirror#{i}" do |rbd_mirror|
rbd_mirror.vm.hostname = "#{LABEL_PREFIX}ceph-rbd-mirror#{i}"
if ASSIGN_STATIC_IP
rbd_mirror.vm.network :private_network,
ip: "#{PUBLIC_SUBNET}.8#{i}"
end
# Virtualbox
rbd_mirror.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
end
# VMware
rbd_mirror.vm.provider :vmware_fusion do |v|
v.vmx['memsize'] = "#{MEMORY}"
end
# Libvirt
rbd_mirror.vm.provider :libvirt do |lv|
lv.memory = MEMORY
lv.random_hostname = true
end
# Parallels
rbd_mirror.vm.provider "parallels" do |prl|
prl.name = "ceph-rbd-mirror#{i}"
prl.memory = "#{MEMORY}"
end
rbd_mirror.vm.provider :linode do |provider|
provider.label = rbd_mirror.vm.hostname
end
end
end
(0..NISCSI_GWS - 1).each do |i|
config.vm.define "#{LABEL_PREFIX}iscsi_gw#{i}" do |iscsi_gw|
iscsi_gw.vm.hostname = "#{LABEL_PREFIX}ceph-iscsi-gw#{i}"
if ASSIGN_STATIC_IP
iscsi_gw.vm.network :private_network,
ip: "#{PUBLIC_SUBNET}.9#{i}"
end
# Virtualbox
iscsi_gw.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
end
# VMware
iscsi_gw.vm.provider :vmware_fusion do |v|
v.vmx['memsize'] = "#{MEMORY}"
end
# Libvirt
iscsi_gw.vm.provider :libvirt do |lv|
lv.memory = MEMORY
lv.random_hostname = true
end
# Parallels
iscsi_gw.vm.provider "parallels" do |prl|
prl.name = "ceph-iscsi-gw#{i}"
prl.memory = "#{MEMORY}"
end
iscsi_gw.vm.provider :linode do |provider|
provider.label = iscsi_gw.vm.hostname
end
end
end
(0..NMONS - 1).each do |i|
config.vm.define "#{LABEL_PREFIX}mon#{i}" do |mon|
mon.vm.hostname = "#{LABEL_PREFIX}ceph-mon#{i}"
if ASSIGN_STATIC_IP
mon.vm.network :private_network,
ip: "#{PUBLIC_SUBNET}.1#{i}"
end
# Virtualbox
mon.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
end
# VMware
mon.vm.provider :vmware_fusion do |v|
v.vmx['memsize'] = "#{MEMORY}"
end
# Libvirt
mon.vm.provider :libvirt do |lv|
lv.memory = MEMORY
lv.random_hostname = false
end
# Parallels
mon.vm.provider "parallels" do |prl|
prl.name = "ceph-mon#{i}"
prl.memory = "#{MEMORY}"
end
mon.vm.provider :linode do |provider|
provider.label = mon.vm.hostname
end
end
end
(0..NOSDS - 1).each do |i|
config.vm.define "#{LABEL_PREFIX}osd#{i}" do |osd|
osd.vm.hostname = "#{LABEL_PREFIX}ceph-osd#{i}"
if ASSIGN_STATIC_IP
osd.vm.network :private_network,
ip: "#{PUBLIC_SUBNET}.10#{i}"
osd.vm.network :private_network,
ip: "#{CLUSTER_SUBNET}.20#{i}"
end
# Virtualbox
osd.vm.provider :virtualbox do |vb|
# Create our own controller for consistency and to remove VM dependency
vb.customize ['storagectl', :id,
'--name', 'OSD Controller',
'--add', 'scsi']
(0..1).each do |d|
vb.customize ['createhd',
'--filename', "disk-#{i}-#{d}",
'--size', '11000'] unless File.exist?("disk-#{i}-#{d}.vdi")
vb.customize ['storageattach', :id,
'--storagectl', 'OSD Controller',
'--port', 3 + d,
'--device', 0,
'--type', 'hdd',
'--medium', "disk-#{i}-#{d}.vdi"]
end
vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
end
# VMware
osd.vm.provider :vmware_fusion do |v|
(0..1).each do |d|
v.vmx["scsi0:#{d + 1}.present"] = 'TRUE'
v.vmx["scsi0:#{d + 1}.fileName"] =
create_vmdk("disk-#{i}-#{d}", '11000MB')
end
v.vmx['memsize'] = "#{MEMORY}"
end
# Libvirt
driverletters = ('a'..'z').to_a
osd.vm.provider :libvirt do |lv|
# always make /dev/sd{a/b/c} so that CI can ensure that
# virtualbox and libvirt will have the same devices to use for OSDs
(0..2).each do |d|
lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => '12G', :bus => "ide"
end
lv.memory = MEMORY
lv.random_hostname = false
end
# Parallels
osd.vm.provider "parallels" do |prl|
prl.name = "ceph-osd#{i}"
prl.memory = "#{MEMORY}"
(0..1).each do |d|
prl.customize ["set", :id,
"--device-add",
"hdd",
"--iface",
"sata"]
end
end
osd.vm.provider :linode do |provider|
provider.label = osd.vm.hostname
end
# Run the provisioner after the last machine comes up
osd.vm.provision 'ansible', &ansible_provision if i == (NOSDS - 1)
end
end
end

View File

@ -0,0 +1,25 @@
---
# this is only here to let the CI tests know
# that this scenario is using docker
docker: True
ceph_stable: True
mon_containerized_deployment: True
osd_containerized_deployment: True
mds_containerized_deployment: True
rgw_containerized_deployment: True
cluster: test
ceph_mon_docker_interface: eth1
ceph_mon_docker_subnet: "{{ public_network }}"
journal_size: 100
ceph_docker_on_openstack: False
public_network: "192.168.15.0/24"
cluster_network: "192.168.16.0/24"
ceph_rgw_civetweb_port: 8080
ceph_osd_docker_devices: "{{ devices }}"
devices:
- /dev/sda
raw_journal_devices:
- /dev/sdb
ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
ceph_osd_docker_run_script_path: /var/tmp

View File

@ -0,0 +1,5 @@
[mons]
mon0
[osds]
osd0

View File

@ -0,0 +1,51 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: True
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
# Deploy RESTAPI on each of the Monitors
restapi: true
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.15
cluster_subnet: 192.168.16
# MEMORY
# set 1024 for CentOS
memory: 1024
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sda', '/dev/sdb' ]"
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# NOTE(leseb): we use centos for this scenario since we need at least Docker version 1.12.5,
# which is not available in Atomic Host.
# There are bugs like this one: https://github.com/docker/docker/issues/12694
vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
#vagrant_sync_dir: /
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true

View File

@ -0,0 +1,498 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
require 'yaml'
require 'time'
VAGRANTFILE_API_VERSION = '2'
DEBUG = false
config_file=File.expand_path(File.join(File.dirname(__FILE__), 'vagrant_variables.yml'))
settings=YAML.load_file(config_file)
LABEL_PREFIX = settings['label_prefix'] ? settings['label_prefix'] + "-" : ""
NMONS = settings['mon_vms']
NOSDS = settings['osd_vms']
NMDSS = settings['mds_vms']
NRGWS = settings['rgw_vms']
NNFSS = settings['nfs_vms']
RESTAPI = settings['restapi']
NRBD_MIRRORS = settings['rbd_mirror_vms']
CLIENTS = settings['client_vms']
NISCSI_GWS = settings['iscsi_gw_vms']
PUBLIC_SUBNET = settings['public_subnet']
CLUSTER_SUBNET = settings['cluster_subnet']
BOX = settings['vagrant_box']
BOX_URL = settings['vagrant_box_url']
SYNC_DIR = settings['vagrant_sync_dir']
MEMORY = settings['memory']
ETH = settings['eth']
DOCKER = settings['docker']
USER = settings['ssh_username']
ASSIGN_STATIC_IP = !(BOX == 'openstack' or BOX == 'linode')
DISABLE_SYNCED_FOLDER = settings.fetch('vagrant_disable_synced_folder', false)
DISK_UUID = Time.now.utc.to_i
ansible_provision = proc do |ansible|
if DOCKER then
ansible.playbook = 'site-docker.yml'
if settings['skip_tags']
ansible.skip_tags = settings['skip_tags']
end
else
ansible.playbook = 'site.yml'
end
# Note: Can't do ranges like mon[0-2] in groups because
# these aren't supported by Vagrant, see
# https://github.com/mitchellh/vagrant/issues/3539
ansible.groups = {
'mons' => (0..NMONS - 1).map { |j| "#{LABEL_PREFIX}mon#{j}" },
'osds' => (0..NOSDS - 1).map { |j| "#{LABEL_PREFIX}osd#{j}" },
'mdss' => (0..NMDSS - 1).map { |j| "#{LABEL_PREFIX}mds#{j}" },
'rgws' => (0..NRGWS - 1).map { |j| "#{LABEL_PREFIX}rgw#{j}" },
'nfss' => (0..NNFSS - 1).map { |j| "#{LABEL_PREFIX}nfs#{j}" },
'rbd_mirrors' => (0..NRBD_MIRRORS - 1).map { |j| "#{LABEL_PREFIX}rbd_mirror#{j}" },
'clients' => (0..CLIENTS - 1).map { |j| "#{LABEL_PREFIX}client#{j}" },
'iscsi_gw' => (0..NISCSI_GWS - 1).map { |j| "#{LABEL_PREFIX}iscsi_gw#{j}" }
}
if RESTAPI then
ansible.groups['restapis'] = (0..NMONS - 1).map { |j| "#{LABEL_PREFIX}mon#{j}" }
end
ansible.extra_vars = {
cluster_network: "#{CLUSTER_SUBNET}.0/24",
journal_size: 100,
public_network: "#{PUBLIC_SUBNET}.0/24",
}
# In a production deployment, these should be secret
if DOCKER then
ansible.extra_vars = ansible.extra_vars.merge({
mon_containerized_deployment: 'true',
osd_containerized_deployment: 'true',
mds_containerized_deployment: 'true',
rgw_containerized_deployment: 'true',
nfs_containerized_deployment: 'true',
restapi_containerized_deployment: 'true',
rbd_mirror_containerized_deployment: 'true',
ceph_mon_docker_interface: ETH,
ceph_mon_docker_subnet: "#{PUBLIC_SUBNET}.0/24",
ceph_osd_docker_devices: settings['disks'],
devices: settings['disks'],
ceph_docker_on_openstack: BOX == 'openstack',
ceph_rgw_civetweb_port: 8080,
generate_fsid: 'true',
})
else
ansible.extra_vars = ansible.extra_vars.merge({
devices: settings['disks'],
journal_collocation: 'true',
monitor_interface: ETH,
os_tuning_params: settings['os_tuning_params'],
pool_default_size: '2',
})
end
if BOX == 'linode' then
ansible.sudo = true
# Use monitor_address_block instead of monitor_interface:
ansible.extra_vars.delete(:monitor_interface)
ansible.extra_vars = ansible.extra_vars.merge({
cluster_network: "#{CLUSTER_SUBNET}.0/16",
devices: ['/dev/sdc'], # hardcode leftover disk
journal_collocation: 'true',
monitor_address_block: "#{PUBLIC_SUBNET}.0/16",
public_network: "#{PUBLIC_SUBNET}.0/16",
})
end
if DEBUG then
ansible.verbose = '-vvv'
end
ansible.limit = 'all'
end
def create_vmdk(name, size)
dir = Pathname.new(__FILE__).expand_path.dirname
path = File.join(dir, '.vagrant', name + '.vmdk')
`vmware-vdiskmanager -c -s #{size} -t 0 -a scsi #{path} \
2>&1 > /dev/null` unless File.exist?(path)
end
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.box = BOX
config.vm.box_url = BOX_URL
config.ssh.insert_key = false # workaround for https://github.com/mitchellh/vagrant/issues/5048
config.ssh.private_key_path = settings['ssh_private_key_path']
config.ssh.username = USER
# Faster bootup. Disables mounting the sync folder for libvirt and virtualbox
if DISABLE_SYNCED_FOLDER
config.vm.provider :virtualbox do |v,override|
override.vm.synced_folder '.', SYNC_DIR, disabled: true
end
config.vm.provider :libvirt do |v,override|
override.vm.synced_folder '.', SYNC_DIR, disabled: true
end
end
if BOX == 'openstack'
# OpenStack VMs
config.vm.provider :openstack do |os|
config.vm.synced_folder ".", "/home/#{USER}/vagrant", disabled: true
config.ssh.pty = true
os.openstack_auth_url = settings['os_openstack_auth_url']
os.username = settings['os_username']
os.password = settings['os_password']
os.tenant_name = settings['os_tenant_name']
os.region = settings['os_region']
os.flavor = settings['os_flavor']
os.image = settings['os_image']
os.keypair_name = settings['os_keypair_name']
os.security_groups = ['default']
if settings['os_networks'] then
os.networks = settings['os_networks']
end
if settings['os_floating_ip_pool'] then
os.floating_ip_pool = settings['os_floating_ip_pool']
end
config.vm.provision "shell", inline: "true", upload_path: "/home/#{USER}/vagrant-shell"
end
elsif BOX == 'linode'
config.vm.provider :linode do |provider, override|
provider.token = ENV['LINODE_API_KEY']
provider.distribution = settings['cloud_distribution'] # 'Ubuntu 16.04 LTS'
provider.datacenter = settings['cloud_datacenter']
provider.plan = MEMORY.to_s
provider.private_networking = true
# root install generally takes <1GB
provider.xvda_size = 4*1024
# add some swap as the Linode distros require it
provider.swap_size = 128
end
end
(0..CLIENTS - 1).each do |i|
config.vm.define "#{LABEL_PREFIX}client#{i}" do |client|
client.vm.hostname = "#{LABEL_PREFIX}ceph-client#{i}"
if ASSIGN_STATIC_IP
client.vm.network :private_network,
ip: "#{PUBLIC_SUBNET}.4#{i}"
end
# Virtualbox
client.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
end
# VMware
client.vm.provider :vmware_fusion do |v|
v.vmx['memsize'] = "#{MEMORY}"
end
# Libvirt
client.vm.provider :libvirt do |lv|
lv.memory = MEMORY
lv.random_hostname = true
end
# Parallels
client.vm.provider "parallels" do |prl|
prl.name = "ceph-client#{i}"
prl.memory = "#{MEMORY}"
end
client.vm.provider :linode do |provider|
provider.label = client.vm.hostname
end
end
end
(0..NRGWS - 1).each do |i|
config.vm.define "#{LABEL_PREFIX}rgw#{i}" do |rgw|
rgw.vm.hostname = "#{LABEL_PREFIX}ceph-rgw#{i}"
if ASSIGN_STATIC_IP
rgw.vm.network :private_network,
ip: "#{PUBLIC_SUBNET}.5#{i}"
end
# Virtualbox
rgw.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
end
# VMware
rgw.vm.provider :vmware_fusion do |v|
v.vmx['memsize'] = "#{MEMORY}"
end
# Libvirt
rgw.vm.provider :libvirt do |lv|
lv.memory = MEMORY
lv.random_hostname = true
end
# Parallels
rgw.vm.provider "parallels" do |prl|
prl.name = "ceph-rgw#{i}"
prl.memory = "#{MEMORY}"
end
rgw.vm.provider :linode do |provider|
provider.label = rgw.vm.hostname
end
end
end
(0..NNFSS - 1).each do |i|
config.vm.define "nfs#{i}" do |nfs|
nfs.vm.hostname = "ceph-nfs#{i}"
if ASSIGN_STATIC_IP
nfs.vm.network :private_network,
ip: "#{PUBLIC_SUBNET}.6#{i}"
end
# Virtualbox
nfs.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
end
# VMware
nfs.vm.provider :vmware_fusion do |v|
v.vmx['memsize'] = "#{MEMORY}"
end
# Libvirt
nfs.vm.provider :libvirt do |lv|
lv.memory = MEMORY
lv.random_hostname = true
end
# Parallels
nfs.vm.provider "parallels" do |prl|
prl.name = "ceph-nfs#{i}"
prl.memory = "#{MEMORY}"
end
nfs.vm.provider :linode do |provider|
provider.label = nfs.vm.hostname
end
end
end
(0..NMDSS - 1).each do |i|
config.vm.define "#{LABEL_PREFIX}mds#{i}" do |mds|
mds.vm.hostname = "#{LABEL_PREFIX}ceph-mds#{i}"
if ASSIGN_STATIC_IP
mds.vm.network :private_network,
ip: "#{PUBLIC_SUBNET}.7#{i}"
end
# Virtualbox
mds.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
end
# VMware
mds.vm.provider :vmware_fusion do |v|
v.vmx['memsize'] = "#{MEMORY}"
end
# Libvirt
mds.vm.provider :libvirt do |lv|
lv.memory = MEMORY
lv.random_hostname = true
end
# Parallels
mds.vm.provider "parallels" do |prl|
prl.name = "ceph-mds#{i}"
prl.memory = "#{MEMORY}"
end
mds.vm.provider :linode do |provider|
provider.label = mds.vm.hostname
end
end
end
(0..NRBD_MIRRORS - 1).each do |i|
config.vm.define "#{LABEL_PREFIX}rbd_mirror#{i}" do |rbd_mirror|
rbd_mirror.vm.hostname = "#{LABEL_PREFIX}ceph-rbd-mirror#{i}"
if ASSIGN_STATIC_IP
rbd_mirror.vm.network :private_network,
ip: "#{PUBLIC_SUBNET}.8#{i}"
end
# Virtualbox
rbd_mirror.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
end
# VMware
rbd_mirror.vm.provider :vmware_fusion do |v|
v.vmx['memsize'] = "#{MEMORY}"
end
# Libvirt
rbd_mirror.vm.provider :libvirt do |lv|
lv.memory = MEMORY
lv.random_hostname = true
end
# Parallels
rbd_mirror.vm.provider "parallels" do |prl|
prl.name = "ceph-rbd-mirror#{i}"
prl.memory = "#{MEMORY}"
end
rbd_mirror.vm.provider :linode do |provider|
provider.label = rbd_mirror.vm.hostname
end
end
end
(0..NISCSI_GWS - 1).each do |i|
config.vm.define "#{LABEL_PREFIX}iscsi_gw#{i}" do |iscsi_gw|
iscsi_gw.vm.hostname = "#{LABEL_PREFIX}ceph-iscsi-gw#{i}"
if ASSIGN_STATIC_IP
iscsi_gw.vm.network :private_network,
ip: "#{PUBLIC_SUBNET}.9#{i}"
end
# Virtualbox
iscsi_gw.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
end
# VMware
iscsi_gw.vm.provider :vmware_fusion do |v|
v.vmx['memsize'] = "#{MEMORY}"
end
# Libvirt
iscsi_gw.vm.provider :libvirt do |lv|
lv.memory = MEMORY
lv.random_hostname = true
end
# Parallels
iscsi_gw.vm.provider "parallels" do |prl|
prl.name = "ceph-iscsi-gw#{i}"
prl.memory = "#{MEMORY}"
end
iscsi_gw.vm.provider :linode do |provider|
provider.label = iscsi_gw.vm.hostname
end
end
end
(0..NMONS - 1).each do |i|
config.vm.define "#{LABEL_PREFIX}mon#{i}" do |mon|
mon.vm.hostname = "#{LABEL_PREFIX}ceph-mon#{i}"
if ASSIGN_STATIC_IP
mon.vm.network :private_network,
ip: "#{PUBLIC_SUBNET}.1#{i}"
end
# Virtualbox
mon.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
end
# VMware
mon.vm.provider :vmware_fusion do |v|
v.vmx['memsize'] = "#{MEMORY}"
end
# Libvirt
mon.vm.provider :libvirt do |lv|
lv.memory = MEMORY
lv.random_hostname = false
end
# Parallels
mon.vm.provider "parallels" do |prl|
prl.name = "ceph-mon#{i}"
prl.memory = "#{MEMORY}"
end
mon.vm.provider :linode do |provider|
provider.label = mon.vm.hostname
end
end
end
(0..NOSDS - 1).each do |i|
config.vm.define "#{LABEL_PREFIX}osd#{i}" do |osd|
osd.vm.hostname = "#{LABEL_PREFIX}ceph-osd#{i}"
if ASSIGN_STATIC_IP
osd.vm.network :private_network,
ip: "#{PUBLIC_SUBNET}.10#{i}"
osd.vm.network :private_network,
ip: "#{CLUSTER_SUBNET}.20#{i}"
end
# Virtualbox
osd.vm.provider :virtualbox do |vb|
# Create our own controller for consistency and to remove VM dependency
vb.customize ['storagectl', :id,
'--name', 'OSD Controller',
'--add', 'scsi']
(0..1).each do |d|
vb.customize ['createhd',
'--filename', "disk-#{i}-#{d}",
'--size', '11000'] unless File.exist?("disk-#{i}-#{d}.vdi")
vb.customize ['storageattach', :id,
'--storagectl', 'OSD Controller',
'--port', 3 + d,
'--device', 0,
'--type', 'hdd',
'--medium', "disk-#{i}-#{d}.vdi"]
end
vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
end
# VMware
osd.vm.provider :vmware_fusion do |v|
(0..1).each do |d|
v.vmx["scsi0:#{d + 1}.present"] = 'TRUE'
v.vmx["scsi0:#{d + 1}.fileName"] =
create_vmdk("disk-#{i}-#{d}", '11000MB')
end
v.vmx['memsize'] = "#{MEMORY}"
end
# Libvirt
driverletters = ('a'..'z').to_a
osd.vm.provider :libvirt do |lv|
# always make /dev/sd{a/b/c} so that CI can ensure that
# virtualbox and libvirt will have the same devices to use for OSDs
(0..2).each do |d|
lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => '12G', :bus => "ide"
end
lv.memory = MEMORY
lv.random_hostname = false
end
# Parallels
osd.vm.provider "parallels" do |prl|
prl.name = "ceph-osd#{i}"
prl.memory = "#{MEMORY}"
(0..1).each do |d|
prl.customize ["set", :id,
"--device-add",
"hdd",
"--iface",
"sata"]
end
end
osd.vm.provider :linode do |provider|
provider.label = osd.vm.hostname
end
# Run the provisioner after the last machine comes up
osd.vm.provision 'ansible', &ansible_provision if i == (NOSDS - 1)
end
end
end

View File

@ -0,0 +1,26 @@
---
# this is only here to let the CI tests know
# that this scenario is using docker
docker: True
ceph_stable: True
mon_containerized_deployment: True
osd_containerized_deployment: True
mds_containerized_deployment: True
rgw_containerized_deployment: True
cluster: ceph
ceph_mon_docker_interface: eth1
ceph_mon_docker_subnet: "{{ public_network }}"
journal_size: 100
ceph_docker_on_openstack: False
public_network: "192.168.15.0/24"
cluster_network: "192.168.16.0/24"
journal_collocation: true
ceph_rgw_civetweb_port: 8080
ceph_osd_docker_devices: "{{ devices }}"
devices:
- /dev/sda
raw_journal_devices:
- /dev/sdb
ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1
ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1

View File

@ -0,0 +1,5 @@
[mons]
mon0
[osds]
osd0

View File

@ -0,0 +1,51 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: True
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
# Deploy RESTAPI on each of the Monitors
restapi: true
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.15
cluster_subnet: 192.168.16
# MEMORY
# set 1024 for CentOS
memory: 1024
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sda', '/dev/sdb' ]"
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# NOTE(leseb): we use centos for this scenario since we need at least Docker version 1.12.5,
# which is not available in Atomic Host.
# There are bugs like this one: https://github.com/docker/docker/issues/12694
vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
#vagrant_sync_dir: /
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true

View File

@ -21,3 +21,4 @@ ceph_osd_docker_devices: "{{ devices }}"
devices:
- /dev/sda
- /dev/sdb
ceph_osd_docker_run_script_path: /var/tmp

View File

@ -22,6 +22,7 @@ class TestMons(object):
)
assert Service(service_name).is_enabled
@pytest.mark.no_docker
def test_can_get_cluster_health(self, node, Command):
cmd = "sudo ceph --cluster={} --connect-timeout 5 -s".format(node["cluster_name"])
output = Command.check_output(cmd)
@ -30,6 +31,7 @@ class TestMons(object):
class TestOSDs(object):
@pytest.mark.no_docker
def test_all_osds_are_up_and_in(self, node, Command):
cmd = "sudo ceph --cluster={} --connect-timeout 5 -s".format(node["cluster_name"])
output = Command.check_output(cmd)

View File

@ -1,3 +1,4 @@
import pytest
class TestInstall(object):
@ -13,7 +14,8 @@ class TestInstall(object):
def test_ceph_conf_is_a_file(self, File, node):
assert File(node["conf_path"]).is_file
def test_ceph_command_exists(self, Command):
@pytest.mark.no_docker
def test_ceph_command_exists(self, Command, node):
assert Command.exists("ceph")

View File

@ -1,5 +1,5 @@
[tox]
envlist = {ansible2.2}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster,purge_dmcrypt,update_dmcrypt}
envlist = {ansible2.2}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster,purge_dmcrypt,docker_dedicated_journal,docker_dmcrypt_journal_collocation,update_dmcrypt}
skipsdist = True
# extra commands for purging clusters
@ -38,6 +38,8 @@ setenv=
# only available for ansible >= 2.2
ANSIBLE_STDOUT_CALLBACK = debug
docker_cluster: PLAYBOOK = site-docker.yml.sample
docker_dedicated_journal: PLAYBOOK = site-docker.yml.sample
docker_dmcrypt_journal_collocation: PLAYBOOK = site-docker.yml.sample
deps=
ansible1.9: ansible==1.9.4
ansible2.1: ansible==2.1
@ -56,6 +58,8 @@ changedir=
centos7_cluster: {toxinidir}/tests/functional/centos/7/cluster
# tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using docker
docker_cluster: {toxinidir}/tests/functional/centos/7/docker-cluster
docker_dedicated_journal: {toxinidir}/tests/functional/centos/7/docker-cluster-dedicated-journal
docker_dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/docker-cluster-dmcrypt-journal-collocation
purge_cluster: {toxinidir}/tests/functional/ubuntu/16.04/cluster
purge_dmcrypt: {toxinidir}/tests/functional/centos/7/dmcrypt-dedicated-journal
update_dmcrypt: {toxinidir}/tests/functional/centos/7/dmcrypt-dedicated-journal
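With these factors wired into envlist, setenv and changedir, the new scenarios can be exercised locally much like the existing ones, e.g.:

tox -e ansible2.2-docker_dedicated_journal
tox -e ansible2.2-docker_dmcrypt_journal_collocation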