Changes to allow ceph-ansible and Vagrant to work on OpenStack VMs

pull/648/head
Jim Curtis 2016-02-04 12:24:56 -08:00 committed by Sébastien Han
parent 3b73b8f2d4
commit d5f642c206
7 changed files with 160 additions and 0 deletions

View File

@@ -0,0 +1,111 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
require 'yaml'
require 'vagrant-openstack-provider'
VAGRANTFILE_API_VERSION = '2'
config_file=File.expand_path(File.join(File.dirname(__FILE__), 'vagrant_variables.yml'))
settings=YAML.load_file(config_file)
NMONS = settings['mon_vms']
NOSDS = settings['osd_vms']
NMDSS = settings['mds_vms']
NRGWS = settings['rgw_vms']
CLIENTS = settings['client_vms']
SUBNET = settings['subnet']
BOX = settings['vagrant_box']
MEMORY = settings['memory']
STORAGECTL = settings['vagrant_storagectl']
ETH = settings['eth']
USER = settings['os_ssh_username']
ansible_provision = proc do |ansible|
  ansible.playbook = 'site.yml'
  if settings['skip_tags']
    ansible.skip_tags = settings['skip_tags']
  end
  # Note: Can't do ranges like mon[0-2] in groups because
  # these aren't supported by Vagrant, see
  # https://github.com/mitchellh/vagrant/issues/3539
  ansible.groups = {
    'mons'     => (0..NMONS - 1).map { |j| "mon#{j}" },
    'restapis' => (0..NMONS - 1).map { |j| "mon#{j}" },
    'osds'     => (0..NOSDS - 1).map { |j| "osd#{j}" },
    'mdss'     => (0..NMDSS - 1).map { |j| "mds#{j}" },
    'rgws'     => (0..NRGWS - 1).map { |j| "rgw#{j}" },
    'clients'  => (0..CLIENTS - 1).map { |j| "client#{j}" }
  }
  # In a production deployment, these should be secret
  ansible.extra_vars = {
    ceph_stable: 'true',
    journal_collocation: 'true',
    fsid: '4a158d27-f750-41d5-9e7f-26ce4c9d2d45',
    monitor_secret: 'AQAWqilTCDh7CBAAawXt6kyTgLFCxSvJhTEmuw==',
    journal_size: 100,
    monitor_interface: ETH,
    cluster_network: "#{SUBNET}.0/24",
    public_network: "#{SUBNET}.0/24",
    devices: settings['disks'],
    os_tuning_params: settings['os_tuning_params']
  }
  ansible.limit = 'all'
end
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.synced_folder ".", "/home/#{USER}/vagrant", disabled: true
  config.vm.box = BOX
  config.ssh.insert_key = false # workaround for https://github.com/mitchellh/vagrant/issues/5048

  # OpenStack VMs
  config.vm.provider :openstack do |os|
    config.ssh.username = settings['os_ssh_username']
    config.ssh.private_key_path = settings['os_ssh_private_key_path']
    config.ssh.pty = true
    os.openstack_auth_url = settings['os_openstack_auth_url']
    os.username = settings['os_username']
    os.password = settings['os_password']
    os.tenant_name = settings['os_tenant_name']
    os.region = settings['os_region']
    os.flavor = settings['os_flavor']
    os.image = settings['os_image']
    os.keypair_name = settings['os_keypair_name']
    os.security_groups = ['default']
  end

  config.vm.provision "shell", inline: "true", upload_path: "/home/#{USER}/vagrant-shell"

  (0..CLIENTS - 1).each do |i|
    config.vm.define "client#{i}" do |client|
      client.vm.hostname = "ceph-client#{i}"
    end
  end

  (0..NRGWS - 1).each do |i|
    config.vm.define "rgw#{i}" do |rgw|
      rgw.vm.hostname = "ceph-rgw#{i}"
    end
  end

  (0..NMDSS - 1).each do |i|
    config.vm.define "mds#{i}" do |mds|
      mds.vm.hostname = "ceph-mds#{i}"
    end
  end

  (0..NMONS - 1).each do |i|
    config.vm.define "mon#{i}" do |mon|
      mon.vm.hostname = "ceph-mon#{i}"
    end
  end

  (0..NOSDS - 1).each do |i|
    config.vm.define "osd#{i}" do |osd|
      osd.vm.hostname = "ceph-osd#{i}"
      # Run the provisioner after the last machine comes up
      osd.vm.provision 'ansible', &ansible_provision if i == (NOSDS - 1)
    end
  end
end
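As the note above the groups hash explains, Vagrant cannot expand inventory ranges like mon[0-2], so each group lists its hosts explicitly. With the counts from the sample vagrant variables file later in this commit (1 mon, 1 osd, everything else 0), the generated groups come out equivalent to the sketch below, written as YAML purely for illustration:

# Illustrative only: what ansible.groups evaluates to with mon_vms: 1 and
# osd_vms: 1 (mds/rgw/client counts of 0 leave those groups empty).
mons:     [mon0]
restapis: [mon0]
osds:     [osd0]
mdss:     []
rgws:     []
clients:  []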

View File

@@ -50,6 +50,11 @@
    ansible_os_family == 'RedHat' and
    ansible_pkg_mgr == "dnf"

- name: install six
  pip:
    name: six
    version: 1.9.0

# NOTE (leseb): for version 1.1.0 because https://github.com/ansible/ansible-modules-core/issues/1227
- name: install docker-py
  pip:
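The hunk cuts off at the start of the docker-py task; going by the NOTE above, it presumably continues by pinning docker-py to 1.1.0, roughly as in this sketch (a reconstruction, not the verbatim file):

# Sketch of the truncated task's likely continuation (reconstructed from the
# NOTE above; not shown in this hunk).
- name: install docker-py
  pip:
    name: docker-py
    version: 1.1.0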

View File

@@ -55,6 +55,7 @@
    image: "{{ ceph_mon_docker_username }}/{{ ceph_mon_docker_imagename }}"
    name: "{{ ansible_hostname }}"
    net: "host"
    pid: "host"
    state: "running"
    privileged: "{{ mon_docker_privileged }}"
    env: "MON_IP={{ hostvars[inventory_hostname]['ansible_' + ceph_mon_docker_interface]['ipv4']['address'] }},CEPH_DAEMON=MON,CEPH_PUBLIC_NETWORK={{ ceph_mon_docker_subnet }},{{ ceph_mon_extra_envs }}"

View File

@@ -1,4 +1,10 @@
---
# For OpenStack VMs, adjust the mount point below depending on whether the
# OpenStack VM deploy tool mounts the ephemeral disk by default or leaves
# everything on the rootfs; if the ephemeral disk stays mounted, docker pull
# and run will fail for reasons that haven't been figured out yet.
- name: umount ceph disk (if on openstack)
  shell: "umount /mnt"
  when: ceph_docker_on_openstack

- name: pull ceph daemon image
  shell: "docker pull {{ ceph_osd_docker_username }}/{{ ceph_osd_docker_imagename }}"

View File

@@ -0,0 +1,37 @@
---
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
client_vms: 0
# SUBNET TO USE FOR THE VMS
# Use whatever private subnet your OpenStack VMs are given
subnet: 172.17.72
# For OpenStack VMs, the disk will depend on what you are allocated
disks: "[ '/dev/vdb' ]"
# For OpenStack VMs, the LAN interface is usually eth0
eth: 'eth0'
# For OpenStack VMs, choose the following box instead
vagrant_box: 'openstack'
# For Atomic (RHEL or CentOS) uncomment the line below
skip_tags: 'with_pkg'
# For deploying on OpenStack VMs, uncomment these vars and assign values;
# you can use env vars for the values if it makes sense. Placeholder example
# values follow the list.
#os_ssh_username :
#os_ssh_private_key_path :
#os_openstack_auth_url :
#os_username :
#os_password :
#os_tenant_name :
#os_region :
#os_flavor :
#os_image :
#os_keypair_name :
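A sketch of these settings filled in, purely with placeholder values (none of them come from the commit); substitute your own cloud's endpoints, credentials, flavor, and image:

# Illustrative placeholder values only; replace every value with your cloud's details.
os_ssh_username: 'cloud-user'
os_ssh_private_key_path: '~/.ssh/id_rsa'
os_openstack_auth_url: 'http://keystone.example.com:5000/v2.0/tokens'
os_username: 'demo'
os_password: 'secret'
os_tenant_name: 'demo'
os_region: 'RegionOne'
os_flavor: 'm1.medium'
os_image: 'centos-7-atomic'
os_keypair_name: 'vagrant'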