Merge pull request #1086 from ceph/testing-updates

testing: adds 3 new functional testing scenarios
pull/1088/head
Alfredo Deza 2016-11-08 13:39:32 -05:00 committed by GitHub
commit da7dd92e95
26 changed files with 306 additions and 19 deletions

.gitignore

@@ -2,7 +2,7 @@
*.vdi
*.keyring
fetch/*
vagrant_variables.yml
/vagrant_variables.yml
group_vars/all
group_vars/mons
group_vars/osds


@@ -244,6 +244,7 @@ rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allow
monitor_interface: interface
monitor_address: 0.0.0.0
mon_use_fqdn: false # if set to true, the MON name used will be the fqdn in the ceph.conf
monitor_address_block: false
## OSD options
#
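
The new monitor_address_block option defaults to false (disabled). Judging from the ipaddr() filter used in the ceph.conf template change below, enabling it would mean setting it to a subnet in CIDR notation so each monitor's address is picked from that network; an illustrative override (value not from this commit) would be:

monitor_address_block: 192.168.42.0/24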


@@ -17,25 +17,32 @@ osd crush chooseleaf type = 0
{% endif %}
{# NOTE (leseb): the blank lines in-between are needed otherwise we won't get any line break #}
{% if groups[mon_group_name] is defined %}
mon initial members = {% if groups[mon_group_name] is defined %}{% for host in groups[mon_group_name] %}{% if hostvars[host]['ansible_fqdn'] is defined and mon_use_fqdn %}{{ hostvars[host]['ansible_fqdn'] }}{% if not loop.last %},{% endif %}{% elif hostvars[host]['ansible_hostname'] is defined %}{{ hostvars[host]['ansible_hostname'] }}{% if not loop.last %},{% endif %}{% endif %}{% endfor %}{% endif %}
mon initial members = {% for host in groups[mon_group_name] %}
{% if hostvars[host]['ansible_fqdn'] is defined and mon_use_fqdn -%}
{{ hostvars[host]['ansible_fqdn'] }}
{%- elif hostvars[host]['ansible_hostname'] is defined -%}
{{ hostvars[host]['ansible_hostname'] }}
{%- endif %}
{%- if not loop.last %},{% endif %}
{% endfor %}
{% endif %}
{% if not mon_containerized_deployment and not mon_containerized_deployment_with_kv %}
{% if monitor_address_block is defined %}
{% if monitor_address_block %}
mon host = {% for host in groups[mon_group_name] %}{{ hostvars[host]['ansible_all_ipv4_addresses'] | ipaddr(monitor_address_block) | first }}{% if not loop.last %},{% endif %}{% endfor %}
{% elif groups[mon_group_name] is defined %}
mon host = {% for host in groups[mon_group_name] %}
{% set address = hostvars[host]['monitor_address'] if hostvars[host]['monitor_address'] is defined else monitor_address %}
{% set interface = hostvars[host]['monitor_interface'] if hostvars[host]['monitor_interface'] is defined else monitor_interface %}
{% if address != "0.0.0.0" -%}
{{ address }}
{%- else %}
{% if interface != "interface" %}
{% for key in hostvars[host].iterkeys() %}
{% if hostvars[host][key]['macaddress'] is defined and hostvars[host][key]['device'] is defined and hostvars[host][key]['device'] == interface -%}
{{ hostvars[host][key]['ipv4']['address'] }}
{%- endif %}
{% endfor %}
{% endif %}
{% elif address != "0.0.0.0" -%}
{{ address }}
{%- endif %}
{%- if not loop.last %},{% endif %}
{% endfor %}
{% endif %}
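
For reference, the conf-tests scenario added later in this change asserts that, with three monitors on the 192.168.42 public subnet, this template renders lines of the following form (hostnames and addresses copied from the test assertions further down):

mon initial members = ceph-mon0,ceph-mon1,ceph-mon2
mon host = 192.168.42.10,192.168.42.11,192.168.42.12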

tests/functional/.gitignore (new file, mode 100644)

@@ -0,0 +1,3 @@
ubuntu-key/
fetch/
vagrant_ssh_config


@@ -0,0 +1 @@
../../../../../Vagrantfile


@@ -0,0 +1,13 @@
---
ceph_stable: True
public_network: "192.168.42.0/24"
cluster_network: "192.168.43.0/24"
journal_size: 100
devices:
- '/dev/sdb'
- '/dev/sdc'
journal_collocation: True
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }


@@ -0,0 +1,4 @@
---
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }


@@ -0,0 +1,6 @@
[mons]
# centos7 uses the enp0s8 interface
mon0 monitor_interface=enp0s8
[osds]
osd0


@@ -11,5 +11,9 @@ nodes = {
    'mon0': {
        'username': 'vagrant',
        'components': ['mon', 'mon_initial_members']
    }
    },
    'osd0': {
        'username': 'vagrant',
        'components': [],
    },
}


@@ -0,0 +1,64 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
# Deploy RESTAPI on each of the Monitors
restapi: true
# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
ceph_install_source: stable
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.42
cluster_subnet: 192.168.43
# MEMORY
# set 1024 for CentOS
memory: 512
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sdb', '/dev/sdc' ]"
# VAGRANT BOX
# Ubuntu: bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: box-cutter/centos72
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
#vagrant_sync_dir: /
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }


@@ -0,0 +1,39 @@
import pytest


uses_conf_tests = pytest.mark.skipif(
    'conf_tests' not in pytest.config.slaveinput['node_config']['components'],
    reason="only run in monitors configured with initial_members"
)


class TestMon(object):

    def get_line_from_config(self, string, conf_path):
        with open(conf_path) as ceph_conf:
            ceph_conf_lines = ceph_conf.readlines()
            for line in ceph_conf_lines:
                if string in line:
                    return line.strip().strip('\n')

    @uses_conf_tests
    def test_ceph_config_has_inital_members_line(self, scenario_config):
        cluster_name = scenario_config.get('ceph', {}).get('cluster_name', 'ceph')
        ceph_conf_path = '/etc/ceph/%s.conf' % cluster_name
        initial_members_line = self.get_line_from_config('mon initial members', ceph_conf_path)
        assert initial_members_line

    @uses_conf_tests
    def test_initial_members_line_has_correct_value(self, scenario_config):
        cluster_name = scenario_config.get('ceph', {}).get('cluster_name', 'ceph')
        ceph_conf_path = '/etc/ceph/%s.conf' % cluster_name
        initial_members_line = self.get_line_from_config('mon initial members', ceph_conf_path)
        assert initial_members_line == 'mon initial members = ceph-mon0,ceph-mon1,ceph-mon2'

    @uses_conf_tests
    def test_mon_host_line_has_correct_value(self, scenario_config):
        cluster_name = scenario_config.get('ceph', {}).get('cluster_name', 'ceph')
        ceph_conf_path = '/etc/ceph/%s.conf' % cluster_name
        initial_members_line = self.get_line_from_config('mon host', ceph_conf_path)
        assert initial_members_line == 'mon host = 192.168.42.10,192.168.42.11,192.168.42.12'


@@ -0,0 +1 @@
../../../../../Vagrantfile


@@ -0,0 +1,13 @@
---
ceph_stable: True
public_network: "192.168.42.0/24"
cluster_network: "192.168.43.0/24"
journal_size: 100
devices:
- '/dev/sdb'
- '/dev/sdc'
journal_collocation: True
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }


@@ -0,0 +1,4 @@
---
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }


@@ -1,2 +1,5 @@
[mons]
mon0 monitor_interface=eth1
[osds]
osd0


@@ -0,0 +1,19 @@
# Basic information about ceph and its configuration
ceph = {
    'releases': ['infernalis', 'jewel'],
    'cluster_name': 'ceph'
}

# remote nodes to test, with anything specific to them that might be useful for
# tests to get. Each one of these can get requested as a py.test fixture to
# validate information.
nodes = {
    'mon0': {
        'username': 'vagrant',
        'components': ['mon', 'mon_initial_members']
    },
    'osd0': {
        'username': 'vagrant',
        'components': ['mon', 'mon_initial_members']
    }
}


@@ -5,7 +5,7 @@ docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0


@@ -0,0 +1 @@
../../../../../Vagrantfile


@@ -0,0 +1,13 @@
---
ceph_stable: True
public_network: "192.168.42.0/24"
cluster_network: "192.168.43.0/24"
journal_size: 100
devices:
- '/dev/sdb'
- '/dev/sdc'
journal_collocation: True
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }


@@ -0,0 +1,4 @@
---
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }


@@ -0,0 +1,4 @@
[mons]
mon0 monitor_address=192.168.42.10
mon1 monitor_address=192.168.42.11
mon2 monitor_interface=eth1


@@ -1 +0,0 @@
../../../../../../Vagrantfile


@@ -1,6 +0,0 @@
---
ceph_stable: True
public_network: "192.168.42.0/24"
cluster_network: "192.168.43.0/24"
journal_size: 100


@@ -0,0 +1,23 @@
# Basic information about ceph and its configuration
ceph = {
    'releases': ['infernalis', 'jewel'],
    'cluster_name': 'ceph'
}

# remote nodes to test, with anything specific to them that might be useful for
# tests to get. Each one of these can get requested as a py.test fixture to
# validate information.
nodes = {
    'mon0': {
        'username': 'vagrant',
        'components': ['conf_tests']
    },
    'mon1': {
        'username': 'vagrant',
        'components': ['conf_tests']
    },
    'mon2': {
        'username': 'vagrant',
        'components': ['conf_tests']
    },
}


@@ -0,0 +1,64 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 3
osd_vms: 0
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
# Deploy RESTAPI on each of the Monitors
restapi: true
# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
ceph_install_source: stable
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.42
cluster_subnet: 192.168.43
# MEMORY
# set 1024 for CentOS
memory: 512
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sdb', '/dev/sdc' ]"
# VAGRANT BOX
# Ubuntu: bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: geerlingguy/ubuntu1604
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
#vagrant_sync_dir: /
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }


@@ -1,5 +1,5 @@
[tox]
envlist = {ansible2.1,ansible2.2}-{initial-members}
envlist = {ansible2.1}-{xenial-conf-tests,xenial-mon-osd,centos7-mon-osd}
skipsdist = True
[testenv]
@@ -11,16 +11,19 @@ setenv=
  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
  ANSIBLE_ACTION_PLUGINS = {toxinidir}/plugins/actions
deps=
  ansible1.9: ansible==1.9.4
  ansible2.1: ansible==2.1
  ansible2.2: ansible==2.2
  -r{toxinidir}/tests/requirements.txt
changedir=
  initial-members: {toxinidir}/tests/functional/ubuntu/16.04/mon/initial_members
  xenial-conf-tests: {toxinidir}/tests/functional/ubuntu/16.04/mon
  xenial-mon-osd: {toxinidir}/tests/functional/ubuntu/16.04/mon-osd
  centos7-mon-osd: {toxinidir}/tests/functional/centos/7/mon-osd
commands=
  vagrant up --no-provision --provider=virtualbox
  bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
  ansible-playbook -i {changedir}/hosts {toxinidir}/site.yml.sample
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/site.yml.sample
  py.test -v
  vagrant destroy --force
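
With this envlist, an individual scenario can be exercised locally by name, assuming tox, VirtualBox and Vagrant are available, for example:

tox -e ansible2.1-xenial-mon-osd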