Merge pull request #1150 from ceph/testinfra

tests: use testinfra to verify cluster setup in our CI tests
pull/1168/head
Alfredo Deza 2016-12-08 10:22:24 -05:00 committed by GitHub
commit 33331cb962
19 changed files with 242 additions and 444 deletions

View File

@ -50,19 +50,17 @@ mon host = {% for host in groups[mon_group_name] %}
{% if mon_containerized_deployment %}
fsid = {{ fsid }}
{% if groups[mon_group_name] is defined %}
mon host = {% for host in groups[mon_group_name] -%}
{% if mon_containerized_deployment %}
{% set interface = ["ansible_",ceph_mon_docker_interface]|join %}
{{ hostvars[host][interface]['ipv4']['address'] }}
{%- if not loop.last %},{% endif %}
{% elif hostvars[host]['monitor_address'] is defined %}
{{ hostvars[host]['monitor_address'] }}
{%- if not loop.last %},{% endif %}
{% elif monitor_address != "0.0.0.0" %}
monitor_address
{%- if not loop.last %},{% endif %}
{% endif %}
{%- endfor %}
mon host = {% for host in groups[mon_group_name] %}
{% set interface = ["ansible_",ceph_mon_docker_interface]|join %}
{% if mon_containerized_deployment -%}
{{ hostvars[host][interface]['ipv4']['address'] }}
{%- elif hostvars[host]['monitor_address'] is defined -%}
{{ hostvars[host]['monitor_address'] }}
{%- elif monitor_address != "0.0.0.0" -%}
{{ monitor_address }}
{%- endif %}
{%- if not loop.last %},{% endif %}
{% endfor %}
{% endif %}
{% endif %}

View File

@ -1,101 +1,84 @@
import os
import pytest
import imp
def pytest_addoption(parser):
    """Register the ``--scenario`` command line option with py.test."""
    default_scenario = 'scenario.py'
    help_text = "YAML file defining scenarios to test. Currently defaults to: %s" % default_scenario
    parser.addoption(
        "--scenario",
        action="store",
        default=default_scenario,
        help=help_text,
    )
def load_scenario_config(filepath, **kw):
    '''
    Creates a configuration dictionary from a file.

    :param filepath: The path to the file (``~`` is expanded).
    :returns: dict of the names defined by executing the file, plus
              ``__file__`` set to the absolute path of the file.
    :raises RuntimeError: if ``filepath`` does not point to a regular file.
    '''
    # imported locally: only this helper needs it, and it replaces the
    # deprecated `imp` module (removed in Python 3.12) which also leaked
    # the file handle returned by imp.find_module()
    import importlib.util

    abspath = os.path.abspath(os.path.expanduser(filepath))
    conf_dict = {}
    if not os.path.isfile(abspath):
        raise RuntimeError('`%s` is not a file.' % abspath)

    # First, make sure the code will actually compile (and has no SyntaxErrors)
    with open(abspath, 'rb') as f:
        compiled = compile(f.read(), abspath, 'exec')

    # Next, attempt to actually import the file as a module.
    # This provides more verbose import-related error reporting than exec()
    absname, _ = os.path.splitext(abspath)
    module_name = os.path.basename(absname)
    spec = importlib.util.spec_from_file_location(module_name, abspath)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)

    # If we were able to import as a module, actually exec the compiled code
    exec(compiled, globals(), conf_dict)
    conf_dict['__file__'] = abspath
    return conf_dict
def pytest_configure_node(node):
    # pytest-xdist hook: runs on the master once per slave node before it
    # starts. Pushes this node's slice of the scenario configuration to the
    # slave via slaveinput, where the node_config/scenario_config fixtures
    # read it back.
    node_id = node.slaveinput['slaveid']
    scenario_path = os.path.abspath(node.config.getoption('--scenario'))
    scenario = load_scenario_config(scenario_path)
    # per-node settings, keyed by the xdist slave id (e.g. 'mon0')
    node.slaveinput['node_config'] = scenario['nodes'][node_id]
    # the full scenario dict, available to every node
    node.slaveinput['scenario_config'] = scenario
@pytest.fixture(scope='session')
def node_config(request):
    # Settings for the node this test process runs against, as placed into
    # slaveinput by pytest_configure_node on the xdist master.
    return request.config.slaveinput['node_config']
@pytest.fixture(scope="session")
def scenario_config(request):
    # The full scenario dict (all nodes), as placed into slaveinput by
    # pytest_configure_node on the xdist master.
    return request.config.slaveinput['scenario_config']
def pytest_report_header(config):
@pytest.fixture()
def node(Ansible, Interface, Command, request):
"""
Hook to add extra information about the execution environment and to be
able to debug what did the magical args got expanded to
This fixture represents a single node in the ceph cluster. Using the
Ansible fixture provided by testinfra it can access all the ansible variables
provided to it by the specific test scenario being ran.
You must include this fixture on any tests that operate on specific type of node
because it contains the logic to manage which tests a node should run.
"""
lines = []
scenario_path = str(config.rootdir.join(config.getoption('--scenario')))
if not config.remote_execution:
lines.append('execution environment: local')
else:
lines.append('execution environment: remote')
lines.append('loaded scenario: %s' % scenario_path)
lines.append('expanded args: %s' % config.extended_args)
return lines
ansible_vars = Ansible.get_variables()
node_type = ansible_vars["group_names"][0]
docker = ansible_vars.get("docker")
if not request.node.get_marker(node_type) and not request.node.get_marker('all'):
pytest.skip("Not a valid test for node type: %s" % node_type)
if request.node.get_marker("no_docker") and docker:
pytest.skip("Not a valid test for containerized deployments or atomic hosts")
journal_collocation_test = ansible_vars.get("journal_collocation") or ansible_vars.get("dmcrypt_journal_collocation")
if request.node.get_marker("journal_collocation") and not journal_collocation_test:
pytest.skip("Scenario is not using journal collocation")
osd_ids = []
osds = []
cluster_address = ""
if node_type == "osds":
result = Command.check_output('sudo ls /var/lib/ceph/osd/ | grep -oP "\d+$"')
osd_ids = result.split("\n")
# I can assume eth2 because I know all the vagrant
# boxes we test with use that interface. OSDs are the only
# nodes that have this interface.
cluster_address = Interface("eth2").addresses[0]
osds = osd_ids
if docker:
osds = [device.split("/")[-1] for device in ansible_vars["devices"]]
# I can assume eth1 because I know all the vagrant
# boxes we test with use that interface
address = Interface("eth1").addresses[0]
subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
num_mons = len(ansible_vars["groups"]["mons"])
num_devices = len(ansible_vars["devices"])
num_osd_hosts = len(ansible_vars["groups"]["osds"])
total_osds = num_devices * num_osd_hosts
cluster_name = ansible_vars.get("cluster", "ceph")
conf_path = "/etc/ceph/{}.conf".format(cluster_name)
data = dict(
address=address,
subnet=subnet,
vars=ansible_vars,
osd_ids=osd_ids,
num_mons=num_mons,
num_devices=num_devices,
num_osd_hosts=num_osd_hosts,
total_osds=total_osds,
cluster_name=cluster_name,
conf_path=conf_path,
cluster_address=cluster_address,
docker=docker,
osds=osds,
)
return data
def pytest_cmdline_preparse(args, config):
# Note: we can only do our magical args expansion if we aren't already in
# a remote node via xdist/execnet so return quickly if we can't do magic.
# TODO: allow setting an environment variable that helps to skip this kind
# of magical argument expansion
if os.getcwd().endswith('pyexecnetcache'):
return
def pytest_collection_modifyitems(session, config, items):
for item in items:
test_path = item.location[0]
if "mon" in test_path:
item.add_marker(pytest.mark.mons)
elif "osd" in test_path:
item.add_marker(pytest.mark.osds)
elif "mds" in test_path:
item.add_marker(pytest.mark.mdss)
elif "rgw" in test_path:
item.add_marker(pytest.mark.rgws)
else:
item.add_marker(pytest.mark.all)
scenario_path = os.path.abspath(config.getoption('--scenario'))
scenarios = load_scenario_config(scenario_path, args=args)
rsync_dir = os.path.dirname(str(config.rootdir.join('functional')))
test_path = str(config.rootdir.join('functional/tests'))
nodes = []
config.remote_execution = True
for node in scenarios.get('nodes', []):
nodes.append('--tx')
nodes.append('vagrant_ssh={node_name}//id={node_name}'.format(node_name=node))
args[:] = args + ['--max-slave-restart', '0', '--dist=each'] + nodes + ['--rsyncdir', rsync_dir, test_path]
config.extended_args = ' '.join(args)
if "journal_collocation" in test_path:
item.add_marker(pytest.mark.journal_collocation)

View File

@ -1,36 +0,0 @@
# Basic information about ceph and its configuration
ceph = {
    'cluster_name': 'ceph',
    'releases': ['infernalis', 'jewel'],
    'subnet': '192.168.1',
}

# remote nodes to test, with anything specific to them that might be useful for
# tests to get. Each one of these can get requested as a py.test fixture to
# validate information. The monitors run the ceph.conf checks ('conf_tests');
# the other node types have no component-specific tests yet.
nodes = {
    name: {
        'username': 'vagrant',
        'components': ['conf_tests'] if name.startswith('mon') else [],
    }
    for name in ('mon0', 'mon1', 'mon2', 'osd0', 'mds0', 'rgw0')
}

View File

@ -1,19 +0,0 @@
# Basic information about ceph and its configuration
ceph = dict(
    cluster_name='ceph',
    releases=['infernalis', 'jewel'],
)

# remote nodes to test, with anything specific to them that might be useful for
# tests to get. Each one of these can get requested as a py.test fixture to
# validate information. The monitor runs the mon/mon_initial_members config
# checks; the OSD node has no component-specific tests yet.
nodes = dict(
    mon0=dict(username='vagrant', components=['mon', 'mon_initial_members']),
    osd0=dict(username='vagrant', components=[]),
)

View File

@ -1,19 +0,0 @@
# Basic information about ceph and its configuration
ceph = {
'releases': ['infernalis', 'jewel'],
'cluster_name': 'ceph'
}
# remote nodes to test, with anything specific to them that might be useful for
# tests to get. Each one of these can get requested as a py.test fixture to
# validate information.
nodes = {
'mon0': {
'username': 'vagrant',
'components': ['mon', 'mon_initial_members']
},
'osd0': {
'username': 'vagrant',
'components': [],
},
}

View File

@ -1,4 +1,7 @@
---
# this is only here to let the CI tests know
# that this scenario is using docker
docker: True
ceph_stable: True
mon_containerized_deployment: True

View File

@ -1,36 +0,0 @@
# Basic information about ceph and its configuration
ceph = {
'releases': ['infernalis', 'jewel'],
'cluster_name': 'ceph',
'subnet': '192.168.15',
}
# remote nodes to test, with anything specific to them that might be useful for
# tests to get. Each one of these can get requested as a py.test fixture to
# validate information.
nodes = {
'mon0': {
'username': 'vagrant',
'components': ['conf_tests']
},
'mon1': {
'username': 'vagrant',
'components': ['conf_tests']
},
'mon2': {
'username': 'vagrant',
'components': ['conf_tests']
},
'osd0': {
'username': 'vagrant',
'components': []
},
'mds0': {
'username': 'vagrant',
'components': []
},
'rgw0': {
'username': 'vagrant',
'components': []
},
}

View File

@ -1,20 +0,0 @@
# Basic information about ceph and its configuration
ceph = {
'releases': ['infernalis', 'jewel'],
'cluster_name': 'ceph'
}
# remote nodes to test, with anything specific to them that might be useful for
# tests to get. Each one of these can get requested as a py.test fixture to
# validate information.
nodes = {
'mon0': {
'username': 'vagrant',
'components': ['mon', 'mon_initial_members']
},
'osd0': {
'username': 'vagrant',
'components': ['collocated_journals'],
'devices': ['/dev/sda', '/dev/sdb'],
},
}

View File

@ -0,0 +1,24 @@
---
# Pre-test setup playbook: runs against every node before the testinfra
# tests so the environment has the tooling the tests rely on.
- hosts: all
  gather_facts: true
  become: yes
  tasks:

    - name: check if it is Atomic host
      stat: path=/run/ostree-booted
      register: stat_ostree
      always_run: true

    # Atomic hosts cannot install packages, so record that fact for
    # the conditional below
    - name: set fact for using Atomic host
      set_fact:
        is_atomic: '{{ stat_ostree.stat.exists }}'

    # we need to install this so the Socket testinfra module
    # can use netcat for testing
    - name: install net-tools
      package:
        name: net-tools
        state: present
      when:
        - not is_atomic

View File

@ -1,42 +0,0 @@
import pytest
# skipif marker: run these tests only on nodes whose scenario 'components'
# list includes 'conf_tests'
uses_conf_tests = pytest.mark.skipif(
    'conf_tests' not in pytest.config.slaveinput['node_config']['components'],
    reason="only run in monitors configured with initial_members"
)
class TestMon(object):
    # ceph.conf assertions for monitor nodes; every test is gated by the
    # uses_conf_tests skipif marker defined at module level.

    def get_line_from_config(self, string, conf_path):
        # Return the first line of conf_path containing `string`, stripped of
        # surrounding whitespace, or None when no line matches.
        with open(conf_path) as ceph_conf:
            ceph_conf_lines = ceph_conf.readlines()
            for line in ceph_conf_lines:
                if string in line:
                    return line.strip().strip('\n')

    @uses_conf_tests
    def test_ceph_config_has_inital_members_line(self, scenario_config):
        cluster_name = scenario_config.get('ceph', {}).get('cluster_name', 'ceph')
        ceph_conf_path = '/etc/ceph/%s.conf' % cluster_name
        initial_members_line = self.get_line_from_config('mon initial members', ceph_conf_path)
        assert initial_members_line

    @uses_conf_tests
    def test_initial_members_line_has_correct_value(self, scenario_config):
        cluster_name = scenario_config.get('ceph', {}).get('cluster_name', 'ceph')
        ceph_conf_path = '/etc/ceph/%s.conf' % cluster_name
        initial_members_line = self.get_line_from_config('mon initial members', ceph_conf_path)
        assert initial_members_line == 'mon initial members = ceph-mon0,ceph-mon1,ceph-mon2'

    @uses_conf_tests
    def test_mon_host_line_has_correct_value(self, scenario_config):
        config = scenario_config.get('ceph', {})
        cluster_name = config.get('cluster_name', 'ceph')
        ceph_conf_path = '/etc/ceph/%s.conf' % cluster_name
        # despite the variable name, this looks up the 'mon host' line
        initial_members_line = self.get_line_from_config('mon host', ceph_conf_path)
        subnet = config.get('subnet', "192.168.9")
        # monitors are expected at subnet.10, .11 and .12
        expected = 'mon host = {subnet}.10,{subnet}.11,{subnet}.12'.format(subnet=subnet)
        assert initial_members_line == expected

View File

@ -1,32 +0,0 @@
import pytest
# skipif marker: run these tests only on nodes whose scenario 'components'
# list includes 'mon_initial_members'
uses_mon_initial_members = pytest.mark.skipif(
    'mon_initial_members' not in pytest.config.slaveinput['node_config']['components'],
    reason="only run in monitors configured with initial_members"
)
class TestMon(object):
    # ceph.conf assertions for a single-monitor deployment; every test is
    # gated by the uses_mon_initial_members skipif marker defined at module
    # level.

    def get_line_from_config(self, string, conf_path):
        # Return the first line of conf_path containing `string`, stripped of
        # surrounding whitespace, or None when no line matches.
        with open(conf_path) as ceph_conf:
            ceph_conf_lines = ceph_conf.readlines()
            for line in ceph_conf_lines:
                if string in line:
                    return line.strip().strip('\n')

    @uses_mon_initial_members
    def test_ceph_config_has_inital_members_line(self, scenario_config):
        cluster_name = scenario_config.get('ceph', {}).get('cluster_name', 'ceph')
        ceph_conf_path = '/etc/ceph/%s.conf' % cluster_name
        initial_members_line = self.get_line_from_config('mon initial members', ceph_conf_path)
        assert initial_members_line

    @uses_mon_initial_members
    def test_initial_members_line_has_correct_value(self, scenario_config):
        cluster_name = scenario_config.get('ceph', {}).get('cluster_name', 'ceph')
        ceph_conf_path = '/etc/ceph/%s.conf' % cluster_name
        initial_members_line = self.get_line_from_config('mon initial members', ceph_conf_path)
        # single-monitor deployment: only ceph-mon0 is expected
        assert initial_members_line == 'mon initial members = ceph-mon0'

View File

@ -0,0 +1,37 @@
import pytest
class TestMons(object):
    # Smoke tests for monitor nodes, driven by the `node` fixture and the
    # testinfra fixtures (Package, Socket, Service, Command).

    @pytest.mark.no_docker
    def test_ceph_mon_package_is_installed(self, node, Package):
        assert Package("ceph-mon").is_installed

    def test_mon_listens_on_6789(self, node, Socket):
        # 6789 is the monitor's default messenger port
        assert Socket("tcp://%s:6789" % node["address"]).is_listening

    def test_mon_service_is_running(self, node, Service):
        # systemd unit is ceph-mon@ceph-<hostname>
        service_name = "ceph-mon@ceph-{hostname}".format(
            hostname=node["vars"]["inventory_hostname"]
        )
        assert Service(service_name).is_running

    def test_mon_service_is_enabled(self, node, Service):
        service_name = "ceph-mon@ceph-{hostname}".format(
            hostname=node["vars"]["inventory_hostname"]
        )
        assert Service(service_name).is_enabled

    def test_can_get_cluster_health(self, node, Command):
        # `ceph -s` output starts with the 'cluster <fsid>' line
        cmd = "sudo ceph --cluster={} --connect-timeout 5 -s".format(node["cluster_name"])
        output = Command.check_output(cmd)
        assert output.strip().startswith("cluster")
class TestOSDs(object):
    # Cluster-wide OSD count check: asserts that `ceph -s` reports the
    # expected total number of OSDs as both up and in.

    def test_all_osds_are_up_and_in(self, node, Command):
        cmd = "sudo ceph --cluster={} --connect-timeout 5 -s".format(node["cluster_name"])
        output = Command.check_output(cmd)
        # e.g. "3 osds: 3 up, 3 in" where total_osds = devices * osd hosts
        phrase = "{num_osds} osds: {num_osds} up, {num_osds} in".format(num_osds=node["total_osds"])
        assert phrase in output

View File

@ -1,90 +0,0 @@
import os
import pytest
import subprocess
uses_collocated_journals = pytest.mark.skipif(
'collocated_journals' not in pytest.config.slaveinput['node_config']['components'],
reason="only run in osds with collocated journals"
)
# XXX These could/should probably move to fixtures
def which(executable):
    """Return the full path of *executable* from a fixed set of system bin
    directories, or None when it is not found in any of them."""
    search_paths = (
        '/usr/local/bin',
        '/bin',
        '/usr/bin',
        '/usr/local/sbin',
        '/usr/sbin',
        '/sbin',
    )
    # first existing candidate wins, preserving the search order above
    candidates = (os.path.join(prefix, executable) for prefix in search_paths)
    return next((path for path in candidates if os.path.exists(path)), None)
def get_system_devices():
    """
    uses ceph-disk to get a list of devices of a system, and formats the output nicely
    so that tests can consume it to make assertions:

    From:

        /dev/sda :
         /dev/sda2 other, 0x5
         /dev/sda5 other, LVM2_member
         /dev/sda1 other, ext2, mounted on /boot
        /dev/sdb :
         /dev/sdb1 ceph data, active, cluster ceph, osd.0, journal /dev/sdc1
        /dev/sdc :
         /dev/sdc1 ceph journal, for /dev/sdb1
        /dev/sr0 other, unknown

    To:

        {"/dev/sda2": "other, 0x5",
         "/dev/sda5": "other, LVM2_member",
         "/dev/sda1": "other, ext2, mounted on /boot",
         "/dev/sdb1": "ceph data, active, cluster ceph, osd.0, journal /dev/sdc1",
         "/dev/sdc1": "ceph journal, for /dev/sdb1",
         "/dev/sr0": "other, unknown"}

    :raises RuntimeError: when 'ceph-disk list' produced no stdout.
    """
    cmd = ['sudo', which('ceph-disk'), 'list']
    process = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True
    )
    stdout = process.stdout.read().splitlines()
    stderr = process.stderr.read().splitlines()
    # NOTE(review): returncode is collected but never checked; only empty
    # stdout is treated as failure — confirm this is intended
    returncode = process.wait()
    if not stdout:
        raise RuntimeError("'ceph-disk list' failed with: %s" % ' '.join(stderr))
    device_map = {}
    for line in stdout:
        # each partition line is "<device> <comment...>"
        dev, comment = line.strip().split(' ', 1)
        # skip the "/dev/sdX :" header lines for whole devices
        if line.endswith(':'):
            continue
        device_map[dev] = comment
    return device_map
# XXX This test needs to be revisited. The loops obfuscate the values. They
# could very well be parametrized
class TestOSD(object):
    # Gated by the uses_collocated_journals skipif marker defined at module
    # level.

    @uses_collocated_journals
    def test_osds_are_all_collocated(self, node_config):
        # For every configured device, ceph-disk must report both a
        # 'ceph journal' and a 'ceph data' partition on that same device.
        system_devices = get_system_devices()
        devices = node_config.get('devices', [])
        for device in devices:
            # all partitions that live on this device
            osd_devices = dict((d, comment) for d, comment in system_devices.items() if d.startswith(device))
            journal = dict((d, comment) for d, comment in osd_devices.items() if 'ceph journal' in comment)
            osd = dict((d, comment) for d, comment in osd_devices.items() if 'ceph data' in comment)
            assert journal != {}, 'no journal found for device: %s' % device
            assert osd != {}, 'no osd found for device: %s' % device

View File

@ -0,0 +1,7 @@
class TestOSD(object):
    # Journal-collocation check, driven by the `node` fixture and the
    # testinfra Command fixture.

    def test_osds_are_all_collocated(self, node, Command):
        # TODO: figure out way to paramaterize node['vars']['devices'] for this test
        # with a collocated journal, partition 2 of each OSD device carries
        # the 'ceph journal' partition label
        for device in node["vars"]["devices"]:
            assert Command.check_output("sudo blkid -s PARTLABEL -o value %s2" % device) == "ceph journal"

View File

@ -0,0 +1,46 @@
import pytest
class TestOSDs(object):
    # Per-host OSD checks driven by the `node` fixture and the testinfra
    # fixtures (Package, Socket, Service, MountPoint).

    @pytest.mark.no_docker
    def test_ceph_osd_package_is_installed(self, node, Package):
        assert Package("ceph-osd").is_installed

    def test_osds_listen_on_public_network(self, node, Socket):
        # TODO: figure out way to paramaterize this test
        # each device opens two ports starting at 6800
        # NOTE(review): "680{}".format(x) yields 6800..6809 and then "68010"
        # for x >= 10 — confirm per-host device counts stay below 5
        for x in range(0, node["num_devices"] * 2):
            port = "680{}".format(x)
            assert Socket("tcp://{address}:{port}".format(
                address=node["address"],
                port=port,
            )).is_listening

    def test_osds_listen_on_cluster_network(self, node, Socket):
        # TODO: figure out way to paramaterize this test
        for x in range(0, node["num_devices"] * 2):
            port = "680{}".format(x)
            assert Socket("tcp://{address}:{port}".format(
                address=node["cluster_address"],
                port=port,
            )).is_listening

    def test_osd_services_are_running(self, node, Service):
        # TODO: figure out way to paramaterize node['osds'] for this test
        for osd in node["osds"]:
            assert Service("ceph-osd@%s" % osd).is_running

    def test_osd_services_are_enabled(self, node, Service):
        # TODO: figure out way to paramaterize node['osds'] for this test
        for osd in node["osds"]:
            assert Service("ceph-osd@%s" % osd).is_enabled

    @pytest.mark.no_docker
    def test_osd_are_mounted(self, node, MountPoint):
        # TODO: figure out way to paramaterize node['osd_ids'] for this test
        # data directories are mounted at /var/lib/ceph/osd/<cluster>-<id>
        for osd_id in node["osd_ids"]:
            osd_path = "/var/lib/ceph/osd/{cluster}-{osd_id}".format(
                cluster=node["cluster_name"],
                osd_id=osd_id,
            )
            assert MountPoint(osd_path).exists

View File

@ -1,10 +1,39 @@
import os
class TestInstall(object):
def test_ceph_dir_exists(self):
assert os.path.isdir('/etc/ceph')
def test_ceph_dir_exists(self, File):
assert File('/etc/ceph').exists
def test_ceph_conf_exists(self):
assert os.path.isfile('/etc/ceph/ceph.conf')
def test_ceph_dir_is_a_directory(self, File):
assert File('/etc/ceph').is_directory
def test_ceph_conf_exists(self, File, node):
assert File(node["conf_path"]).exists
def test_ceph_conf_is_a_file(self, File, node):
assert File(node["conf_path"]).is_file
def test_ceph_command_exists(self, Command):
assert Command.exists("ceph")
class TestCephConf(object):
    # Assertions against the generated ceph.conf; its path comes from the
    # `node` fixture as node["conf_path"].

    def test_ceph_config_has_inital_members_line(self, node, File):
        assert File(node["conf_path"]).contains("^mon initial members = .*$")

    def test_initial_members_line_has_correct_value(self, node, File):
        # hostnames from the inventory 'mons' group, each prefixed with 'ceph-'
        mons = ",".join("ceph-%s" % host
                        for host in node["vars"]["groups"]["mons"])
        line = "mon initial members = {}".format(mons)
        assert File(node["conf_path"]).contains(line)

    def test_ceph_config_has_mon_host_line(self, node, File):
        assert File(node["conf_path"]).contains("^mon host = .*$")

    def test_mon_host_line_has_correct_value(self, node, File):
        # monitor IPs are <subnet>.10, <subnet>.11, ... in the test subnet
        mon_ips = []
        for x in range(0, node["num_mons"]):
            mon_ips.append("{}.1{}".format(node["subnet"], x))
        line = "mon host = {}".format(",".join(mon_ips))
        assert File(node["conf_path"]).contains(line)

View File

@ -1,36 +0,0 @@
# Basic information about ceph and its configuration
ceph = {
'releases': ['infernalis', 'jewel'],
'cluster_name': 'ceph',
'subnet': '192.168.5',
}
# remote nodes to test, with anything specific to them that might be useful for
# tests to get. Each one of these can get requested as a py.test fixture to
# validate information.
nodes = {
'mon0': {
'username': 'vagrant',
'components': ['conf_tests']
},
'mon1': {
'username': 'vagrant',
'components': ['conf_tests']
},
'mon2': {
'username': 'vagrant',
'components': ['conf_tests']
},
'osd0': {
'username': 'vagrant',
'components': []
},
'mds0': {
'username': 'vagrant',
'components': []
},
'rgw0': {
'username': 'vagrant',
'components': []
},
}

View File

@ -1,3 +1,3 @@
# These are Python requirements needed to run the functional tests
pytest
testinfra
pytest-xdist

View File

@ -34,6 +34,7 @@ commands=
bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars="fetch_directory={changedir}/fetch"
ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/setup.yml
py.test -v
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
vagrant destroy --force