# ceph-ansible/tests/conftest.py

import os

import pytest


@pytest.fixture
def ceph_status(host, setup):
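    """
    Return a callable that runs ``ceph -s`` with JSON output against the
    cluster and returns the raw command output, wrapping the command in the
    configured container runtime for containerized deployments.

    A minimal usage sketch (the keyring path here is hypothetical):

        status = json.loads(ceph_status('/etc/ceph/ceph.client.admin.keyring'))
    """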
    def _run(keyring,
             name=None,
             cluster='ceph',
             container_binary='podman'):
        containerized_deployment = setup["containerized_deployment"]
        container_image = setup["container_image"]
        client_name = ''
        if name is not None:
            client_name = f'-n {name}'
        ceph_args = f"--connect-timeout 5 {client_name} -k {keyring} --cluster {cluster} -f json -s"
        if containerized_deployment:
            cmd = f"sudo {container_binary} run --rm -v /etc/ceph:/etc/ceph -v {keyring}:{keyring}:z --entrypoint=ceph {container_image} {ceph_args}"
        else:
            cmd = f"ceph {ceph_args}"
        output = host.check_output(cmd)
        return output

    return _run


def str_to_bool(val):
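    """
    Coerce a 'true'/'false' string (case-insensitive) or a boolean to bool.

    >>> str_to_bool('True')
    True
    >>> str_to_bool(False)
    False
    """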
    try:
        val = val.lower()
    except AttributeError:
        val = str(val).lower()
    if val == 'true':
        return True
    elif val == 'false':
        return False
    else:
        raise ValueError("Invalid input value: %s" % val)


@pytest.fixture(scope="module")
def setup(host):
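    """
    Collect cluster-wide facts for the current host from the Ansible
    inventory (container image, network interfaces, expected mon/OSD counts,
    config paths) and expose them to tests as a single dict.
    """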
    cluster_address = ""
    osd_ids = []
    osds = []
    ansible_vars = host.ansible.get_variables()
    ansible_facts = host.ansible("setup")
    containerized_deployment = ansible_vars.get("containerized_deployment", False)
    ceph_docker_registry = ansible_vars.get("ceph_docker_registry")
    ceph_docker_image = ansible_vars.get("ceph_docker_image")
    ceph_docker_image_tag = ansible_vars.get("ceph_docker_image_tag")
    container_image = f"{ceph_docker_registry}/{ceph_docker_image}:{ceph_docker_image_tag}"
    docker = ansible_vars.get("docker")
    container_binary = ansible_vars.get("container_binary", "")
    osd_auto_discovery = ansible_vars.get("osd_auto_discovery")
    group_names = ansible_vars["group_names"]
    ansible_distribution = ansible_facts["ansible_facts"]["ansible_distribution"]
    if ansible_distribution == "CentOS":
        public_interface = "eth1"
        cluster_interface = "eth2"
    else:
        public_interface = "ens6"
        cluster_interface = "ens7"
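    # Derive the subnet prefix by dropping the last dot-separated component
    # of public_network, e.g. '192.168.1.0/24' -> '192.168.1'.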
    subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
    num_mons = len(ansible_vars["groups"].get('mons', []))
    if osd_auto_discovery:
        num_osds = 3
    else:
        num_osds = len(ansible_vars.get("devices", []))
    if not num_osds:
        num_osds = len(ansible_vars.get("lvm_volumes", []))
    osds_per_device = ansible_vars.get("osds_per_device", 1)
    num_osds = num_osds * osds_per_device
    # If the number of devices doesn't map to the number of OSDs, allow tests
    # to define a custom ``num_osds``, defaulting to the value computed above.
    num_osds = ansible_vars.get('num_osds', num_osds)
    cluster_name = ansible_vars.get("cluster", "ceph")
    conf_path = "/etc/ceph/{}.conf".format(cluster_name)
    if "osds" in group_names:
        cluster_address = host.interface(cluster_interface).addresses[0]
        cmd = host.run('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"')
        if cmd.rc == 0:
            osd_ids = cmd.stdout.rstrip("\n").split("\n")
            osds = osd_ids
    address = host.interface(public_interface).addresses[0]
    if docker and not container_binary:
        container_binary = "podman"
    data = dict(
        cluster_name=cluster_name,
        containerized_deployment=containerized_deployment,
        container_image=container_image,
        subnet=subnet,
        osd_ids=osd_ids,
        num_mons=num_mons,
        num_osds=num_osds,
        address=address,
        osds=osds,
        conf_path=conf_path,
        public_interface=public_interface,
        cluster_interface=cluster_interface,
        cluster_address=cluster_address,
        container_binary=container_binary)
    return data


@pytest.fixture()
def node(host, request):
"""
This fixture represents a single node in the ceph cluster. Using the
host.ansible fixture provided by testinfra it can access all the ansible
variables provided to it by the specific test scenario being ran.
You must include this fixture on any tests that operate on specific type
of node because it contains the logic to manage which tests a node
should run.
"""
    ansible_vars = host.ansible.get_variables()
    # tox passes in this environment variable. We have to do it this way
    # because testinfra does not collect and provide ansible config passed
    # in via --extra-vars.
    ceph_stable_release = os.environ.get("CEPH_STABLE_RELEASE", "quincy")
    rolling_update = os.environ.get("ROLLING_UPDATE", "False")
    group_names = ansible_vars["group_names"]
    docker = ansible_vars.get("docker")
    dashboard = ansible_vars.get("dashboard_enabled", True)
    radosgw_num_instances = ansible_vars.get("radosgw_num_instances", 1)
    ceph_rbd_mirror_remote_user = ansible_vars.get('ceph_rbd_mirror_remote_user', '')
    ceph_release_num = {
        'jewel': 10,
        'kraken': 11,
        'luminous': 12,
        'mimic': 13,
        'nautilus': 14,
        'octopus': 15,
        'pacific': 16,
        'quincy': 17,
        'reef': 18,
        'squid': 19,
        'dev': 99
    }
    # Work on a copy so removing 'all' does not mutate group_names itself.
    sanitized_group_names = group_names.copy()
    if 'all' in sanitized_group_names:
        sanitized_group_names.remove('all')
    # capture the initial/default state
    test_is_applicable = False
    for marker in request.node.iter_markers():
        if marker.name in group_names or marker.name == 'all':
            test_is_applicable = True
            break
    # Check if any markers on the test method exist in the node's group_names.
    # If they do not, this test is not valid for the node being tested.
    if not test_is_applicable:
        reason = "%s: Not a valid test for node type: %s" % (
            request.function, group_names)
        pytest.skip(reason)
    if request.node.get_closest_marker('rbdmirror_secondary') and not ceph_rbd_mirror_remote_user:  # noqa: E501
        pytest.skip('Not a valid test for a non-secondary rbd-mirror node')
    if request.node.get_closest_marker('ceph_crash') and sanitized_group_names in [['nfss'], ['clients'], ['monitoring']]:
        pytest.skip('Not a valid test for nfs, client or monitoring nodes')
    if request.node.get_closest_marker('ceph_exporter') and sanitized_group_names in [['nfss'], ['clients'], ['monitoring']]:
        pytest.skip('Not a valid test for nfs, client or monitoring nodes')
if request.node.get_closest_marker("no_docker") and docker:
pytest.skip(
"Not a valid test for containerized deployments or atomic hosts")
if request.node.get_closest_marker("docker") and not docker:
pytest.skip(
"Not a valid test for non-containerized deployments or atomic hosts") # noqa E501
if request.node.get_closest_marker("dashboard") and not dashboard:
pytest.skip(
"Not a valid test with dashboard disabled")
if request.node.get_closest_marker("dashboard") and sanitized_group_names == ['clients']:
pytest.skip('Not a valid test for client node')
    data = dict(
        vars=ansible_vars,
        docker=docker,
        ceph_stable_release=ceph_stable_release,
        ceph_release_num=ceph_release_num,
        rolling_update=rolling_update,
        radosgw_num_instances=radosgw_num_instances,
    )
    return data


def pytest_collection_modifyitems(session, config, items):
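    """
    Auto-mark collected tests with a group marker derived from the daemon
    name in their file path, so the ``node`` fixture can match tests to a
    host's group memberships.
    """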
    for item in items:
        test_path = item.location[0]
        if "mon" in test_path:
            item.add_marker(pytest.mark.mons)
        elif "osd" in test_path:
            item.add_marker(pytest.mark.osds)
        elif "mds" in test_path:
            item.add_marker(pytest.mark.mdss)
        elif "mgr" in test_path:
            item.add_marker(pytest.mark.mgrs)
        elif "rbd-mirror" in test_path:
            item.add_marker(pytest.mark.rbdmirrors)
        elif "rgw" in test_path:
            item.add_marker(pytest.mark.rgws)
        elif "nfs" in test_path:
            item.add_marker(pytest.mark.nfss)
        elif "grafana" in test_path:
            item.add_marker(pytest.mark.grafanas)
        else:
            item.add_marker(pytest.mark.all)

        if "journal_collocation" in test_path:
            item.add_marker(pytest.mark.journal_collocation)