tests: introduce `ceph_status` fixture

This avoids some duplicated code in various test_*_is_up() tests

Signed-off-by: Guillaume Abrioux <gabrioux@ibm.com>
pull/7431/head
Guillaume Abrioux 2023-01-20 11:07:31 +01:00 committed by Teoman ONAY
parent 027a3d7e3d
commit 0e086c4c61
6 changed files with 48 additions and 70 deletions

View File

@ -2,6 +2,28 @@ import pytest
import os
@pytest.fixture
def ceph_status(host, setup):
    """Return a helper that runs `ceph -s` and returns its JSON output.

    The returned callable transparently handles containerized and
    bare-metal deployments, based on data from the `setup` fixture.
    """
    def _run(keyring,
             name=None,
             cluster='ceph',
             container_binary='podman'):
        # Pull deployment facts gathered by the `setup` fixture.
        containerized = setup["containerized_deployment"]
        image = setup["container_image"]
        # `-n <name>` is only added when an explicit client name is given.
        client_name = f'-n {name}' if name is not None else ''
        ceph_args = f"--connect-timeout 5 {client_name} -k {keyring} --cluster {cluster} -f json -s"
        if containerized:
            # Run `ceph` inside a throwaway container, bind-mounting the
            # keyring so the client can authenticate.
            cmd = f"sudo {container_binary} run --rm -v /etc/ceph:/etc/ceph -v {keyring}:{keyring}:z --entrypoint=ceph {image} {ceph_args}"
        else:
            cmd = f"ceph {ceph_args}"
        return host.check_output(cmd)
    return _run
def str_to_bool(val):
try:
val = val.lower()
@ -24,6 +46,11 @@ def setup(host):
ansible_vars = host.ansible.get_variables()
ansible_facts = host.ansible("setup")
containerized_deployment = ansible_vars.get("containerized_deployment", False)
ceph_docker_registry = ansible_vars.get("ceph_docker_registry")
ceph_docker_image = ansible_vars.get("ceph_docker_image")
ceph_docker_image_tag = ansible_vars.get("ceph_docker_image_tag")
container_image = f"{ceph_docker_registry}/{ceph_docker_image}:{ceph_docker_image_tag}"
docker = ansible_vars.get("docker")
container_binary = ansible_vars.get("container_binary", "")
osd_auto_discovery = ansible_vars.get("osd_auto_discovery")
@ -68,6 +95,8 @@ def setup(host):
data = dict(
cluster_name=cluster_name,
containerized_deployment=containerized_deployment,
container_image=container_image,
subnet=subnet,
osd_ids=osd_ids,
num_mons=num_mons,

View File

@ -16,19 +16,10 @@ class TestMDSs(object):
assert s.is_enabled
assert s.is_running
def test_mds_is_up(self, node, host, setup):
hostname = node["vars"]["inventory_hostname"]
container_binary = setup['container_binary']
if node["docker"]:
container_exec_cmd = '{container_binary} exec ceph-mds-{hostname}'.format( # noqa E501
hostname=hostname, container_binary=container_binary)
else:
container_exec_cmd = ''
cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format( # noqa E501
container_exec_cmd=container_exec_cmd,
cluster=setup['cluster_name']
)
cluster_status = json.loads(host.check_output(cmd))
def test_mds_is_up(self, node, setup, ceph_status):
cluster = setup["cluster_name"]
name = 'client.bootstrap-mds'
output = ceph_status(f'/var/lib/ceph/bootstrap-mds/{cluster}.keyring', name=name)
cluster_status = json.loads(output)
assert (cluster_status['fsmap'].get('up', 0) + cluster_status['fsmap'].get( # noqa E501
'up:standby', 0)) == len(node["vars"]["groups"]["mdss"])

View File

@ -29,21 +29,11 @@ class TestMGRs(object):
s = host.socket('tcp://%s:%s' % (setup["address"], port))
assert s.is_listening
def test_mgr_is_up(self, node, host, setup):
def test_mgr_is_up(self, node, setup, ceph_status):
hostname = node["vars"]["inventory_hostname"]
cluster = setup["cluster_name"]
container_binary = setup["container_binary"]
if node['docker']:
container_exec_cmd = '{container_binary} exec ceph-mgr-{hostname}'.format( # noqa E501
hostname=hostname, container_binary=container_binary)
else:
container_exec_cmd = ''
cmd = "sudo {container_exec_cmd} ceph --name mgr.{hostname} --keyring /var/lib/ceph/mgr/{cluster}-{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format( # noqa E501
container_exec_cmd=container_exec_cmd,
hostname=node["vars"]["inventory_hostname"],
cluster=cluster
)
output_raw = host.check_output(cmd)
name = f"mgr.{hostname}"
output_raw = ceph_status(f'/var/lib/ceph/mgr/{cluster}-{hostname}/keyring', name=name)
output_json = json.loads(output_raw)
assert output_json['mgrmap']['available']

View File

@ -23,21 +23,11 @@ class TestNFSs(object):
assert host.file(
"/etc/ganesha/ganesha.conf").contains("Entries_HWMark")
def test_nfs_is_up(self, node, host, setup):
def test_nfs_is_up(self, node, setup, ceph_status):
hostname = node["vars"]["inventory_hostname"]
cluster = setup['cluster_name']
container_binary = setup["container_binary"]
if node['docker']:
container_exec_cmd = '{container_binary} exec ceph-nfs-{hostname}'.format( # noqa E501
hostname=hostname, container_binary=container_binary)
else:
container_exec_cmd = ''
cmd = "sudo {container_exec_cmd} ceph --name client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format( # noqa E501
container_exec_cmd=container_exec_cmd,
hostname=hostname,
cluster=cluster
)
output = host.check_output(cmd)
cluster = setup["cluster_name"]
name = f"client.rgw.{hostname}"
output = ceph_status(f'/var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring', name=name)
keys = list(json.loads(
output)["servicemap"]["services"]["rgw-nfs"]["daemons"].keys())
keys.remove('summary')

View File

@ -19,27 +19,14 @@ class TestRbdMirrors(object):
assert s.is_running
@pytest.mark.rbdmirror_secondary
def test_rbd_mirror_is_up(self, node, host, setup):
def test_rbd_mirror_is_up(self, node, setup, ceph_status):
hostname = node["vars"]["inventory_hostname"]
cluster = setup["cluster_name"]
container_binary = setup["container_binary"]
daemons = []
if node['docker']:
container_exec_cmd = '{container_binary} exec ceph-rbd-mirror-{hostname}'.format( # noqa E501
hostname=hostname, container_binary=container_binary)
else:
container_exec_cmd = ''
hostname = node["vars"]["inventory_hostname"]
cluster = setup['cluster_name']
cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-rbd-mirror --keyring /var/lib/ceph/bootstrap-rbd-mirror/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format( # noqa E501
container_exec_cmd=container_exec_cmd,
hostname=hostname,
cluster=cluster
)
output = host.check_output(cmd)
output = ceph_status(f'/var/lib/ceph/bootstrap-rbd-mirror/{cluster}.keyring')
status = json.loads(output)
daemon_ids = [i for i in status["servicemap"]["services"]
["rbd-mirror"]["daemons"].keys() if i != "summary"]
daemons = []
for daemon_id in daemon_ids:
daemons.append(status["servicemap"]["services"]["rbd-mirror"]
["daemons"][daemon_id]["metadata"]["hostname"])

View File

@ -21,21 +21,11 @@ class TestRGWs(object):
assert s.is_enabled
assert s.is_running
def test_rgw_is_up(self, node, host, setup):
def test_rgw_is_up(self, node, setup, ceph_status):
hostname = node["vars"]["inventory_hostname"]
cluster = setup["cluster_name"]
container_binary = setup["container_binary"]
if node['docker']:
container_exec_cmd = '{container_binary} exec ceph-rgw-{hostname}-rgw0'.format( # noqa E501
hostname=hostname, container_binary=container_binary)
else:
container_exec_cmd = ''
cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format( # noqa E501
container_exec_cmd=container_exec_cmd,
hostname=hostname,
cluster=cluster
)
output = host.check_output(cmd)
name = "client.bootstrap-rgw"
output = ceph_status(f'/var/lib/ceph/bootstrap-rgw/{cluster}.keyring', name=name)
keys = list(json.loads(
output)["servicemap"]["services"]["rgw"]["daemons"].keys())
keys.remove('summary')
@ -43,6 +33,7 @@ class TestRGWs(object):
hostnames = []
for key in keys:
hostnames.append(daemons[key]['metadata']['hostname'])
assert hostname in hostnames
@pytest.mark.no_docker
def test_rgw_http_endpoint(self, node, host, setup):