mirror of https://github.com/ceph/ceph-ansible.git
tests: share fixture instance across the whole module

There is no need to rerun this part of the code for each test function.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>

branch: pull/3620/head
parent: c98fd0b9e0
commit: f80e43a0d8
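For context on the change itself: pytest instantiates a scope="module" fixture once per test module and hands the cached return value to every test in that module, instead of re-executing the fixture body for each test function. A minimal sketch of that behavior (illustrative only, not part of this commit):

import pytest

calls = []

@pytest.fixture(scope="module")
def expensive_setup():
    # with scope="module", this body runs once per test module
    calls.append(1)
    return {"cluster_name": "ceph"}

def test_one(expensive_setup):
    assert expensive_setup["cluster_name"] == "ceph"

def test_two(expensive_setup):
    # same cached instance as in test_one; the fixture body did not rerun
    assert len(calls) == 1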
@@ -2,6 +2,80 @@ import pytest
 
 import os
 
+@pytest.fixture(scope="module")
+def setup(host):
+    cluster_address = ""
+    container_binary = ""
+    osd_ids = []
+    osds = []
+
+    ansible_vars = host.ansible.get_variables()
+    ansible_facts = host.ansible("setup")
+
+    docker = ansible_vars.get("docker")
+    osd_auto_discovery = ansible_vars.get("osd_auto_discovery")
+    group_names = ansible_vars["group_names"]
+    fsid = ansible_vars.get("fsid")
+
+    ansible_distribution = ansible_facts["ansible_facts"]["ansible_distribution"]
+
+    subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
+    num_mons = len(ansible_vars["groups"]["mons"])
+    if osd_auto_discovery:
+        num_osds = 3
+    else:
+        num_osds = len(ansible_vars.get("devices", []))
+    if not num_osds:
+        num_osds = len(ansible_vars.get("lvm_volumes", []))
+    osds_per_device = ansible_vars.get("osds_per_device", 1)
+    num_osds = num_osds * osds_per_device
+
+    if ansible_distribution == "RedHat":
+        public_interface = "ens6"
+        cluster_interface = "ens7"
+    else:
+        public_interface = "eth1"
+        cluster_interface = "eth2"
+
+    # If number of devices doesn't map to number of OSDs, allow tests to define
+    # that custom number, defaulting it to ``num_devices``
+    num_osds = ansible_vars.get('num_osds', num_osds)
+    cluster_name = ansible_vars.get("cluster", "ceph")
+    conf_path = "/etc/ceph/{}.conf".format(cluster_name)
+    if "osds" in group_names:
+        cluster_address = host.interface(cluster_interface).addresses[0]
+        cmd = host.run('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"')
+        if cmd.rc == 0:
+            osd_ids = cmd.stdout.rstrip("\n").split("\n")
+            osds = osd_ids
+            if docker and fsid == "6e008d48-1661-11e8-8546-008c3214218a":
+                osds = []
+                for device in ansible_vars.get("devices", []):
+                    real_dev = host.run("sudo readlink -f %s" % device)
+                    real_dev_split = real_dev.stdout.split("/")[-1]
+                    osds.append(real_dev_split)
+
+    address = host.interface(public_interface).addresses[0]
+
+    if docker:
+        container_binary = "docker"
+    if docker and host.exists("podman") and ansible_distribution in ["Fedora", "RedHat"]:  # noqa E501
+        container_binary = "podman"
+
+    data = dict(
+        cluster_name=cluster_name,
+        subnet=subnet,
+        osd_ids=osd_ids,
+        num_mons=num_mons,
+        num_osds=num_osds,
+        address=address,
+        osds=osds,
+        conf_path=conf_path,
+        cluster_address=cluster_address,
+        container_binary=container_binary)
+
+    return data
+
+
 @pytest.fixture()
 def node(host, request):
     """
@@ -21,8 +95,6 @@ def node(host, request):
     rolling_update = os.environ.get("ROLLING_UPDATE", "False")
     group_names = ansible_vars["group_names"]
     docker = ansible_vars.get("docker")
-    fsid = ansible_vars.get("fsid")
-    osd_auto_discovery = ansible_vars.get("osd_auto_discovery")
     osd_scenario = ansible_vars.get("osd_scenario")
     radosgw_num_instances = ansible_vars.get("radosgw_num_instances", 1)
     lvm_scenario = osd_scenario in ['lvm', 'lvm-batch']
@@ -33,7 +105,6 @@ def node(host, request):
         'mimic': 13,
         'dev': 99
     }
-    ansible_distribution = host.ansible("setup")["ansible_facts"]['ansible_distribution']
 
     # capture the initial/default state
     test_is_applicable = False
@@ -66,69 +137,15 @@ def node(host, request):
     if request.node.get_closest_marker("journal_collocation") and not journal_collocation_test:  # noqa E501
         pytest.skip("Scenario is not using journal collocation")
 
-    osd_ids = []
-    osds = []
-    cluster_address = ""
-    container_binary = ""
-
-    if ansible_distribution == 'RedHat':
-        public_interface = 'ens6'
-        cluster_interface = 'ens7'
-    else:
-        public_interface = 'eth1'
-        cluster_interface = 'eth2'
-    address = host.interface(public_interface).addresses[0]
-    subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
-    num_mons = len(ansible_vars["groups"]["mons"])
-    if osd_auto_discovery:
-        num_osds = 3
-    else:
-        num_osds = len(ansible_vars.get("devices", []))
-    if not num_osds:
-        num_osds = len(ansible_vars.get("lvm_volumes", []))
-    osds_per_device = ansible_vars.get("osds_per_device", 1)
-    num_osds = num_osds * osds_per_device
-
-    # If number of devices doesn't map to number of OSDs, allow tests to define
-    # that custom number, defaulting it to ``num_devices``
-    num_osds = ansible_vars.get('num_osds', num_osds)
-    cluster_name = ansible_vars.get("cluster", "ceph")
-    conf_path = "/etc/ceph/{}.conf".format(cluster_name)
-    if "osds" in group_names:
-        cluster_address = host.interface(cluster_interface).addresses[0]
-        cmd = host.run('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"')
-        if cmd.rc == 0:
-            osd_ids = cmd.stdout.rstrip("\n").split("\n")
-            osds = osd_ids
-            if docker and fsid == "6e008d48-1661-11e8-8546-008c3214218a":
-                osds = []
-                for device in ansible_vars.get("devices", []):
-                    real_dev = host.run("sudo readlink -f %s" % device)
-                    real_dev_split = real_dev.stdout.split("/")[-1]
-                    osds.append(real_dev_split)
-
-    if docker:
-        container_binary = 'docker'
-    if docker and host.exists('podman') and ansible_distribution in ['Fedora', 'RedHat']:  # noqa E501
-        container_binary = 'podman'
-
     data = dict(
-        address=address,
-        subnet=subnet,
         vars=ansible_vars,
-        osd_ids=osd_ids,
-        num_mons=num_mons,
-        num_osds=num_osds,
-        cluster_name=cluster_name,
-        conf_path=conf_path,
-        cluster_address=cluster_address,
         docker=docker,
-        osds=osds,
         ceph_stable_release=ceph_stable_release,
         ceph_release_num=ceph_release_num,
         rolling_update=rolling_update,
         radosgw_num_instances=radosgw_num_instances,
-        container_binary=container_binary,
     )
     return data

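With the duplicated discovery logic moved out of node, the keys cluster_name, subnet, osd_ids, num_mons, num_osds, address, osds, conf_path, cluster_address and container_binary now live on the shared setup dict, while per-test data such as node["vars"] and node["docker"] stays on node. The test-file hunks below apply that split; its general shape (an illustrative sketch, not a full test from the commit):

def test_something_is_up(self, node, host, setup):
    hostname = node["vars"]["inventory_hostname"]     # still read from node
    container_binary = setup["container_binary"]      # now read from setup
    cluster = setup["cluster_name"]                   # now read from setup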
@@ -20,9 +20,9 @@ class TestMDSs(object):
         )
         assert host.service(service_name).is_enabled
 
-    def test_mds_is_up(self, node, host):
+    def test_mds_is_up(self, node, host, setup):
         hostname = node["vars"]["inventory_hostname"]
-        container_binary = node['container_binary']
+        container_binary = setup['container_binary']
         if node["docker"]:
             container_exec_cmd = '{container_binary} exec ceph-mds-{hostname}'.format(  # noqa E501
                 hostname=hostname, container_binary=container_binary)

@@ -31,7 +31,7 @@ class TestMDSs(object):
 
         cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
             container_exec_cmd=container_exec_cmd,
-            cluster=node['cluster_name']
+            cluster=setup['cluster_name']
         )
         cluster_status = json.loads(host.check_output(cmd))
         assert (cluster_status['fsmap'].get('up', 0) + cluster_status['fsmap'].get(  # noqa E501

@@ -20,10 +20,10 @@ class TestMGRs(object):
         )
         assert host.service(service_name).is_enabled
 
-    def test_mgr_is_up(self, node, host):
+    def test_mgr_is_up(self, node, host, setup):
         hostname = node["vars"]["inventory_hostname"]
-        cluster = node["cluster_name"]
-        container_binary = node["container_binary"]
+        cluster = setup["cluster_name"]
+        container_binary = setup["container_binary"]
         if node['docker']:
             container_exec_cmd = '{container_binary} exec ceph-mgr-{hostname}'.format(  # noqa E501
                 hostname=hostname, container_binary=container_binary)

@@ -8,8 +8,8 @@ class TestMons(object):
     def test_ceph_mon_package_is_installed(self, node, host):
         assert host.package("ceph-mon").is_installed
 
-    def test_mon_listens_on_6789(self, node, host):
-        assert host.socket("tcp://%s:6789" % node["address"]).is_listening
+    def test_mon_listens_on_6789(self, node, host, setup):
+        assert host.socket("tcp://%s:6789" % setup["address"]).is_listening
 
     def test_mon_service_is_running(self, node, host):
         service_name = "ceph-mon@{hostname}".format(

@@ -24,16 +24,16 @@ class TestMons(object):
         assert host.service(service_name).is_enabled
 
     @pytest.mark.no_docker
-    def test_can_get_cluster_health(self, node, host):
-        cmd = "sudo ceph --cluster={} --connect-timeout 5 -s".format(node["cluster_name"])  # noqa E501
+    def test_can_get_cluster_health(self, node, host, setup):
+        cmd = "sudo ceph --cluster={} --connect-timeout 5 -s".format(setup["cluster_name"])  # noqa E501
         output = host.check_output(cmd)
         assert output.strip().startswith("cluster")
 
-    def test_ceph_config_has_inital_members_line(self, node, File):
-        assert File(node["conf_path"]).contains("^mon initial members = .*$")
+    def test_ceph_config_has_inital_members_line(self, node, File, setup):
+        assert File(setup["conf_path"]).contains("^mon initial members = .*$")
 
-    def test_initial_members_line_has_correct_value(self, node, host, File):
-        mon_initial_members_line = host.check_output("grep 'mon initial members = ' /etc/ceph/{cluster}.conf".format(cluster=node['cluster_name']))  # noqa E501
+    def test_initial_members_line_has_correct_value(self, node, host, File, setup):
+        mon_initial_members_line = host.check_output("grep 'mon initial members = ' /etc/ceph/{cluster}.conf".format(cluster=setup['cluster_name']))  # noqa E501
         result = True
         for host in node["vars"]["groups"]["mons"]:
             pattern = re.compile(host)

@@ -25,10 +25,10 @@ class TestNFSs(object):
         assert host.file(
             "/etc/ganesha/ganesha.conf").contains("Entries_HWMark")
 
-    def test_nfs_is_up(self, node, host):
+    def test_nfs_is_up(self, node, host, setup):
         hostname = node["vars"]["inventory_hostname"]
-        cluster = node['cluster_name']
-        container_binary = node["container_binary"]
+        cluster = setup['cluster_name']
+        container_binary = setup["container_binary"]
         if node['docker']:
             container_exec_cmd = '{container_binary} exec ceph-nfs-{hostname}'.format(  # noqa E501
                 hostname=hostname, container_binary=container_binary)

@@ -9,35 +9,35 @@ class TestOSDs(object):
     def test_ceph_osd_package_is_installed(self, node, host):
         assert host.package("ceph-osd").is_installed
 
-    def test_osds_listen_on_public_network(self, node, host):
+    def test_osds_listen_on_public_network(self, node, host, setup):
         # TODO: figure out way to paramaterize this test
-        nb_port = (node["num_osds"] * 4)
+        nb_port = (setup["num_osds"] * 4)
         assert host.check_output(
-            "netstat -lntp | grep ceph-osd | grep %s | wc -l" % (node["address"])) == str(nb_port)  # noqa E501
+            "netstat -lntp | grep ceph-osd | grep %s | wc -l" % (setup["address"])) == str(nb_port)  # noqa E501
 
-    def test_osds_listen_on_cluster_network(self, node, host):
+    def test_osds_listen_on_cluster_network(self, node, host, setup):
         # TODO: figure out way to paramaterize this test
-        nb_port = (node["num_osds"] * 4)
+        nb_port = (setup["num_osds"] * 4)
         assert host.check_output("netstat -lntp | grep ceph-osd | grep %s | wc -l" %  # noqa E501
-                                 (node["cluster_address"])) == str(nb_port)
+                                 (setup["cluster_address"])) == str(nb_port)
 
-    def test_osd_services_are_running(self, node, host):
+    def test_osd_services_are_running(self, node, host, setup):
         # TODO: figure out way to paramaterize node['osds'] for this test
-        for osd in node["osds"]:
+        for osd in setup["osds"]:
             assert host.service("ceph-osd@%s" % osd).is_running
 
     @pytest.mark.no_lvm_scenario
-    def test_osd_services_are_enabled(self, node, host):
+    def test_osd_services_are_enabled(self, node, host, setup):
         # TODO: figure out way to paramaterize node['osds'] for this test
-        for osd in node["osds"]:
+        for osd in setup["osds"]:
             assert host.service("ceph-osd@%s" % osd).is_enabled
 
     @pytest.mark.no_docker
-    def test_osd_are_mounted(self, node, host):
-        # TODO: figure out way to paramaterize node['osd_ids'] for this test
-        for osd_id in node["osd_ids"]:
+    def test_osd_are_mounted(self, node, host, setup):
+        # TODO: figure out way to paramaterize setup['osd_ids'] for this test
+        for osd_id in setup["osd_ids"]:
             osd_path = "/var/lib/ceph/osd/{cluster}-{osd_id}".format(
-                cluster=node["cluster_name"],
+                cluster=setup["cluster_name"],
                 osd_id=osd_id,
             )
             assert host.mount_point(osd_path).exists

@@ -66,21 +66,21 @@ class TestOSDs(object):
         return nb_up
 
     @pytest.mark.no_docker
-    def test_all_osds_are_up_and_in(self, node, host):
+    def test_all_osds_are_up_and_in(self, node, host, setup):
         cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(  # noqa E501
-            cluster=node["cluster_name"])
+            cluster=setup["cluster_name"])
         output = json.loads(host.check_output(cmd))
-        assert node["num_osds"] == self._get_nb_up_osds_from_ids(node, output)
+        assert setup["num_osds"] == self._get_nb_up_osds_from_ids(node, output)
 
     @pytest.mark.docker
-    def test_all_docker_osds_are_up_and_in(self, node, host):
-        container_binary = node["container_binary"]
+    def test_all_docker_osds_are_up_and_in(self, node, host, setup):
+        container_binary = setup["container_binary"]
         osd_id = host.check_output(os.path.join(
             container_binary + " ps -q --filter='name=ceph-osd' | head -1"))
         cmd = "sudo {container_binary} exec {osd_id} ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(  # noqa E501
             osd_id=osd_id,
-            cluster=node["cluster_name"],
+            cluster=setup["cluster_name"],
             container_binary=container_binary
         )
         output = json.loads(host.check_output(cmd))
-        assert node["num_osds"] == self._get_nb_up_osds_from_ids(node, output)
+        assert setup["num_osds"] == self._get_nb_up_osds_from_ids(node, output)

@@ -27,10 +27,10 @@ class TestRbdMirrors(object):
         )
         assert host.service(service_name).is_enabled
 
-    def test_rbd_mirror_is_up(self, node, host):
+    def test_rbd_mirror_is_up(self, node, host, setup):
         hostname = node["vars"]["inventory_hostname"]
-        cluster = node["cluster_name"]
-        container_binary = node["container_binary"]
+        cluster = setup["cluster_name"]
+        container_binary = setup["container_binary"]
         daemons = []
         if node['docker']:
             container_exec_cmd = '{container_binary} exec ceph-rbd-mirror-{hostname}'.format(  # noqa E501

@@ -38,7 +38,7 @@ class TestRbdMirrors(object):
         else:
             container_exec_cmd = ''
         hostname = node["vars"]["inventory_hostname"]
-        cluster = node['cluster_name']
+        cluster = setup['cluster_name']
         cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-rbd-mirror --keyring /var/lib/ceph/bootstrap-rbd-mirror/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
             container_exec_cmd=container_exec_cmd,
             hostname=hostname,

@@ -27,10 +27,10 @@ class TestRGWs(object):
         )
         assert host.service(service_name).is_enabled
 
-    def test_rgw_is_up(self, node, host):
+    def test_rgw_is_up(self, node, host, setup):
         hostname = node["vars"]["inventory_hostname"]
-        cluster = node["cluster_name"]
-        container_binary = node["container_binary"]
+        cluster = setup["cluster_name"]
+        container_binary = setup["container_binary"]
         if node['docker']:
             container_exec_cmd = '{container_binary} exec ceph-rgw-{hostname}-rgw0'.format(  # noqa E501
                 hostname=hostname, container_binary=container_binary)

@@ -5,17 +5,17 @@ import json
 class TestRGWs(object):
 
     @pytest.mark.no_docker
-    def test_rgw_bucket_default_quota_is_set(self, node, host):
-        assert host.file(node["conf_path"]).contains(
+    def test_rgw_bucket_default_quota_is_set(self, node, host, setup):
+        assert host.file(setup["conf_path"]).contains(
             "rgw override bucket index max shards")
-        assert host.file(node["conf_path"]).contains(
+        assert host.file(setup["conf_path"]).contains(
             "rgw bucket default quota max objects")
 
     @pytest.mark.no_docker
-    def test_rgw_bucket_default_quota_is_applied(self, node, host):
+    def test_rgw_bucket_default_quota_is_applied(self, node, host, setup):
         radosgw_admin_cmd = "sudo radosgw-admin --cluster={cluster} -n client.rgw.{hostname}.rgw0 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring user create --uid=test --display-name Test".format(  # noqa E501
             hostname=node["vars"]["inventory_hostname"],
-            cluster=node['cluster_name']
+            cluster=setup['cluster_name']
         )
         radosgw_admin_output = host.check_output(radosgw_admin_cmd)
         radosgw_admin_output_json = json.loads(radosgw_admin_output)

@@ -23,10 +23,10 @@ class TestRGWs(object):
         assert radosgw_admin_output_json["bucket_quota"]["max_objects"] == 1638400  # noqa E501
 
     @pytest.mark.no_docker
-    def test_rgw_tuning_pools_are_set(self, node, host):
+    def test_rgw_tuning_pools_are_set(self, node, host, setup):
         cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 -n client.rgw.{hostname}.rgw0 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring osd dump".format(  # noqa E501
             hostname=node["vars"]["inventory_hostname"],
-            cluster=node['cluster_name']
+            cluster=setup['cluster_name']
         )
         output = host.check_output(cmd)
         pools = node["vars"]["rgw_create_pools"]

@@ -36,10 +36,10 @@ class TestRGWs(object):
         assert pg_num_str in output
 
     @pytest.mark.docker
-    def test_docker_rgw_tuning_pools_are_set(self, node, host):
+    def test_docker_rgw_tuning_pools_are_set(self, node, host, setup):
         hostname = node["vars"]["inventory_hostname"]
-        cluster = node['cluster_name']
-        container_binary = node["container_binary"]
+        cluster = setup['cluster_name']
+        container_binary = setup["container_binary"]
         cmd = "sudo {container_binary} exec ceph-rgw-{hostname}-rgw0 ceph --cluster={cluster} -n client.rgw.{hostname}.rgw0 --connect-timeout 5 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring osd dump".format(  # noqa E501
             hostname=hostname,
             cluster=cluster,

@@ -10,11 +10,11 @@ class TestInstall(object):
     def test_ceph_dir_is_a_directory(self, host, node):
        assert host.file('/etc/ceph').is_directory
 
-    def test_ceph_conf_exists(self, host, node):
-        assert host.file(node["conf_path"]).exists
+    def test_ceph_conf_exists(self, host, node, setup):
+        assert host.file(setup["conf_path"]).exists
 
-    def test_ceph_conf_is_a_file(self, host, node):
-        assert host.file(node["conf_path"]).is_file
+    def test_ceph_conf_is_a_file(self, host, node, setup):
+        assert host.file(setup["conf_path"]).is_file
 
     @pytest.mark.no_docker
     def test_ceph_command_exists(self, host, node):

@@ -23,11 +23,11 @@ class TestInstall(object):
 
 class TestCephConf(object):
 
-    def test_mon_host_line_has_correct_value(self, node, host):
-        mon_host_line = host.check_output("grep 'mon host = ' /etc/ceph/{cluster}.conf".format(cluster=node['cluster_name']))  # noqa E501
+    def test_mon_host_line_has_correct_value(self, node, host, setup):
+        mon_host_line = host.check_output("grep 'mon host = ' /etc/ceph/{cluster}.conf".format(cluster=setup['cluster_name']))  # noqa E501
         result = True
-        for x in range(0, node["num_mons"]):
-            pattern = re.compile(("v2:{subnet}.1{x}:3300,v1:{subnet}.1{x}:6789".format(subnet=node["subnet"], x=x)))
+        for x in range(0, setup["num_mons"]):
+            pattern = re.compile(("v2:{subnet}.1{x}:3300,v1:{subnet}.1{x}:6789".format(subnet=setup["subnet"], x=x)))
             if pattern.search(mon_host_line) is None:
                 result = False
         assert result
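One caveat that comes with module-scoped fixtures (general pytest behavior, not something introduced by this commit): the cached return value is a single shared dict, so a test that mutates it in place changes what every later test in the same module sees. A hypothetical illustration:

def test_mutates(setup):
    setup["osds"].append("sdz")   # mutates the shared, cached dict

def test_reads(setup):
    # now observes "sdz" as well; tests should treat `setup` as read-only
    assert "sdz" in setup["osds"]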