mirror of https://github.com/ceph/ceph-ansible.git
tests: fix broken tests in collocated-daemons scenarios
At the moment, a lot of tests are skipped when daemons are collocated. Our tests assume a node belongs to only one group, while in certain scenarios it can belong to multiple groups. Also pinning to pytest 3.6.1 so we can use `request.node.iter_markers()`. Co-Authored-by: Alfredo Deza <adeza@redhat.com> Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com> (pull/2805/head)
parent
d0cd6de5a5
commit
d83b24d271
|
@ -18,7 +18,7 @@ def node(host, request):
|
|||
# because testinfra does not collect and provide ansible config passed in
|
||||
# from using --extra-vars
|
||||
ceph_stable_release = os.environ.get("CEPH_STABLE_RELEASE", "luminous")
|
||||
node_type = ansible_vars["group_names"][0]
|
||||
group_names = ansible_vars["group_names"]
|
||||
docker = ansible_vars.get("docker")
|
||||
osd_auto_discovery = ansible_vars.get("osd_auto_discovery")
|
||||
lvm_scenario = ansible_vars.get("osd_scenario") == 'lvm'
|
||||
|
@ -29,41 +29,50 @@ def node(host, request):
|
|||
'mimic': 13,
|
||||
'dev': 99
|
||||
}
|
||||
if not request.node.get_marker(node_type) and not request.node.get_marker('all'): # noqa E501
|
||||
pytest.skip("Not a valid test for node type: %s" % node_type)
|
||||
|
||||
if request.node.get_marker("no_lvm_scenario") and lvm_scenario:
|
||||
# capture the initial/default state
|
||||
test_is_applicable = False
|
||||
for marker in request.node.iter_markers():
|
||||
if marker.name in group_names or marker.name == 'all':
|
||||
test_is_applicable = True
|
||||
break
|
||||
# Check if any markers on the test method exist in the nodes group_names. If they do not, this test is not valid for the node being tested.
|
||||
if not test_is_applicable:
|
||||
reason = "%s: Not a valid test for node type: %s" % (request.function, group_names)
|
||||
pytest.skip(reason)
|
||||
|
||||
if request.node.get_closest_marker("no_lvm_scenario") and lvm_scenario:
|
||||
pytest.skip("Not a valid test for lvm scenarios")
|
||||
|
||||
if not lvm_scenario and request.node.get_marker("lvm_scenario"):
|
||||
if not lvm_scenario and request.node.get_closest_marker("lvm_scenario"):
|
||||
pytest.skip("Not a valid test for non-lvm scenarios")
|
||||
|
||||
if request.node.get_marker("no_docker") and docker:
|
||||
if request.node.get_closest_marker("no_docker") and docker:
|
||||
pytest.skip(
|
||||
"Not a valid test for containerized deployments or atomic hosts")
|
||||
|
||||
if request.node.get_marker("docker") and not docker:
|
||||
if request.node.get_closest_marker("docker") and not docker:
|
||||
pytest.skip(
|
||||
"Not a valid test for non-containerized deployments or atomic hosts") # noqa E501
|
||||
|
||||
if node_type == "mgrs" and ceph_stable_release == "jewel":
|
||||
if "mgrs" in group_names and ceph_stable_release == "jewel":
|
||||
pytest.skip("mgr nodes can not be tested with ceph release jewel")
|
||||
|
||||
if node_type == "nfss" and ceph_stable_release == "jewel":
|
||||
if "nfss" in group_names and ceph_stable_release == "jewel":
|
||||
pytest.skip("nfs nodes can not be tested with ceph release jewel")
|
||||
|
||||
if node_type == "iscsigws" and ceph_stable_release == "jewel":
|
||||
if "iscsigws" in group_names and ceph_stable_release == "jewel":
|
||||
pytest.skip("iscsigws nodes can not be tested with ceph release jewel") # noqa E501
|
||||
|
||||
if request.node.get_marker("from_luminous") and ceph_release_num[ceph_stable_release] < ceph_release_num['luminous']: # noqa E501
|
||||
if request.node.get_closest_marker("from_luminous") and ceph_release_num[ceph_stable_release] < ceph_release_num['luminous']: # noqa E501
|
||||
pytest.skip(
|
||||
"This test is only valid for releases starting from Luminous and above") # noqa E501
|
||||
|
||||
if request.node.get_marker("before_luminous") and ceph_release_num[ceph_stable_release] >= ceph_release_num['luminous']: # noqa E501
|
||||
if request.node.get_closest_marker("before_luminous") and ceph_release_num[ceph_stable_release] >= ceph_release_num['luminous']: # noqa E501
|
||||
pytest.skip("This test is only valid for release before Luminous")
|
||||
|
||||
journal_collocation_test = ansible_vars.get("osd_scenario") == "collocated"
|
||||
if request.node.get_marker("journal_collocation") and not journal_collocation_test: # noqa E501
|
||||
if request.node.get_closest_marker("journal_collocation") and not journal_collocation_test: # noqa E501
|
||||
pytest.skip("Scenario is not using journal collocation")
|
||||
|
||||
osd_ids = []
|
||||
|
@ -84,7 +93,7 @@ def node(host, request):
|
|||
total_osds = num_devices * num_osd_hosts
|
||||
cluster_name = ansible_vars.get("cluster", "ceph")
|
||||
conf_path = "/etc/ceph/{}.conf".format(cluster_name)
|
||||
if node_type == "osds":
|
||||
if "osds" in group_names:
|
||||
# I can assume eth2 because I know all the vagrant
|
||||
# boxes we test with use that interface. OSDs are the only
|
||||
# nodes that have this interface.
|
||||
|
|
|
@ -2,4 +2,5 @@
|
|||
# testinfra < 1.7.1 does not work Ansible 2.4, see https://github.com/philpep/testinfra/issues/249
|
||||
testinfra==1.7.1
|
||||
pytest-xdist
|
||||
pytest==3.6.1
|
||||
notario>=0.0.13
|
||||
|
|
Loading…
Reference in New Issue