tests/conftest: update node fixture for new testinfra API

Signed-off-by: Alfredo Deza <adeza@redhat.com>
pull/1781/head
Alfredo Deza 2017-08-21 14:07:43 -04:00
parent b8af5fc5a0
commit c0ad0b58f0
1 changed file with 10 additions and 6 deletions
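The change migrates tests/conftest.py from testinfra's older per-module fixtures (Ansible, Interface, Command) to the single host fixture, so the same functionality is reached through host.ansible, host.interface() and host.run(). Below is a minimal, hypothetical sketch of a test module consuming the updated node fixture alongside host; the marker name and the service check are illustrative assumptions, not part of this commit.

import pytest

# Hypothetical example: `node` applies the group/marker skip logic from
# tests/conftest.py, while testinfra's `host` fixture replaces the old
# Ansible/Interface/Command module fixtures.
@pytest.mark.mons
def test_ceph_mon_service_is_running(node, host):
    # host.service() and host.check_output() are part of the testinfra
    # host API; the systemd unit name format is assumed for illustration.
    hostname = host.check_output("hostname -s")
    assert host.service("ceph-mon@%s" % hostname).is_running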

@@ -3,25 +3,29 @@ import os
 @pytest.fixture()
-def node(Ansible, Interface, Command, request):
+def node(host, request):
     """
     This fixture represents a single node in the ceph cluster. Using the
-    Ansible fixture provided by testinfra it can access all the ansible variables
+    host.ansible fixture provided by testinfra it can access all the ansible variables
     provided to it by the specific test scenario being ran.
     You must include this fixture on any tests that operate on specific type of node
     because it contains the logic to manage which tests a node should run.
     """
-    ansible_vars = Ansible.get_variables()
+    ansible_vars = host.ansible.get_variables()
     # tox will pass in this environment variable. we need to do it this way
     # because testinfra does not collect and provide ansible config passed in
     # from using --extra-vars
     ceph_stable_release = os.environ.get("CEPH_STABLE_RELEASE", "kraken")
     node_type = ansible_vars["group_names"][0]
     docker = ansible_vars.get("docker")
+    lvm_scenario = ansible_vars.get("osd_scenario") == 'lvm'
     if not request.node.get_marker(node_type) and not request.node.get_marker('all'):
         pytest.skip("Not a valid test for node type: %s" % node_type)
+    if not lvm_scenario and request.node.get_marker("lvm_scenario"):
+        pytest.skip("Not a valid test for non-lvm scenarios")
     if request.node.get_marker("no_docker") and docker:
         pytest.skip("Not a valid test for containerized deployments or atomic hosts")
@@ -40,7 +44,7 @@ def node(Ansible, Interface, Command, request):
     cluster_address = ""
     # I can assume eth1 because I know all the vagrant
     # boxes we test with use that interface
-    address = Interface("eth1").addresses[0]
+    address = host.interface("eth1").addresses[0]
     subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
     num_mons = len(ansible_vars["groups"]["mons"])
     num_devices = len(ansible_vars.get("devices", []))
@@ -54,8 +58,8 @@ def node(Ansible, Interface, Command, request):
         # I can assume eth2 because I know all the vagrant
         # boxes we test with use that interface. OSDs are the only
         # nodes that have this interface.
-        cluster_address = Interface("eth2").addresses[0]
-        cmd = Command('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"')
+        cluster_address = host.interface("eth2").addresses[0]
+        cmd = host.run('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"')
         if cmd.rc == 0:
             osd_ids = cmd.stdout.rstrip("\n").split("\n")
             osds = osd_ids