mirror of https://github.com/ceph/ceph-ansible.git
tests/conftest: update node fixture for new testinfra API
Signed-off-by: Alfredo Deza <adeza@redhat.com>
pull/1781/head
parent b8af5fc5a0
commit c0ad0b58f0
@@ -3,25 +3,29 @@ import os
 @pytest.fixture()
-def node(Ansible, Interface, Command, request):
+def node(host, request):
     """
     This fixture represents a single node in the ceph cluster. Using the
-    Ansible fixture provided by testinfra it can access all the ansible variables
+    host.ansible fixture provided by testinfra it can access all the ansible variables
     provided to it by the specific test scenario being run.
 
     You must include this fixture in any tests that operate on a specific type of node
     because it contains the logic to manage which tests a node should run.
     """
-    ansible_vars = Ansible.get_variables()
+    ansible_vars = host.ansible.get_variables()
     # tox will pass in this environment variable. we need to do it this way
     # because testinfra does not collect and provide ansible config passed in
     # from using --extra-vars
     ceph_stable_release = os.environ.get("CEPH_STABLE_RELEASE", "kraken")
     node_type = ansible_vars["group_names"][0]
     docker = ansible_vars.get("docker")
     lvm_scenario = ansible_vars.get("osd_scenario") == 'lvm'
     if not request.node.get_marker(node_type) and not request.node.get_marker('all'):
         pytest.skip("Not a valid test for node type: %s" % node_type)
 
     if not lvm_scenario and request.node.get_marker("lvm_scenario"):
         pytest.skip("Not a valid test for non-lvm scenarios")
 
     if request.node.get_marker("no_docker") and docker:
         pytest.skip("Not a valid test for containerized deployments or atomic hosts")
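For context, individual tests opt in to the skip logic above through pytest markers named after the node's ansible group. A minimal sketch of a consuming test (the test name and assertion are hypothetical; the markers mirror the ones the fixture inspects):

    import pytest

    @pytest.mark.mons        # only runs on monitor nodes
    @pytest.mark.no_docker   # skipped on containerized deployments
    def test_ceph_conf_exists(node, host):
        assert host.file("/etc/ceph/ceph.conf").exists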
@@ -40,7 +44,7 @@ def node(Ansible, Interface, Command, request):
     cluster_address = ""
     # I can assume eth1 because I know all the vagrant
     # boxes we test with use that interface
-    address = Interface("eth1").addresses[0]
+    address = host.interface("eth1").addresses[0]
     subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
     num_mons = len(ansible_vars["groups"]["mons"])
     num_devices = len(ansible_vars.get("devices", []))
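As a worked example of the subnet line above, assuming a hypothetical public_network of "192.168.3.0/24", the expression keeps everything before the last dot:

    >>> ".".join("192.168.3.0/24".split(".")[0:-1])
    '192.168.3'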
@@ -54,8 +58,8 @@ def node(Ansible, Interface, Command, request):
     # I can assume eth2 because I know all the vagrant
     # boxes we test with use that interface. OSDs are the only
     # nodes that have this interface.
-    cluster_address = Interface("eth2").addresses[0]
-    cmd = Command('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"')
+    cluster_address = host.interface("eth2").addresses[0]
+    cmd = host.run('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"')
     if cmd.rc == 0:
         osd_ids = cmd.stdout.rstrip("\n").split("\n")
         osds = osd_ids
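Taken together, the change replaces the separate Ansible, Interface, and Command fixtures with testinfra's unified host fixture. A minimal standalone sketch of the same calls (the echo command and test name are only placeholders):

    def test_host_api_sketch(host):
        # host.run returns a result object with rc, stdout and stderr,
        # covering what the old Command fixture did
        cmd = host.run("echo hello")
        assert cmd.rc == 0
        assert cmd.stdout.rstrip("\n") == "hello"
        # host.interface and host.ansible replace the old Interface
        # and Ansible fixtures used in the hunks above
        eth1_addresses = host.interface("eth1").addresses
        ansible_vars = host.ansible.get_variables()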