mirror of https://github.com/ceph/ceph-ansible.git
tests: refact test_all_*_osds_are_up_and_in

These tests are skipped on bluestore OSD scenarios. They were going to fail anyway, since they run on mon nodes, where `devices` is not set (it is defined in the inventory for each OSD node only); as a result, `num_devices * num_osd_hosts` returns `0` and the test ends up expecting 0 OSDs to be up. The idea here is to move these tests so they run on the OSD nodes instead. Each OSD node checks its own OSDs: if a node has 2 devices defined in its `devices` variable, we check that 2 OSDs are up on that node. If each node has all of its OSDs up, we can say all OSDs are up.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>

pull/2805/head
parent 2d560b562a
commit fe79a5d240
@@ -89,8 +89,6 @@ def node(host, request):
     num_devices = len(ansible_vars.get("devices", []))
     if not num_devices:
         num_devices = len(ansible_vars.get("lvm_volumes", []))
-    num_osd_hosts = len(ansible_vars["groups"]["osds"])
-    total_osds = num_devices * num_osd_hosts
     cluster_name = ansible_vars.get("cluster", "ceph")
     conf_path = "/etc/ceph/{}.conf".format(cluster_name)
     if "osds" in group_names:
@@ -116,8 +114,6 @@ def node(host, request):
         osd_ids=osd_ids,
         num_mons=num_mons,
         num_devices=num_devices,
-        num_osd_hosts=num_osd_hosts,
-        total_osds=total_osds,
         cluster_name=cluster_name,
         conf_path=conf_path,
         cluster_address=cluster_address,
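
For reference, this is roughly what the `node` fixture carries after these two hunks: the cluster-wide `num_osd_hosts` and `total_osds` fields are gone, and each host's own `num_devices` becomes the number of OSDs its tests expect to be up. A minimal sketch, with keys taken from the diff and purely made-up values:

    # Hypothetical contents of the `node` fixture after this change;
    # the values below are illustrative examples only.
    node = dict(
        osd_ids=["0", "1"],
        num_mons=3,
        num_devices=2,  # len(devices), or len(lvm_volumes) when devices is empty
        cluster_name="ceph",
        conf_path="/etc/ceph/ceph.conf",
        cluster_address="192.168.121.50",
    )
    print(node["num_devices"])  # each OSD node now expects this many OSDs up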
@@ -40,22 +40,3 @@ class TestMons(object):
             result = False
         assert result
 
-
-class TestOSDs(object):
-
-    @pytest.mark.no_docker
-    def test_all_osds_are_up_and_in(self, node, host):
-        cmd = "sudo ceph --cluster={} --connect-timeout 5 -s".format(node["cluster_name"])
-        output = host.check_output(cmd)
-        phrase = "{num_osds} osds: {num_osds} up, {num_osds} in".format(num_osds=node["total_osds"])
-        assert phrase in output
-
-    @pytest.mark.docker
-    def test_all_docker_osds_are_up_and_in(self, node, host):
-        cmd = "sudo docker exec ceph-mon-{} ceph --cluster={} --connect-timeout 5 -s".format(
-            node["vars"]["inventory_hostname"],
-            node["cluster_name"]
-        )
-        output = host.check_output(cmd)
-        phrase = "{num_osds} osds: {num_osds} up, {num_osds} in".format(num_osds=node["total_osds"])
-        assert phrase in output
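
A quick illustration of why the removed assertion was doomed on mon nodes: `devices` is not set there, so `num_devices` resolved to 0 and the expected phrase degenerated. Pure string handling, no cluster required; the host count of 3 is an arbitrary example:

    # On a mon node num_devices == 0, so with e.g. 3 OSD hosts:
    total_osds = 0 * 3
    phrase = "{num_osds} osds: {num_osds} up, {num_osds} in".format(num_osds=total_osds)
    print(phrase)  # '0 osds: 0 up, 0 in' -- never present in 'ceph -s' once real OSDs exist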
@@ -1,4 +1,6 @@
 import pytest
+import json
+import os
 
 
 class TestOSDs(object):
@@ -45,3 +47,32 @@ class TestOSDs(object):
     @pytest.mark.lvm_scenario
     def test_ceph_volume_systemd_is_installed(self, node, host):
         host.exists('ceph-volume-systemd')
+
+    def _get_osd_id_from_host(self, node, osd_tree):
+        for n in osd_tree['nodes']:
+            if n['name'] == node['vars']['inventory_hostname'] and n['type'] == 'host':
+                children = n['children']
+        return children
+
+    def _get_nb_up_osds_from_ids(self, node, osd_tree):
+        nb_up = 0
+        ids = self._get_osd_id_from_host(node, osd_tree)
+        for n in osd_tree['nodes']:
+            if n['id'] in ids and n['status'] == 'up':
+                nb_up += 1
+        return nb_up
+
+    @pytest.mark.no_docker
+    def test_all_osds_are_up_and_in(self, node, host):
+        cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(cluster=node["cluster_name"])
+        output = json.loads(host.check_output(cmd))
+        assert node["num_devices"] == self._get_nb_up_osds_from_ids(node, output)
+
+    @pytest.mark.docker
+    def test_all_docker_osds_are_up_and_in(self, node, host):
+        cmd = "sudo docker exec ceph-osd-{hostname}-sda ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(
+            hostname=node["vars"]["inventory_hostname"],
+            cluster=node["cluster_name"]
+        )
+        output = json.loads(host.check_output(cmd))
+        assert node["num_devices"] == self._get_nb_up_osds_from_ids(node, output)
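
To make the new traversal concrete, here is a minimal, self-contained sketch that runs the same logic as the two helpers above against a trimmed `ceph osd tree -f json` payload. The payload, the host name `osd0`, and the ids are all made up; the functions are free-standing copies of the class methods so the snippet runs on its own:

    import json

    # Trimmed, illustrative `osd tree` output: a host entry lists its OSD
    # ids under "children"; each osd entry carries a "status".
    osd_tree = json.loads('''
    {
      "nodes": [
        {"id": -2, "name": "osd0", "type": "host", "children": [0, 1]},
        {"id": 0, "name": "osd.0", "type": "osd", "status": "up"},
        {"id": 1, "name": "osd.1", "type": "osd", "status": "up"}
      ]
    }
    ''')

    node = {"vars": {"inventory_hostname": "osd0"}, "num_devices": 2}

    def get_osd_id_from_host(node, osd_tree):
        for n in osd_tree['nodes']:
            if n['name'] == node['vars']['inventory_hostname'] and n['type'] == 'host':
                children = n['children']
        return children

    def get_nb_up_osds_from_ids(node, osd_tree):
        nb_up = 0
        ids = get_osd_id_from_host(node, osd_tree)
        for n in osd_tree['nodes']:
            if n['id'] in ids and n['status'] == 'up':
                nb_up += 1
        return nb_up

    # 2 devices on this node, 2 OSDs up under its host entry -> the test passes.
    assert get_nb_up_osds_from_ids(node, osd_tree) == node["num_devices"]

One caveat worth noting: the docker variant's container name, `ceph-osd-{hostname}-sda`, hardcodes an `sda` suffix, so it implicitly assumes the node's first configured device is `/dev/sda`.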