tests: refactor update scenario (stable-3.2)

Refactor the update scenario to match what has been done in master.
(see f0e616962)

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/3771/head
Guillaume Abrioux 2019-03-28 09:11:09 +01:00
parent 8e2cfd9d24
commit f200f1ca87
4 changed files with 142 additions and 57 deletions

tests/conftest.py

@@ -1,6 +1,18 @@
 import pytest
+import json
 import os
+
+
+def str_to_bool(val):
+    try:
+        val = val.lower()
+    except AttributeError:
+        val = str(val).lower()
+    if val == 'true':
+        return True
+    elif val == 'false':
+        return False
+    else:
+        raise ValueError("Invalid input value: %s" % val)
+
+
 @pytest.fixture()
 def node(host, request):
@@ -18,13 +30,14 @@ def node(host, request):
     # because testinfra does not collect and provide ansible config passed in
     # from using --extra-vars
     ceph_stable_release = os.environ.get("CEPH_STABLE_RELEASE", "luminous")
-    rolling_update = os.environ.get("ROLLING_UPDATE", "False")
+    rolling_update = str_to_bool(os.environ.get("ROLLING_UPDATE", False))
     group_names = ansible_vars["group_names"]
     docker = ansible_vars.get("docker")
     fsid = ansible_vars.get("fsid")
     osd_auto_discovery = ansible_vars.get("osd_auto_discovery")
     osd_scenario = ansible_vars.get("osd_scenario")
     lvm_scenario = osd_scenario in ['lvm', 'lvm-batch']
+    devices = ansible_vars.get("devices", [])
     ceph_release_num = {
         'jewel': 10,
         'kraken': 11,
@@ -62,9 +75,6 @@ def node(host, request):
     if request.node.get_closest_marker("journal_collocation") and not journal_collocation_test:  # noqa E501
         pytest.skip("Scenario is not using journal collocation")
 
-    osd_ids = []
-    osds = []
-    cluster_address = ""
     # I can assume eth1 because I know all the vagrant
     # boxes we test with use that interface
     address = host.interface("eth1").addresses[0]
@@ -82,36 +92,23 @@ def node(host, request):
     # If number of devices doesn't map to number of OSDs, allow tests to define
     # that custom number, defaulting it to ``num_devices``
     num_osds = ansible_vars.get('num_osds', num_osds)
-    cluster_name = ansible_vars.get("cluster", "ceph")
+    if rolling_update:
+        cluster_name = "test"
+    else:
+        cluster_name = ansible_vars.get("cluster", "ceph")
     conf_path = "/etc/ceph/{}.conf".format(cluster_name)
-    if "osds" in group_names:
-        # I can assume eth2 because I know all the vagrant
-        # boxes we test with use that interface. OSDs are the only
-        # nodes that have this interface.
-        cluster_address = host.interface("eth2").addresses[0]
-        cmd = host.run('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"')
-        if cmd.rc == 0:
-            osd_ids = cmd.stdout.rstrip("\n").split("\n")
-            osds = osd_ids
-            if docker and fsid == "6e008d48-1661-11e8-8546-008c3214218a":
-                osds = []
-                for device in ansible_vars.get("devices", []):
-                    real_dev = host.run("sudo readlink -f %s" % device)
-                    real_dev_split = real_dev.stdout.split("/")[-1]
-                    osds.append(real_dev_split)
 
     data = dict(
         address=address,
         subnet=subnet,
         vars=ansible_vars,
-        osd_ids=osd_ids,
         num_mons=num_mons,
         num_osds=num_osds,
         cluster_name=cluster_name,
         conf_path=conf_path,
-        cluster_address=cluster_address,
         docker=docker,
-        osds=osds,
         fsid=fsid,
+        devices=devices,
         ceph_stable_release=ceph_stable_release,
         ceph_release_num=ceph_release_num,
         rolling_update=rolling_update,
@@ -142,4 +139,4 @@ def pytest_collection_modifyitems(session, config, items):
         item.add_marker(pytest.mark.all)
 
         if "journal_collocation" in test_path:
-            item.add_marker(pytest.mark.journal_collocation)
+            item.add_marker(pytest.mark.journal_collocation)
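
Why str_to_bool() is needed: os.environ only ever holds strings, so the old
rolling_update = os.environ.get("ROLLING_UPDATE", "False") was truthy even when
the variable was explicitly set to "False". A minimal standalone sanity check
(the function is copied from the diff above; the assertions are illustrative,
not part of the commit):

import os

def str_to_bool(val):
    # Normalize both str and bool inputs: bool has no .lower(),
    # so it is first coerced to its string form ("True"/"False").
    try:
        val = val.lower()
    except AttributeError:
        val = str(val).lower()
    if val == 'true':
        return True
    elif val == 'false':
        return False
    else:
        raise ValueError("Invalid input value: %s" % val)

os.environ["ROLLING_UPDATE"] = "False"
assert bool(os.environ["ROLLING_UPDATE"]) is True        # the old bug: non-empty string
assert str_to_bool(os.environ["ROLLING_UPDATE"]) is False
assert str_to_bool(os.environ.get("UNSET_VAR", False)) is False  # bool default handled too
assert str_to_bool("TRUE") is True   # case-insensitive, matches ROLLING_UPDATE=TRUE below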

tests/functional/tests/osd/test_osds.py

@@ -5,6 +5,35 @@ import os
 class TestOSDs(object):
 
+    def _get_osds_id(self, node, host):
+        osds = []
+        if node['rolling_update'] and node['docker']:
+            cmd = host.run('sudo docker exec {osd_id} ceph-disk list --format json'.format(osd_id=self._get_docker_exec_cmd(host)))
+            ceph_disk_list = json.loads(cmd.stdout)
+            for device in ceph_disk_list:
+                if 'partitions' in device.keys():
+                    for partition in device['partitions']:
+                        if 'type' in partition.keys() and partition['type'] == 'data':
+                            osds.append(device['path'].split('/')[-1])
+        else:
+            cmd = host.run('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"')
+            if cmd.rc == 0:
+                osd_ids = cmd.stdout.rstrip("\n").split("\n")
+                osds = osd_ids
+                if node['docker'] and node['fsid'] == "6e008d48-1661-11e8-8546-008c3214218a":
+                    osds = []
+                    for device in node['devices']:
+                        real_dev = host.run("sudo readlink -f %s" % device)
+                        real_dev_split = real_dev.stdout.split("/")[-1]
+                        osds.append(real_dev_split)
+        return osds
+
+    def _get_docker_exec_cmd(self, host):
+        osd_id = host.check_output(
+            "docker ps -q --filter='name=ceph-osd' | head -1")
+        return osd_id
+
     @pytest.mark.no_docker
     def test_ceph_osd_package_is_installed(self, node, host):
         assert host.package("ceph-osd").is_installed
@@ -17,23 +46,25 @@ class TestOSDs(object):
     def test_osds_listen_on_cluster_network(self, node, host):
         # TODO: figure out way to parameterize this test
         nb_port = (node["num_osds"] * 2)
-        assert host.check_output("netstat -lntp | grep ceph-osd | grep %s | wc -l" % (node["cluster_address"])) == str(nb_port)
+        assert host.check_output("netstat -lntp | grep ceph-osd | grep %s | wc -l" % (host.interface("eth2").addresses[0])) == str(nb_port)
 
     def test_osd_services_are_running(self, node, host):
         # TODO: figure out way to parameterize node['osds'] for this test
-        for osd in node["osds"]:
+        # for osd in node["osds"]:
+        for osd in self._get_osds_id(node, host):
             assert host.service("ceph-osd@%s" % osd).is_running
 
     @pytest.mark.no_lvm_scenario
     def test_osd_services_are_enabled(self, node, host):
         # TODO: figure out way to parameterize node['osds'] for this test
-        for osd in node["osds"]:
+        # for osd in node["osds"]:
+        for osd in self._get_osds_id(node, host):
             assert host.service("ceph-osd@%s" % osd).is_enabled
 
     @pytest.mark.no_docker
     def test_osd_are_mounted(self, node, host):
         # TODO: figure out way to parameterize node['osd_ids'] for this test
-        for osd_id in node["osd_ids"]:
+        for osd_id in self._get_osds_id(node, host):
             osd_path = "/var/lib/ceph/osd/{cluster}-{osd_id}".format(
                 cluster=node["cluster_name"],
                 osd_id=osd_id,
@@ -71,11 +102,9 @@ class TestOSDs(object):
     @pytest.mark.docker
     def test_all_docker_osds_are_up_and_in(self, node, host):
-        osd_id = host.check_output(
-            "docker ps -q --filter='name=ceph-osd' | head -1")
         cmd = "sudo docker exec {osd_id} ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(
-            osd_id=osd_id,
+            osd_id=self._get_docker_exec_cmd(host),
             cluster=node["cluster_name"]
         )
         output = json.loads(host.check_output(cmd))
-        assert node["num_osds"] == self._get_nb_up_osds_from_ids(node, output)
+        assert node["num_osds"] == self._get_nb_up_osds_from_ids(node, output)
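
A note on the containerized rolling-update branch of _get_osds_id(): it execs
into one ceph-osd container and parses ceph-disk list --format json. A minimal
standalone sketch of that partition walk, run against a hypothetical, abridged
ceph-disk output (only 'path', 'partitions' and 'type' are relied upon; the
real output carries many more fields):

import json

# Hypothetical, abridged `ceph-disk list --format json` stdout.
sample = json.dumps([
    {"path": "/dev/sda",
     "partitions": [{"path": "/dev/sda1", "type": "data"},
                    {"path": "/dev/sda2", "type": "journal"}]},
    {"path": "/dev/sdb",
     "partitions": [{"path": "/dev/sdb1", "type": "data"}]},
    {"path": "/dev/sr0"},  # a device without a 'partitions' key is skipped
])

def data_devices(ceph_disk_stdout):
    # Mirror of the loop in _get_osds_id: keep the basename of every
    # device that holds at least one partition of type 'data'.
    osds = []
    for device in json.loads(ceph_disk_stdout):
        for partition in device.get("partitions", []):
            if partition.get("type") == "data":
                osds.append(device["path"].split("/")[-1])
    return osds

assert data_devices(sample) == ["sda", "sdb"]

The device basename is what the containerized OSD units are templated on
(e.g. ceph-osd@sda), which is why these values can be fed straight into
host.service() in the tests above.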

tox-update.ini (new file, mode 100644)

@@ -0,0 +1,83 @@
[tox]
envlist = luminous-{centos,ubuntu}-{container,non_container}-update

skipsdist = True

[testenv]
whitelist_externals =
    vagrant
    bash
    cp
    git
    pip
passenv=*
setenv=
  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
  ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
  ANSIBLE_ACTION_PLUGINS = {toxinidir}/plugins/actions
  ANSIBLE_CALLBACK_PLUGINS = {toxinidir}/plugins/callback
  ANSIBLE_CALLBACK_WHITELIST = profile_tasks
  ANSIBLE_CACHE_PLUGIN = memory
  ANSIBLE_GATHERING = implicit
  ANSIBLE_STDOUT_CALLBACK = debug
  centos: CEPH_ANSIBLE_VAGRANT_BOX = centos/7
  fedora: CEPH_ANSIBLE_VAGRANT_BOX = fedora/29-atomic-host

  # Set the vagrant box image to use
  centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/7
  centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/atomic-host
  ubuntu: CEPH_ANSIBLE_VAGRANT_BOX = guits/ubuntu-bionic64

  # Set the ansible inventory host file to be used according to which distrib we are running on
  ubuntu: _INVENTORY = hosts-ubuntu
  INVENTORY = {env:_INVENTORY:hosts}
  container: CONTAINER_DIR = /container
  container: PLAYBOOK = site-docker.yml.sample
  non_container: PLAYBOOK = site.yml.sample

  CEPH_DOCKER_IMAGE_TAG = latest-jewel
  CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-jewel
  UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-luminous
  CEPH_STABLE_RELEASE = jewel
  UPDATE_CEPH_STABLE_RELEASE = luminous
  ROLLING_UPDATE = True

changedir={toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:}

commands=
  vagrant up --no-provision {posargs:--provider=virtualbox}
  bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}

  # use the stable-3.1 branch to deploy a luminous cluster
  git clone -b {env:CEPH_ANSIBLE_BRANCH:stable-3.1} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
  pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt

  ansible-playbook -vv -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons/{env:INVENTORY} {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

  # deploy the cluster
  ansible-playbook -vv -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/{env:PLAYBOOK:site.yml.sample} --limit 'all:!iscsigws' --extra-vars "\
      delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_stable_release={env:CEPH_STABLE_RELEASE:jewel} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-jewel} \
      copy_admin_key={env:COPY_ADMIN_KEY:False} \
  "

  pip install -r {toxinidir}/tests/requirements.txt

  cp {toxinidir}/infrastructure-playbooks/rolling_update.yml {toxinidir}/rolling_update.yml
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/rolling_update.yml --extra-vars \
      "\{'ireallymeanit': 'yes', \
        'cluster': 'test', \
        'fetch_directory': '{env:FETCH_DIRECTORY:{changedir}/fetch}', \
        'ceph_stable_release': '{env:UPDATE_CEPH_STABLE_RELEASE:luminous}', \
        'ceph_docker_registry': '{env:CEPH_DOCKER_REGISTRY:docker.io}', \
        'ceph_docker_image': '{env:UPDATE_CEPH_DOCKER_IMAGE:ceph/daemon}', \
        'ceph_docker_image_tag': '{env:UPDATE_CEPH_DOCKER_IMAGE_TAG:latest-luminous}', \
        'devices': [/dev/sda,/dev/sdb], \
        'dedicated_devices': [/dev/sdc,/dev/sdc], \
        'osd_scenario': 'non-collocated' \}"

  bash -c "CEPH_STABLE_RELEASE={env:UPDATE_CEPH_STABLE_RELEASE:luminous} ROLLING_UPDATE=TRUE py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests"

  vagrant destroy --force
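
For reference, this scenario is meant to be driven by its own config file
rather than the main tox.ini, e.g. tox -c tox-update.ini -e
luminous-centos-non_container-update (environment name per the envlist above;
the exact invocation is illustrative, not part of the commit).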

tox.ini

@@ -1,5 +1,5 @@
 [tox]
-envlist = {luminous,mimic,rhcs}-{centos,ubuntu}-{container,non_container}-{all_daemons,collocation,update,bluestore_lvm_osds,lvm_osds,shrink_mon,shrink_osd,lvm_batch,add_osds,rgw_multisite,purge}
+envlist = {luminous,mimic,rhcs}-{centos,ubuntu}-{container,non_container}-{all_daemons,collocation,bluestore_lvm_osds,lvm_osds,shrink_mon,shrink_osd,lvm_batch,add_osds,rgw_multisite,purge}
   {luminous,mimic,rhcs}-{centos,ubuntu}-non_container-switch_to_containers
   {luminous,mimic,rhcs}-{centos,ubuntu}-container-{cluster,ooo_collocation,infra_lv_create}
   infra_lv_create
@@ -96,24 +96,6 @@ commands=
   # test that the cluster can be redeployed in a healthy state
   py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
 
-# extra commands for performing a rolling update
-# currently this hardcodes the release to kraken
-# as we're still installing jewel by default
-[update]
-commands=
-  cp {toxinidir}/infrastructure-playbooks/rolling_update.yml {toxinidir}/rolling_update.yml
-  ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "change_dir={changedir} ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
-  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/rolling_update.yml --extra-vars "\
-      ireallymeanit=yes \
-      ceph_stable_release={env:UPDATE_CEPH_STABLE_RELEASE:luminous} \
-      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
-      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-      ceph_docker_image={env:UPDATE_CEPH_DOCKER_IMAGE:ceph/daemon} \
-      ceph_docker_image_tag={env:UPDATE_CEPH_DOCKER_IMAGE_TAG:latest-luminous} \
-  "
-  bash -c "CEPH_STABLE_RELEASE={env:UPDATE_CEPH_STABLE_RELEASE:nautilus} py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests"
 
 [shrink-mon]
 commands=
   cp {toxinidir}/infrastructure-playbooks/shrink-mon.yml {toxinidir}/shrink-mon.yml
@@ -221,10 +203,6 @@ setenv=
   rhcs: CEPH_STABLE_RELEASE = luminous
   lvm_osds: CEPH_STABLE_RELEASE = luminous
   rhcs: CEPH_STABLE_RELEASE = luminous
-  update: ROLLING_UPDATE = True
-  update: CEPH_STABLE_RELEASE = jewel
-  container-update: CEPH_DOCKER_IMAGE_TAG = latest-jewel
-  container-update: CEPH_DOCKER_IMAGE_TAG_BIS = latest-jewel-bis
   ooo_collocation: CEPH_DOCKER_IMAGE_TAG = v3.0.3-stable-3.0-luminous-centos-7-x86_64
 
 deps= -r{toxinidir}/tests/requirements.txt
@@ -235,7 +213,6 @@ changedir=
   shrink_osd: {toxinidir}/tests/functional/shrink_osd{env:CONTAINER_DIR:}
   # tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using docker
   collocation: {toxinidir}/tests/functional/collocation{env:CONTAINER_DIR:}
-  update: {toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:}
   purge: {toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:}
   switch_to_containers: {toxinidir}/tests/functional/all_daemons
   lvm_osds: {toxinidir}/tests/functional/lvm-osds{env:CONTAINER_DIR:}
@@ -294,7 +271,6 @@ commands=
   purge: {[purge]commands}
   switch_to_containers: {[switch-to-containers]commands}
-  update: {[update]commands}
   shrink_mon: {[shrink-mon]commands}
   shrink_osd: {[shrink-osd]commands}
   add_osds: {[add-osds]commands}