tests: test podman against atomic os instead of rhel8

the rhel8 image used here is an outdated beta version; it is not worth
maintaining this image upstream, since podman can be tested against a
newer centos/atomic-host image instead.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
Guillaume Abrioux 2019-05-23 10:49:54 +02:00 committed by Dimitri Savineau
parent 2d375e1aa7
commit a78fb209b1
18 changed files with 92 additions and 185 deletions

@@ -1,7 +1,6 @@
---
- name: include prerequisites.yml
  include_tasks: prerequisites.yml
  when: not is_atomic
- name: get docker version
  block:

@@ -2,6 +2,18 @@ import pytest
import os


def str_to_bool(val):
    try:
        val = val.lower()
    except AttributeError:
        val = str(val).lower()
    if val == 'true':
        return True
    elif val == 'false':
        return False
    else:
        raise ValueError("Invalid input value: %s" % val)


@pytest.fixture(scope="module")
def setup(host):
    cluster_address = ""
@@ -59,7 +71,7 @@ def setup(host):
    if docker:
        container_binary = "docker"
-    if docker and host.exists("podman") and ansible_distribution in ["Fedora", "RedHat"]:  # noqa E501
+    if docker and str_to_bool(os.environ.get('IS_PODMAN', False)):  # noqa E501
        container_binary = "podman"
    data = dict(
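
For reference, the new helper deliberately accepts both real booleans and their string forms, so the False default from os.environ.get('IS_PODMAN', False) parses the same way as the IS_PODMAN = TRUE exported by the tox scenario below. A quick interpreter session illustrating the behavior (a sketch; only str_to_bool itself comes from this change):

    >>> str_to_bool('TRUE')   # value exported by tox-podman.ini
    True
    >>> str_to_bool(False)    # default when IS_PODMAN is unset
    False
    >>> str_to_bool('yes')    # anything else is rejected
    Traceback (most recent call last):
        ...
    ValueError: Invalid input value: yes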

@@ -1 +0,0 @@
../../../Vagrantfile

@@ -1 +0,0 @@
../all_daemons/ceph-override.json

@@ -1,39 +0,0 @@
---
# this is only here to let the CI tests know
# that this scenario is using docker
docker: True
containerized_deployment: True
monitor_interface: ens5
radosgw_interface: ens5
ceph_mon_docker_subnet: "{{ public_network }}"
ceph_docker_on_openstack: False
public_network: "192.168.22.0/24"
cluster_network: "192.168.23.0/24"
rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400
ceph_conf_overrides:
  global:
    osd_pool_default_size: 1
openstack_config: True
openstack_glance_pool:
  name: "images"
  pg_num: "{{ osd_pool_default_pg_num }}"
  pgp_num: "{{ osd_pool_default_pg_num }}"
  rule_name: "HDD"
  type: 1
  erasure_profile: ""
  expected_num_objects: ""
  size: 1
openstack_cinder_pool:
  name: "volumes"
  pg_num: "{{ osd_pool_default_pg_num }}"
  pgp_num: "{{ osd_pool_default_pg_num }}"
  rule_name: "HDD"
  type: 1
  erasure_profile: ""
  expected_num_objects: ""
  size: 1
openstack_pools:
  - "{{ openstack_glance_pool }}"
  - "{{ openstack_cinder_pool }}"

@@ -1,22 +0,0 @@
---
user_config: True
copy_admin_key: True
test:
  name: "test"
  pg_num: "{{ osd_pool_default_pg_num }}"
  pgp_num: "{{ osd_pool_default_pg_num }}"
  rule_name: "HDD"
  type: 1
  erasure_profile: ""
  expected_num_objects: ""
test2:
  name: "test2"
  pg_num: "{{ osd_pool_default_pg_num }}"
  pgp_num: "{{ osd_pool_default_pg_num }}"
  rule_name: "HDD"
  type: 1
  erasure_profile: ""
  expected_num_objects: ""
pools:
  - "{{ test }}"
  - "{{ test2 }}"

@@ -1,3 +0,0 @@
---
gateway_ip_list: 192.168.1.90
generate_crt: True

@@ -1,10 +0,0 @@
---
create_crush_tree: True
crush_rule_config: True
crush_rule_hdd:
  name: HDD
  root: HDD
  type: host
  default: true
crush_rules:
  - "{{ crush_rule_hdd }}"

@@ -1,10 +0,0 @@
---
ceph_osd_docker_run_script_path: /var/tmp
osd_objectstore: "bluestore"
lvm_volumes:
  - data: data-lv1
    data_vg: test_group
  - data: data-lv2
    data_vg: test_group
    db: journal1
    db_vg: journals

@@ -1,7 +0,0 @@
---
copy_admin_key: True
rgw_create_pools:
  foo:
    pg_num: 17
  bar:
    pg_num: 19

@@ -1,30 +0,0 @@
[mons]
mon0
mon1
mon2
[osds]
osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
[mdss]
mds0
[rgws]
rgw0
#[nfss]
#nfs0
#[clients]
#client0
#client1
[rbdmirrors]
rbd-mirror0
[iscsigws]
iscsi-gw0
[all:vars]
ansible_python_interpreter=/usr/bin/python3

@@ -1,33 +0,0 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: True
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 3
osd_vms: 2
mds_vms: 1
rgw_vms: 1
nfs_vms: 0
rbd_mirror_vms: 1
client_vms: 0
iscsi_gw_vms: 1
mgr_vms: 1
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.22
cluster_subnet: 192.168.23
# MEMORY
# set 1024 for CentOS
memory: 1024
vagrant_box: rhel8-x86_64
vagrant_box_url: 'http://drop.front.sepia.ceph.com/vagrant/rhel8-x86_64.box'
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true

@@ -5,7 +5,7 @@ docker: True
containerized_deployment: True
monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'ens1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
ceph_docker_on_openstack: False
public_network: "192.168.30.0/24"

@@ -26,5 +26,5 @@ rbd-mirror0
[iscsigws]
iscsi-gw0
-[all:vars]
-ansible_python_interpreter=/usr/bin/python3
+#[all:vars]
+#ansible_python_interpreter=/usr/bin/python3

@@ -22,7 +22,7 @@ cluster_subnet: 192.168.31
# set 1024 for CentOS
memory: 1024
-vagrant_box: fedora/29-atomic-host
+vagrant_box: centos/atomic-host
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync

@@ -1,14 +0,0 @@
- hosts: all
  gather_facts: true
  become: yes
  tasks:
    - name: unlock /usr
      command: ostree admin unlock --hotfix
      changed_when: false
      when: ansible_distribution == 'Fedora'

    - name: remove docker binary on fedora to simulate rhel8
      file:
        path: /usr/bin/docker
        state: absent
      when: ansible_distribution == 'Fedora'

tox-podman.ini (new file, mode 100644, 74 additions)

@@ -0,0 +1,74 @@
[tox]
envlist = dev-centos-container-podman

skipsdist = True

[testenv]
whitelist_externals =
    vagrant
    bash
    pip
    sleep
    rm
passenv=*
sitepackages=True
setenv=
  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
  ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
  ANSIBLE_ACTION_PLUGINS = {toxinidir}/plugins/actions
  ANSIBLE_CALLBACK_PLUGINS = {toxinidir}/plugins/callback
  ANSIBLE_CALLBACK_WHITELIST = profile_tasks
  ANSIBLE_KEEP_REMOTE_FILES = 1
  ANSIBLE_CACHE_PLUGIN = memory
  ANSIBLE_GATHERING = implicit
  # only available for ansible >= 2.5
  ANSIBLE_STDOUT_CALLBACK = yaml
  # Set the vagrant box image to use
  CEPH_ANSIBLE_VAGRANT_BOX = centos/atomic-host
  # Set the ansible inventory host file to be used according to which distrib we are running on
  INVENTORY = {env:_INVENTORY:hosts}
  PLAYBOOK = site-docker.yml.sample
  PURGE_PLAYBOOK = purge-docker-cluster.yml
  IS_PODMAN = TRUE
  CEPH_STABLE_RELEASE = nautilus

deps= -r{toxinidir}/tests/requirements.txt
changedir= {toxinidir}/tests/functional/podman

commands=
  bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
  bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}

  # configure lvm
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml

  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml

  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
      delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
      copy_admin_key={env:COPY_ADMIN_KEY:False} \
      container_binary=podman \
      container_package_name=podman \
      container_service_name=podman \
      container_binding_name=podman \
  "

  # wait 30sec for services to be ready
  sleep 30
  # test cluster state using ceph-ansible tests
  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
  # wait 30sec for services to be ready
  # reboot all vms
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml
  # wait 30sec for services to be ready
  # retest to ensure cluster came back up correctly after rebooting
  py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
  vagrant destroy -f
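
Since this scenario now lives in its own config file rather than in tox.ini, it would presumably be invoked by pointing tox at that file explicitly, e.g. `tox -c tox-podman.ini` (tox's `-c` option selects an alternate config file, and `dev-centos-container-podman` is the only env in its envlist), rather than through the default tox run.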

tox.ini (10 changes)

@@ -1,8 +1,7 @@
[tox]
envlist = {dev,rhcs}-{centos,ubuntu}-{container,non_container}-{all_daemons,collocation,lvm_osds,shrink_mon,shrink_osd,lvm_batch,add_mons,add_osds,add_mgrs,add_mdss,add_rbdmirrors,add_rgws,rgw_multisite,purge,storage_inventory,lvm_auto_discovery}
-          {dev,rhcs}-{centos,ubuntu}-container-{ooo_collocation,podman}
+          {dev,rhcs}-{centos,ubuntu}-container-{ooo_collocation}
           {dev,rhcs}-{centos,ubuntu}-non_container-{switch_to_containers}
-          dev-rhel-container-podman
           infra_lv_create
           migrate_ceph_disk_to_ceph_volume
@@ -347,10 +346,7 @@ setenv=
  # Set the vagrant box image to use
  centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/7
  centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/atomic-host
-  podman: CEPH_ANSIBLE_VAGRANT_BOX = fedora/29-atomic-host
  ubuntu: CEPH_ANSIBLE_VAGRANT_BOX = guits/ubuntu-bionic64
-  dev-rhel-container-podman: CEPH_ANSIBLE_VAGRANT_BOX = rhel8-x86_64
-  dev-rhel-container-podman: CEPH_ANSIBLE_VAGRANT_BOX_URL = http://drop.front.sepia.ceph.com/vagrant/rhel8-x86_64.box
  # Set the ansible inventory host file to be used according to which distrib we are running on
  ubuntu: _INVENTORY = hosts-ubuntu
@@ -359,7 +355,6 @@ setenv=
  container: PLAYBOOK = site-docker.yml.sample
  container: PURGE_PLAYBOOK = purge-docker-cluster.yml
  storage_inventory: COPY_ADMIN_KEY = True
-  podman: PLAYBOOK = site-docker.yml.sample
  non_container: PLAYBOOK = site.yml.sample
  shrink_mon: MON_TO_KILL = mon2
  shrink_osd: COPY_ADMIN_KEY = True
@@ -399,7 +394,6 @@ changedir=
  add_rbdmirrors: {toxinidir}/tests/functional/add-rbdmirrors{env:CONTAINER_DIR:}
  add_rgws: {toxinidir}/tests/functional/add-rgws{env:CONTAINER_DIR:}
  rgw_multisite: {toxinidir}/tests/functional/rgw-multisite{env:CONTAINER_DIR:}
-  podman: {toxinidir}/tests/functional/podman
  storage_inventory: {toxinidir}/tests/functional/lvm-osds{env:CONTAINER_DIR:}
  lvm_auto_discovery: {toxinidir}/tests/functional/lvm-auto-discovery{env:CONTAINER_DIR:}
@@ -413,8 +407,6 @@ commands=
  # configure lvm
  !lvm_batch-!lvm_auto_discovery: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
-  podman: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/simulate_rhel8.yml
  rhcs: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml