mirror of https://github.com/ceph/ceph-ansible.git
tests: add a rhel8 scenario testing
test upstream with rhel8 vagrant image

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>

pull/3571/head
parent fdca29f2a7
commit 0d72fe9b30
@@ -20,8 +20,8 @@ MGRS = settings['mgr_vms']
 PUBLIC_SUBNET = settings['public_subnet']
 CLUSTER_SUBNET = settings['cluster_subnet']
 BOX = ENV['CEPH_ANSIBLE_VAGRANT_BOX'] || settings['vagrant_box']
-CLIENT_BOX = settings['client_vagrant_box'] || BOX
-BOX_URL = settings['vagrant_box_url']
+CLIENT_BOX = ENV['CEPH_ANSIBLE_VAGRANT_BOX'] || settings['client_vagrant_box'] || BOX
+BOX_URL = ENV['CEPH_ANSIBLE_VAGRANT_BOX_URL'] || settings['vagrant_box_url']
 SYNC_DIR = settings['vagrant_sync_dir']
 MEMORY = settings['memory']
 ETH = settings['eth']
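Note: both box settings now honour the CEPH_ANSIBLE_VAGRANT_BOX / CEPH_ANSIBLE_VAGRANT_BOX_URL environment variables before falling back to vagrant_variables.yml, which is what lets tox point a scenario at the rhel8 image without editing scenario files. A minimal Python sketch of the same fallback chain (the Vagrantfile itself is Ruby; function name hypothetical):

import os

# Mirror of the Ruby `ENV[...] || settings[...] || BOX` chain: environment
# variable first, then the per-scenario setting, then the global default.
def resolve_client_box(settings, default_box):
    return (os.environ.get('CEPH_ANSIBLE_VAGRANT_BOX')
            or settings.get('client_vagrant_box')
            or default_box)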
@@ -126,6 +126,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
       lv.cpu_mode = 'host-passthrough'
       lv.volume_cache = 'unsafe'
       lv.graphics_type = 'none'
+      lv.cpus = 2
     end

     # Faster bootup. Disables mounting the sync folder for libvirt and virtualbox
@@ -33,6 +33,7 @@ def node(host, request):
         'mimic': 13,
         'dev': 99
     }
+    ansible_distribution = host.ansible("setup")["ansible_facts"]['ansible_distribution']  # noqa E501

    # capture the initial/default state
    test_is_applicable = False
@@ -68,9 +69,15 @@ def node(host, request):
     osd_ids = []
     osds = []
     cluster_address = ""
-    # I can assume eth1 because I know all the vagrant
-    # boxes we test with use that interface
-    address = host.interface("eth1").addresses[0]
+    container_binary = ""
+
+    if ansible_distribution == 'RedHat':
+        public_interface = 'ens6'
+        cluster_interface = 'ens7'
+    else:
+        public_interface = 'eth1'
+        cluster_interface = 'eth2'
+    address = host.interface(public_interface).addresses[0]
     subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
     num_mons = len(ansible_vars["groups"]["mons"])
     if osd_auto_discovery:
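Note: RHEL 8 guests come up with predictable NIC names (ens5/ens6/ens7) instead of the ethN names the CentOS and Ubuntu boxes use, so the fixture now picks the interface per distribution before resolving the node address. A rough standalone sketch with testinfra (target host hypothetical; the suite reads the fact via host.ansible("setup") instead):

import testinfra

host = testinfra.get_host("ssh://mon0")          # hypothetical target
distro = host.system_info.distribution          # e.g. 'rhel' on RHEL 8
public_interface = 'ens6' if distro == 'rhel' else 'eth1'
print(host.interface(public_interface).addresses[0])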
@@ -88,10 +95,7 @@ def node(host, request):
     cluster_name = ansible_vars.get("cluster", "ceph")
     conf_path = "/etc/ceph/{}.conf".format(cluster_name)
     if "osds" in group_names:
-        # I can assume eth2 because I know all the vagrant
-        # boxes we test with use that interface. OSDs are the only
-        # nodes that have this interface.
-        cluster_address = host.interface("eth2").addresses[0]
+        cluster_address = host.interface(cluster_interface).addresses[0]
         cmd = host.run('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"')
         if cmd.rc == 0:
             osd_ids = cmd.stdout.rstrip("\n").split("\n")
@@ -103,6 +107,11 @@ def node(host, request):
             real_dev_split = real_dev.stdout.split("/")[-1]
             osds.append(real_dev_split)

+    if docker:
+        container_binary = 'docker'
+    if docker and host.exists('podman') and ansible_distribution in ['Fedora', 'RedHat']:  # noqa E501
+        container_binary = 'podman'
+
     data = dict(
         address=address,
         subnet=subnet,
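Note: detection of the container binary now happens once in the fixture: podman wins on Fedora and RHEL when it is installed, docker otherwise. An equivalent standalone sketch (helper name hypothetical; the fixture probes the remote node with host.exists('podman'), shutil.which is the local analogue):

import shutil

def pick_container_binary(docker_enabled, distribution):
    if not docker_enabled:
        return ""
    if shutil.which('podman') and distribution in ('Fedora', 'RedHat'):
        return 'podman'
    return 'docker'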
@@ -119,6 +128,7 @@ def node(host, request):
         ceph_release_num=ceph_release_num,
         rolling_update=rolling_update,
         radosgw_num_instances=radosgw_num_instances,
+        container_binary=container_binary,
     )
     return data

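Note: with container_binary exported in the fixture's data dict, every test below can read node["container_binary"] directly instead of re-probing for podman. Sketch of the consumption, with hypothetical values:

# Hypothetical payload shape after this change.
node = dict(docker=True, container_binary='podman', cluster_name='ceph')
prefix = ('{} exec ceph-mon-mon0'.format(node['container_binary'])
          if node['docker'] else '')
assert prefix == 'podman exec ceph-mon-mon0'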
@@ -0,0 +1 @@
+../../../Vagrantfile
@@ -0,0 +1 @@
+../all_daemons/ceph-override.json
@@ -0,0 +1,39 @@
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+containerized_deployment: True
+monitor_interface: ens5
+radosgw_interface: ens5
+ceph_mon_docker_subnet: "{{ public_network }}"
+ceph_docker_on_openstack: False
+public_network: "192.168.22.0/24"
+cluster_network: "192.168.23.0/24"
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+ceph_conf_overrides:
+  global:
+    osd_pool_default_size: 1
+openstack_config: True
+openstack_glance_pool:
+  name: "images"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "HDD"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  size: 1
+openstack_cinder_pool:
+  name: "volumes"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "HDD"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  size: 1
+openstack_pools:
+  - "{{ openstack_glance_pool }}"
+  - "{{ openstack_cinder_pool }}"
@@ -0,0 +1,22 @@
+---
+user_config: True
+copy_admin_key: True
+test:
+  name: "test"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "HDD"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+test2:
+  name: "test2"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "HDD"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+pools:
+  - "{{ test }}"
+  - "{{ test2 }}"
@@ -0,0 +1,3 @@
+---
+gateway_ip_list: 192.168.1.90
+generate_crt: True
@@ -0,0 +1,10 @@
+---
+create_crush_tree: True
+crush_rule_config: True
+crush_rule_hdd:
+  name: HDD
+  root: HDD
+  type: host
+  default: true
+crush_rules:
+  - "{{ crush_rule_hdd }}"
@@ -0,0 +1,11 @@
+---
+ceph_osd_docker_run_script_path: /var/tmp
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+lvm_volumes:
+  - data: data-lv1
+    data_vg: test_group
+  - data: data-lv2
+    data_vg: test_group
+    db: journal1
+    db_vg: journals
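Note: each lvm_volumes entry names a pre-created LV (plus an optional db LV for the BlueStore DB) that the ceph-osd role hands to ceph-volume. Roughly, the first entry corresponds to something like the following; a sketch only, the real role templates more flags:

import subprocess

# Hypothetical approximation of what the role drives per entry.
entry = {'data': 'data-lv1', 'data_vg': 'test_group'}
subprocess.run(['ceph-volume', 'lvm', 'create', '--bluestore',
                '--data', '{}/{}'.format(entry['data_vg'], entry['data'])],
               check=True)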
@@ -0,0 +1,7 @@
+---
+copy_admin_key: True
+rgw_create_pools:
+  foo:
+    pg_num: 17
+  bar:
+    pg_num: 19
@@ -0,0 +1,30 @@
+[mons]
+mon0
+mon1
+mon2
+
+[osds]
+osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
+osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
+
+[mdss]
+mds0
+
+[rgws]
+rgw0
+
+#[nfss]
+#nfs0
+
+#[clients]
+#client0
+#client1
+
+[rbdmirrors]
+rbd-mirror0
+
+[iscsigws]
+iscsi-gw0
+
+[all:vars]
+ansible_python_interpreter=/usr/bin/python3
@@ -0,0 +1,33 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 2
+mds_vms: 1
+rgw_vms: 1
+nfs_vms: 0
+rbd_mirror_vms: 1
+client_vms: 0
+iscsi_gw_vms: 1
+mgr_vms: 1
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.22
+cluster_subnet: 192.168.23
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+vagrant_box: rhel8-x86_64
+vagrant_box_url: 'http://drop.front.sepia.ceph.com/vagrant/rhel8-x86_64.box'
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
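Note: the bare public_subnet/cluster_subnet prefixes here must line up with the CIDR networks declared in group_vars above (192.168.22.0/24 and 192.168.23.0/24); the Vagrantfile appends the host octet. A trivial sanity-check sketch (illustrative values):

import ipaddress

# A VM address built from public_subnet must fall inside public_network.
ip = ipaddress.ip_address('192.168.22' + '.10')
net = ipaddress.ip_network('192.168.22.0/24')
assert ip in net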
@@ -4,8 +4,8 @@
 docker: True

 containerized_deployment: True
-monitor_interface: eth1
-radosgw_interface: eth1
+monitor_interface: "{{ 'ens6' if ansible_distribution == 'RedHat' else 'eth1' }}"
+radosgw_interface: "{{ 'ens6' if ansible_distribution == 'RedHat' else 'eth1' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 ceph_docker_on_openstack: False
 public_network: "192.168.30.0/24"
@@ -22,17 +22,15 @@ class TestMDSs(object):

     def test_mds_is_up(self, node, host):
         hostname = node["vars"]["inventory_hostname"]
-        if node['docker']:
-            container_binary = 'docker'
-            if host.exists('podman') and host.ansible("setup")["ansible_facts"]["ansible_distribution"] == 'Fedora':  # noqa E501
-                container_binary = 'podman'
-            docker_exec_cmd = '{container_binary} exec ceph-mds-{hostname}'.format(  # noqa E501
+        container_binary = node['container_binary']
+        if node["docker"]:
+            container_exec_cmd = '{container_binary} exec ceph-mds-{hostname}'.format(  # noqa E501
                 hostname=hostname, container_binary=container_binary)
         else:
-            docker_exec_cmd = ''
+            container_exec_cmd = ''

-        cmd = "sudo {docker_exec_cmd} ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
-            docker_exec_cmd=docker_exec_cmd,
+        cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
+            container_exec_cmd=container_exec_cmd,
             cluster=node['cluster_name']
         )
         cluster_status = json.loads(host.check_output(cmd))
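Note: the same mechanical substitution, docker_exec_cmd becomes container_exec_cmd and the inline podman probe is replaced by the fixture-provided node["container_binary"], repeats in the mgr, nfs, osd, rbd-mirror and rgw tests below. The shared shape, distilled into a hypothetical helper:

def exec_prefix(node, daemon):
    # '' on bare-metal deployments, '<binary> exec <container>' otherwise
    if not node["docker"]:
        return ''
    return '{} exec ceph-{}-{}'.format(
        node["container_binary"], daemon, node["vars"]["inventory_hostname"])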
@@ -23,16 +23,14 @@ class TestMGRs(object):
     def test_mgr_is_up(self, node, host):
         hostname = node["vars"]["inventory_hostname"]
         cluster = node["cluster_name"]
+        container_binary = node["container_binary"]
         if node['docker']:
-            container_binary = 'docker'
-            if host.exists('podman') and host.ansible("setup")["ansible_facts"]["ansible_distribution"] == 'Fedora':  # noqa E501
-                container_binary = 'podman'
-            docker_exec_cmd = '{container_binary} exec ceph-mgr-{hostname}'.format(  # noqa E501
+            container_exec_cmd = '{container_binary} exec ceph-mgr-{hostname}'.format(  # noqa E501
                 hostname=hostname, container_binary=container_binary)
         else:
-            docker_exec_cmd = ''
-        cmd = "sudo {docker_exec_cmd} ceph --name mgr.{hostname} --keyring /var/lib/ceph/mgr/{cluster}-{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
-            docker_exec_cmd=docker_exec_cmd,
+            container_exec_cmd = ''
+        cmd = "sudo {container_exec_cmd} ceph --name mgr.{hostname} --keyring /var/lib/ceph/mgr/{cluster}-{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
+            container_exec_cmd=container_exec_cmd,
             hostname=node["vars"]["inventory_hostname"],
             cluster=cluster
         )
@@ -28,16 +28,14 @@ class TestNFSs(object):
     def test_nfs_is_up(self, node, host):
         hostname = node["vars"]["inventory_hostname"]
         cluster = node['cluster_name']
+        container_binary = node["container_binary"]
         if node['docker']:
-            container_binary = 'docker'
-            if host.exists('podman') and host.ansible("setup")["ansible_facts"]["ansible_distribution"] == 'Fedora':  # noqa E501
-                container_binary = 'podman'
-            docker_exec_cmd = '{container_binary} exec ceph-nfs-{hostname}'.format(  # noqa E501
+            container_exec_cmd = '{container_binary} exec ceph-nfs-{hostname}'.format(  # noqa E501
                 hostname=hostname, container_binary=container_binary)
         else:
-            docker_exec_cmd = ''
-        cmd = "sudo {docker_exec_cmd} ceph --name client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
-            docker_exec_cmd=docker_exec_cmd,
+            container_exec_cmd = ''
+        cmd = "sudo {container_exec_cmd} ceph --name client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
+            container_exec_cmd=container_exec_cmd,
             hostname=hostname,
             cluster=cluster
         )
@@ -74,9 +74,7 @@ class TestOSDs(object):

     @pytest.mark.docker
     def test_all_docker_osds_are_up_and_in(self, node, host):
-        container_binary = 'docker'
-        if host.exists('podman') and host.ansible("setup")["ansible_facts"]["ansible_distribution"] == 'Fedora':  # noqa E501
-            container_binary = 'podman'
+        container_binary = node["container_binary"]
         osd_id = host.check_output(os.path.join(
             container_binary + " ps -q --filter='name=ceph-osd' | head -1"))
         cmd = "sudo {container_binary} exec {osd_id} ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(  # noqa E501
@@ -30,19 +30,17 @@ class TestRbdMirrors(object):

     def test_rbd_mirror_is_up(self, node, host):
+        hostname = node["vars"]["inventory_hostname"]
+        cluster = node["cluster_name"]
+        container_binary = node["container_binary"]
         daemons = []
         if node['docker']:
-            container_binary = 'docker'
-            if host.exists('podman') and host.ansible("setup")["ansible_facts"]["ansible_distribution"] == 'Fedora':  # noqa E501
-                container_binary = 'podman'
-            docker_exec_cmd = '{container_binary} exec ceph-rbd-mirror-{hostname}'.format(  # noqa E501
+            container_exec_cmd = '{container_binary} exec ceph-rbd-mirror-{hostname}'.format(  # noqa E501
                 hostname=hostname, container_binary=container_binary)
         else:
-            docker_exec_cmd = ''
+            container_exec_cmd = ''
-        hostname = node["vars"]["inventory_hostname"]
-        cluster = node['cluster_name']

-        cmd = "sudo {docker_exec_cmd} ceph --name client.bootstrap-rbd-mirror --keyring /var/lib/ceph/bootstrap-rbd-mirror/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
-            docker_exec_cmd=docker_exec_cmd,
+        cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-rbd-mirror --keyring /var/lib/ceph/bootstrap-rbd-mirror/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
+            container_exec_cmd=container_exec_cmd,
             hostname=hostname,
             cluster=cluster
         )
@@ -30,16 +30,14 @@ class TestRGWs(object):
     def test_rgw_is_up(self, node, host):
         hostname = node["vars"]["inventory_hostname"]
         cluster = node["cluster_name"]
+        container_binary = node["container_binary"]
         if node['docker']:
-            container_binary = 'docker'
-            if host.exists('podman') and host.ansible("setup")["ansible_facts"]["ansible_distribution"] == 'Fedora':  # noqa E501
-                container_binary = 'podman'
-            docker_exec_cmd = '{container_binary} exec ceph-rgw-{hostname}-rgw0'.format(  # noqa E501
+            container_exec_cmd = '{container_binary} exec ceph-rgw-{hostname}-rgw0'.format(  # noqa E501
                 hostname=hostname, container_binary=container_binary)
         else:
-            docker_exec_cmd = ''
-        cmd = "sudo {docker_exec_cmd} ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
-            docker_exec_cmd=docker_exec_cmd,
+            container_exec_cmd = ''
+        cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
+            container_exec_cmd=container_exec_cmd,
             hostname=hostname,
             cluster=cluster
         )
@@ -39,9 +39,7 @@ class TestRGWs(object):
     def test_docker_rgw_tuning_pools_are_set(self, node, host):
         hostname = node["vars"]["inventory_hostname"]
         cluster = node['cluster_name']
-        container_binary = 'docker'
-        if host.exists('podman') and host.ansible("setup")["ansible_facts"]["ansible_distribution"] == 'Fedora':  # noqa E501
-            container_binary = 'podman'
+        container_binary = node["container_binary"]
         cmd = "sudo {container_binary} exec ceph-rgw-{hostname}-rgw0 ceph --cluster={cluster} -n client.rgw.{hostname}.rgw0 --connect-timeout 5 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring osd dump".format(  # noqa E501
             hostname=hostname,
             cluster=cluster,
tox.ini
@@ -1,6 +1,7 @@
 [tox]
 envlist = {dev,rhcs}-{centos,ubuntu}-{container,non_container}-{all_daemons,collocation,update,bluestore_lvm_osds,lvm_osds,shrink_mon,shrink_osd,lvm_batch,add_osds,rgw_multisite,purge,storage_inventory,lvm_auto_discovery}
   {dev,rhcs}-{centos,ubuntu}-container-{switch_to_containers,ooo_collocation,podman}
+  dev-rhel-container-podman
   infra_lv_create

 skipsdist = True
@@ -227,6 +228,8 @@ setenv=
   centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/atomic-host
   podman: CEPH_ANSIBLE_VAGRANT_BOX = fedora/29-atomic-host
   ubuntu: CEPH_ANSIBLE_VAGRANT_BOX = ceph/ubuntu-xenial
+  dev-rhel-container-podman: CEPH_ANSIBLE_VAGRANT_BOX = rhel8-x86_64
+  dev-rhel-container-podman: CEPH_ANSIBLE_VAGRANT_BOX_URL = http://drop.front.sepia.ceph.com/vagrant/rhel8-x86_64.box

 # Set the ansible inventory host file to be used according to which distrib we are running on
   ubuntu: _INVENTORY = hosts-ubuntu
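Note: these setenv lines close the loop with the Vagrantfile change at the top of the diff. The new dev-rhel-container-podman env (run with `tox -e dev-rhel-container-podman`) exports the box name and URL, and the Vagrantfile's ENV-first lookups consume them. A sketch of the resolution as seen from the Vagrantfile (scenario default hypothetical):

import os

os.environ['CEPH_ANSIBLE_VAGRANT_BOX'] = 'rhel8-x86_64'  # what tox exports
settings = {'vagrant_box': 'centos/7'}                    # hypothetical default
box = os.environ.get('CEPH_ANSIBLE_VAGRANT_BOX') or settings['vagrant_box']
assert box == 'rhel8-x86_64'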