mirror of https://github.com/ceph/ceph-ansible.git
tests: adds a testing scenario for lv-create and lv-teardown
Using an explicitly named testing environment allows us to have a
specific [testenv] block for this test. This greatly simplifies how it will
work, as it doesn't really need anything from the ceph cluster tests.
Signed-off-by: Andrew Schoen <aschoen@redhat.com>
(cherry picked from commit 810cc47892)
Signed-off-by: Sébastien Han <seb@redhat.com>
pull/3033/head
parent 634cc14393
commit c119150946
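
A quick way to exercise the new scenario locally, assuming a ceph-ansible checkout with vagrant and the test requirements available (a sketch; the testenv itself is defined in the tox.ini hunk below):

    # run only the lv-create/lv-teardown scenario
    tox -e infra_lv_create

    # posargs are forwarded to `vagrant up`, so the provider can be swapped
    # from the default virtualbox:
    tox -e infra_lv_create -- --provider=libvirt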
@@ -0,0 +1 @@
../../../../../Vagrantfile
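This new file is a relative symlink back to the repository-root Vagrantfile: the scenario directory sits five levels deep (the changedir used by the tox env below), so five ../ components reach the top of the checkout. A sketch of the equivalent command:

    # from the repo root; the path matches changedir in [testenv:infra_lv_create]
    ln -s ../../../../../Vagrantfile tests/functional/centos/7/infra_lv_create/Vagrantfile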
@@ -0,0 +1,33 @@
---
logfile_path: ./lv-create.log

# Path of the NVMe device primed for LV creation for journals and data. Only
# one NVMe device is allowed at a time; providing a list will not work here.
nvme_device: /dev/sdb

# Paths of the HDD devices designated for LV creation.
hdd_devices:
  - /dev/sdc

journal_size: 1024

# This var is a list of bucket-index LVs created on the NVMe device. We
# recommend creating one, but you can add others.
nvme_device_lvs:
  - lv_name: "ceph-bucket-index-1"
    size: 100%FREE
    journal_name: "ceph-journal-bucket-index-1-{{ nvme_device_basename }}"

## TYPICAL USERS WILL NOT NEED TO CHANGE VARS FROM HERE DOWN ##

# All HDDs have to be the same size, and the LVs on them are dedicated to OSD data.
hdd_lv_size: 100%FREE

# Since this playbook can be run multiple times across different devices,
# {{ var.split('/')[-1] }} is used quite frequently in this playbook.
# It strips the device name from its path (e.g. sdc from /dev/sdc) to
# differentiate the names of VGs, journals, and LVs when the prefixes are
# not changed across multiple runs.
nvme_device_basename: "{{ nvme_device.split('/')[-1] }}"

# Only one volume group is created in the playbook for all the LVs on NVMe.
# This volume group takes up the entire device specified in "nvme_device".
nvme_vg_name: "ceph-nvme-vg-{{ nvme_device_basename }}"

hdd_vg_prefix: "ceph-hdd-vg"
hdd_lv_prefix: "ceph-hdd-lv"
hdd_journal_prefix: "ceph-journal"
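
As a worked example of the naming scheme: shell parameter expansion mirrors the Jinja2 split('/')[-1] lookup, so with the defaults above the rendered names would come out roughly as follows (a sketch; the real names are produced by lv-create.yml, and the hdd_* lines assume the playbook appends the device basename to the prefixes):

    nvme_device=/dev/sdb
    echo "${nvme_device##*/}"                              # sdb, i.e. nvme_device_basename
    echo "ceph-nvme-vg-${nvme_device##*/}"                 # ceph-nvme-vg-sdb
    echo "ceph-journal-bucket-index-1-${nvme_device##*/}"  # ceph-journal-bucket-index-1-sdb

    hdd=/dev/sdc
    echo "ceph-hdd-vg-${hdd##*/}"                          # ceph-hdd-vg-sdc (assumed)
    echo "ceph-hdd-lv-${hdd##*/}"                          # ceph-hdd-lv-sdc (assumed)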
@@ -0,0 +1,2 @@
[osds]
osd0
@@ -0,0 +1,73 @@
---

# DEPLOY CONTAINERIZED DAEMONS
docker: false

# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0

# Deploy RESTAPI on each of the Monitors
restapi: true

# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
ceph_install_source: stable

# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.39
cluster_subnet: 192.168.40

# MEMORY
# set 1024 for CentOS
memory: 512

# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
eth: 'eth1'

# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sdb', '/dev/sdc' ]"

# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
#   - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
#   - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on the vagrant box.
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack; defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing; this will skip
# mounting the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box

os_tuning_params:
  - { name: fs.file-max, value: 26234859 }
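
With osd_vms: 1 and the disks list above, the single osd0 VM should come up with /dev/sdb (used as the "NVMe" device for journals and the bucket-index LV) and /dev/sdc (the HDD). A quick sanity check after boot, assuming the VirtualBox provider:

    vagrant up --no-provision
    vagrant ssh osd0 -c 'lsblk'   # expect sdb and sdc alongside the root disk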
tox.ini
@@ -1,9 +1,43 @@
[tox]
envlist = {dev,jewel,luminous,mimic,rhcs}-{xenial_cluster,centos7_cluster,docker_cluster,update_cluster,cluster,update_docker_cluster,switch_to_containers,purge_filestore_osds_container,purge_filestore_osds_non_container,purge_cluster_non_container,purge_cluster_container,ooo_collocation}
  {dev,luminous,mimic,rhcs}-{filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,bluestore_lvm_osds,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,purge_bluestore_osds_non_container,purge_bluestore_osds_container,lvm_batch}
  infra_lv_create

skipsdist = True

# a test scenario for the lv-create.yml and lv-teardown.yml playbooks
[testenv:infra_lv_create]
whitelist_externals =
    vagrant
    bash
    cp
    mkdir
passenv=*
setenv=
  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
  ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
  ANSIBLE_ACTION_PLUGINS = {toxinidir}/plugins/actions
  ANSIBLE_CALLBACK_PLUGINS = {toxinidir}/plugins/callback
  ANSIBLE_CALLBACK_WHITELIST = profile_tasks
  # only available for ansible >= 2.2
  ANSIBLE_STDOUT_CALLBACK = debug
deps= -r{toxinidir}/tests/requirements.txt
changedir={toxinidir}/tests/functional/centos/7/infra_lv_create
commands=
  vagrant up --no-provision {posargs:--provider=virtualbox}
  bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}

  cp {toxinidir}/infrastructure-playbooks/lv-create.yml {toxinidir}/lv-create.yml
  mkdir -p {toxinidir}/templates
  cp {toxinidir}/infrastructure-playbooks/templates/lv-create-log.j2 {toxinidir}/templates/lv-create-log.j2
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/lv-create.yml

  cp {toxinidir}/infrastructure-playbooks/lv-teardown.yml {toxinidir}/lv-teardown.yml
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/lv-teardown.yml --extra-vars "ireallymeanit=yes"

  vagrant destroy --force

# extra commands for purging clusters
# that purge the cluster and then set it up again to
# ensure that a purge can clear nodes well enough that they
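
The cp/mkdir steps exist because Ansible resolves template lookups relative to the playbook's own directory, so lv-create.yml is staged at the repo root next to a templates/ copy of lv-create-log.j2 before the run. A condensed manual equivalent from a checkout (a sketch; tox normally drives this):

    cp infrastructure-playbooks/lv-create.yml .
    mkdir -p templates
    cp infrastructure-playbooks/templates/lv-create-log.j2 templates/
    ansible-playbook -vv -i tests/functional/centos/7/infra_lv_create/hosts lv-create.yml

    # lv-teardown.yml is destructive, so the usual ceph-ansible confirmation
    # variable is passed up front instead of answering the prompt:
    cp infrastructure-playbooks/lv-teardown.yml .
    ansible-playbook -vv -i tests/functional/centos/7/infra_lv_create/hosts lv-teardown.yml \
        --extra-vars "ireallymeanit=yes"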