tests: add new scenario subset_update

Add a new scenario to test the subset upgrade approach using tags.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit fb8a66149b)
pull/6964/head
Guillaume Abrioux 2021-10-20 09:59:48 +02:00
parent ca25ebb323
commit e29defef7d
22 changed files with 340 additions and 4 deletions
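For context, the "subset upgrade" approach exercised by this scenario runs infrastructure-playbooks/rolling_update.yml once per daemon group, limiting each run with --tags instead of upgrading the whole cluster in a single pass. A minimal sketch of the invocation pattern, assuming the vagrant-generated inventory used by this scenario (only the mons step appears verbatim in the tox.ini hunk at the end of this diff; the osds/rgws tags shown here are illustrative, and -e ireallymeanit=yes answers the playbook's confirmation prompt):

    ansible-playbook -vv -i hosts infrastructure-playbooks/rolling_update.yml --tags=mons -e ireallymeanit=yes
    ansible-playbook -vv -i hosts infrastructure-playbooks/rolling_update.yml --tags=osds -e ireallymeanit=yes
    ansible-playbook -vv -i hosts infrastructure-playbooks/rolling_update.yml --tags=rgws -e ireallymeanit=yes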

@@ -0,0 +1 @@
../../../Vagrantfile

@@ -0,0 +1,15 @@
{
  "ceph_conf_overrides": {
    "global": {
      "auth_allow_insecure_global_id_reclaim": false,
      "osd_pool_default_pg_num": 12,
      "osd_pool_default_size": 1,
      "mon_allow_pool_size_one": true,
      "mon_warn_on_pool_no_redundancy": false,
      "mon_max_pg_per_osd": 300
    }
  },
  "ceph_mon_docker_memory_limit": "2g",
  "radosgw_num_instances": 2
}

@@ -0,0 +1 @@
../../../../Vagrantfile

@@ -0,0 +1 @@
../ceph-override.json

@@ -0,0 +1,36 @@
---
# this is only here to let the CI tests know
# that this scenario is using docker
docker: True
containerized_deployment: True
monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
public_network: "192.168.5.0/24"
cluster_network: "192.168.6.0/24"
rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400
ceph_conf_overrides:
  global:
    auth_allow_insecure_global_id_reclaim: false
    mon_allow_pool_size_one: true
    mon_warn_on_pool_no_redundancy: false
    osd_pool_default_size: 1
    mon_max_pg_per_osd: 300
openstack_config: false
docker_pull_timeout: 600s
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
mds_max_mds: 2
# TODO: add monitoring later
dashboard_enabled: false
dashboard_admin_password: $sX!cD$rYU6qR^B!
grafana_admin_password: +xFRe+RES@7vg24n
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_image_tag: latest-pacific
node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"

@@ -0,0 +1,13 @@
---
user_config: True
copy_admin_key: True
test:
  name: "test"
  rule_name: "HDD"
  size: 1
test2:
  name: "test2"
  size: 1
pools:
  - "{{ test }}"
  - "{{ test2 }}"

@@ -0,0 +1,2 @@
---
generate_crt: True

@@ -0,0 +1,3 @@
---
create_crush_tree: false
crush_rule_config: false

@@ -0,0 +1,6 @@
---
osd_objectstore: "bluestore"
devices:
  - /dev/sda
  - /dev/sdb
  - /dev/sdc

@@ -0,0 +1,8 @@
---
copy_admin_key: True
rgw_create_pools:
  foo:
    pg_num: 16
    type: replicated
  bar:
    pg_num: 16

@@ -0,0 +1,17 @@
[mons]
mon0 monitor_address=192.168.5.10
mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
mon2 monitor_address=192.168.5.12

[mgrs]
mon0
mon1

[osds]
osd0
osd1
osd2

[rgws]
rgw0
rgw1

@@ -0,0 +1,61 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: True
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 3
osd_vms: 3
mds_vms: 0
rgw_vms: 2
nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.5
cluster_subnet: 192.168.6
# MEMORY
# set 1024 for CentOS
memory: 1024
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sda', '/dev/sdb' ]"
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: centos/atomic-host
#client_vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box

@@ -0,0 +1,27 @@
---
ceph_origin: repository
ceph_repository: community
public_network: "192.168.3.0/24"
cluster_network: "192.168.4.0/24"
radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
ceph_conf_overrides:
  global:
    auth_allow_insecure_global_id_reclaim: false
    mon_allow_pool_size_one: true
    mon_warn_on_pool_no_redundancy: false
    osd_pool_default_size: 1
    mon_max_pg_per_osd: 300
openstack_config: false
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
mds_max_mds: 2
# TODO: add monitoring later
dashboard_enabled: false
dashboard_admin_password: $sX!cD$rYU6qR^B!
grafana_admin_password: +xFRe+RES@7vg24n
ceph_docker_registry: quay.ceph.io
node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"
grafana_server_group_name: ceph_monitoring

@@ -0,0 +1,13 @@
---
copy_admin_key: True
user_config: True
test:
  name: "test"
  rule_name: "HDD"
  size: 1
test2:
  name: "test2"
  size: 1
pools:
  - "{{ test }}"
  - "{{ test2 }}"

@@ -0,0 +1,2 @@
---
generate_crt: True

@@ -0,0 +1,3 @@
---
create_crush_tree: false
crush_rule_config: false

@@ -0,0 +1,10 @@
copy_admin_key: true
nfs_file_gw: false
nfs_obj_gw: true
ganesha_conf_overrides: |
  CACHEINODE {
    Entries_HWMark = 100000;
  }
nfs_ganesha_stable: true
nfs_ganesha_dev: false
nfs_ganesha_flavor: "ceph_master"

@@ -0,0 +1,8 @@
---
os_tuning_params:
  - { name: fs.file-max, value: 26234859 }
osd_objectstore: "bluestore"
devices:
  - /dev/sda
  - /dev/sdb
  - /dev/sdc

@@ -0,0 +1,9 @@
copy_admin_key: true
rgw_create_pools:
  foo:
    pg_num: 16
    type: replicated
  bar:
    pg_num: 16
rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400

@@ -0,0 +1,18 @@
[mons]
mon0 monitor_address=192.168.3.10
mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
mon2 monitor_address=192.168.3.12

[mgrs]
mon0
mon1

[osds]
osd0
osd1
osd2

[rgws]
rgw0
rgw1

@@ -0,0 +1,74 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 3
osd_vms: 3
mds_vms: 0
rgw_vms: 2
nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
ceph_install_source: stable
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.3
cluster_subnet: 192.168.4
# MEMORY
# set 1024 for CentOS
memory: 1024
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sdb', '/dev/sdc' ]"
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
os_tuning_params:
  - { name: fs.file-max, value: 26234859 }
# VM prefix name, need to match the hostname
# label_prefix: ceph

@@ -38,14 +38,22 @@ commands=
bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
# use the stable-5.0 branch to deploy an octopus cluster
git clone -b stable-5.0 --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {changedir}/{env:INVENTORY} {envdir}/tmp/ceph-ansible/tests/functional/setup.yml'
# deploy the cluster
bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {changedir}/{env:INVENTORY} {envdir}/tmp/ceph-ansible/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
ceph_docker_registry=quay.ceph.io \
ceph_docker_image=ceph-ci/daemon \
ceph_docker_image_tag=latest-octopus \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
"
"'
# upgrade mons
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=mons --extra-vars "\
@@ -91,6 +99,6 @@ commands=
"
bash -c "CEPH_STABLE_RELEASE=quincy py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
bash -c "CEPH_STABLE_RELEASE=pacific py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
vagrant destroy --force