ci: new osd scenarios

This commit adds new OSD scenarios. It aims to simplify the CI setup
and bring better coverage of the OSD scenarios.
We decided to differentiate between filestore and bluestore, looking
ahead to the day when filestore is no longer supported.
So we now have two classes of tests:

* Filestore
* Bluestore

Each of those classes is exercised in both container and non-container
deployments. Then, for each combination, we test the following:

* collocated
* collocated dmcrypt
* non-collocated
* non-collocated dmcrypt
* auto discovery collocated
* auto discovery collocated dmcrypt

This gives us good coverage while reducing the footprint on the CI.
We now have 4 scenarios, each containing 6 OSD VMs (one per OSD
configuration above; see the inventory sketch below).

Signed-off-by: Sébastien Han <seb@redhat.com>
Branch: pull/2000/head
Author: Sébastien Han, 2017-10-05 16:22:04 +02:00
Parent: 7ee9aa94b5
Commit: a53aa9e8b4
96 changed files with 178 additions and 1105 deletions
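For illustration, here is a condensed sketch of the per-host OSD matrix the new inventories use (lines taken verbatim from the bluestore non-container hosts file added below; osd1, osd3 and osd5 repeat osd0, osd2 and osd4 with dmcrypt=True, and the filestore variants additionally set journal_size=100):

[osds]
osd0 osd_scenario="collocated" osd_objectstore="bluestore" devices="['/dev/disk/by-path/pci-0000:00:01.1-ata-1.0', '/dev/sdb']"
osd2 osd_scenario="non-collocated" osd_objectstore="bluestore" devices="['/dev/disk/by-path/pci-0000:00:01.1-ata-1.0', '/dev/sdb']" dedicated_devices="['/dev/sdc', '/dev/sdc']"
osd4 osd_scenario="collocated" osd_objectstore="bluestore" osd_auto_discovery=True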

@@ -134,6 +134,7 @@
register: devices_prepare_canonicalize
when:
- inventory_hostname in groups.get(osd_group_name, [])
- not osd_auto_discovery|default(False)
- name: set_fact build devices from resolved symlinks
set_fact:
@@ -141,10 +142,11 @@
with_items: "{{ devices_prepare_canonicalize.results }}"
when:
- inventory_hostname in groups.get(osd_group_name, [])
- not osd_auto_discovery|default(False)
- name: set_fact build final devices list
set_fact:
devices: "{{ devices | reject('search','/dev/disk') | list | unique }}"
when:
- inventory_hostname in groups.get(osd_group_name, [])
- not osd_auto_discovery|default(False)

@@ -97,9 +97,9 @@
- not osd_auto_discovery
- devices is string
- name: verify journal devices have been provided
- name: verify dedicated devices have been provided
fail:
msg: "please provide devices and raw journal devices to your osd scenario"
msg: "please provide devices and dedicated_devices to your osd scenario"
when:
- osd_group_name is defined
- osd_group_name in group_names

@@ -13,6 +13,7 @@
-v /var/lib/ceph/:/var/lib/ceph/ \
-v /dev:/dev \
-v /etc/localtime:/etc/localtime:ro \
-e DEBUG=verbose \
-e CLUSTER={{ cluster }} \
-e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
-e OSD_DEVICE={{ item.1 }} \
@@ -39,6 +40,7 @@
-v /var/lib/ceph/:/var/lib/ceph/ \
-v /dev:/dev \
-v /etc/localtime:/etc/localtime:ro \
-e DEBUG=verbose \
-e CLUSTER={{ cluster }} \
-e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
-e OSD_DEVICE=/dev/{{ item.key }} \

@@ -13,6 +13,7 @@
-v /var/lib/ceph/:/var/lib/ceph/ \
-v /dev:/dev \
-v /etc/localtime:/etc/localtime:ro \
-e DEBUG=verbose \
-e CLUSTER={{ cluster }} \
-e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
-e OSD_DEVICE={{ item.1 }} \
@@ -41,6 +42,7 @@
-v /var/lib/ceph/:/var/lib/ceph/ \
-v /dev:/dev \
-v /etc/localtime:/etc/localtime:ro \
-e DEBUG=verbose \
-e CLUSTER={{ cluster }} \
-e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
-e OSD_DEVICE={{ item.1 }} \

@@ -1,29 +0,0 @@
---
ceph_origin: repository
ceph_repository: community
cluster: test
monitor_interface: eth1
radosgw_interface: eth1
public_network: "192.168.35.0/24"
cluster_network: "192.168.36.0/24"
journal_size: 100
osd_objectstore: "bluestore"
devices:
- '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
- '/dev/sdb'
dedicated_devices:
- '/dev/sdc'
- '/dev/sdc'
osd_scenario: non-collocated
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }
user_config: True
openstack_config: True
ceph_conf_overrides:
global:
osd_pool_default_size: 1
osd:
bluestore block db size = 67108864
bluestore block wal size = 1048576000

@@ -1,19 +0,0 @@
[mons]
mon0 monitor_address=192.168.35.10
mon1 monitor_interface=eth1
mon2 monitor_address=192.168.35.12
[mgrs]
mon0
[osds]
osd0
[mdss]
mds0
[rgws]
rgw0
[clients]
client0

@@ -1,27 +0,0 @@
---
ceph_origin: repository
ceph_repository: community
public_network: "192.168.11.0/24"
cluster_network: "192.168.12.0/24"
journal_size: 100
monitor_interface: eth1
radosgw_interface: eth1
osd_scenario: non-collocated
dmcrypt: true
osd_objectstore: "bluestore"
devices:
- '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
- '/dev/sdb'
dedicated_devices:
- '/dev/sdc'
- '/dev/sdc'
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_size: 1
osd:
bluestore block db size = 67108864
bluestore block wal size = 1048576000

@@ -1,8 +0,0 @@
[mons]
mon0
[mgrs]
mon0
[osds]
osd0

@@ -1,24 +0,0 @@
---
ceph_origin: repository
ceph_repository: community
public_network: "192.168.13.0/24"
cluster_network: "192.168.14.0/24"
journal_size: 100
monitor_interface: eth1
radosgw_interface: eth1
osd_scenario: collocated
dmcrypt: true
osd_objectstore: bluestore
devices:
- '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
- '/dev/sdb'
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_size: 1
osd:
bluestore block db size = 67108864
bluestore block wal size = 1048576000

@@ -1,8 +0,0 @@
[mons]
mon0
[mgrs]
mon0
[osds]
osd0

@@ -1,25 +0,0 @@
---
docker: true
containerized_deployment: True
ceph_mon_docker_subnet: "{{ public_network }}"
public_network: "192.168.23.0/24"
cluster_network: "192.168.24.0/24"
journal_size: 100
monitor_interface: eth1
radosgw_interface: eth1
osd_scenario: collocated
dmcrypt: true
osd_objectstore: bluestore
devices:
- '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
- '/dev/sdb'
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_size: 1
osd:
bluestore block db size = 67108864
bluestore block wal size = 1048576000
ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1

@@ -1,8 +0,0 @@
[mons]
mon0
[osds]
osd0
[mgrs]
mon0

@@ -1,74 +0,0 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: true
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# Deploy RESTAPI on each of the Monitors
restapi: true
# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
ceph_install_source: stable
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.23
cluster_subnet: 192.168.24
# MEMORY
# set 1024 for CentOS
memory: 512
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sdb', '/dev/sdc' ]"
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }

@@ -1 +0,0 @@
../../../../../Vagrantfile

@@ -1 +0,0 @@
../cluster/ceph-override.json

@@ -1,25 +0,0 @@
---
# this is only here to let the CI tests know
# that this scenario is using docker
docker: True
containerized_deployment: True
cluster: test
monitor_interface: eth1
radosgw_interface: eth1
ceph_mon_docker_subnet: "{{ public_network }}"
journal_size: 100
ceph_docker_on_openstack: False
public_network: "192.168.15.0/24"
cluster_network: "192.168.16.0/24"
ceph_rgw_civetweb_port: 8080
osd_scenario: non-collocated
osd_objectstore: bluestore
devices:
- '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
- /dev/sdb
dedicated_devices:
- /dev/sdc
- /dev/sdc
ceph_osd_docker_prepare_env: -e OSD_FORCE_ZAP=1
ceph_osd_docker_run_script_path: /var/tmp

@@ -1,8 +0,0 @@
[mons]
mon0
[mgrs]
mon0
[osds]
osd0

@@ -1,52 +0,0 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: True
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# Deploy RESTAPI on each of the Monitors
restapi: true
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.15
cluster_subnet: 192.168.16
# MEMORY
# set 1024 for CentOS
memory: 1024
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sda', '/dev/sdb' ]"
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# NOTE(leseb): we use centos for this scenario since we at least need Docker version 1.12.5
# which is not available in Atomic Host.
# There are bug like this one: https://github.com/docker/docker/issues/12694
vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true

@@ -1 +0,0 @@
../../../../../Vagrantfile

@@ -1 +0,0 @@
../cluster/ceph-override.json

@@ -1,24 +0,0 @@
---
ceph_origin: repository
ceph_repository: community
cluster: test
monitor_interface: eth1
radosgw_interface: eth1
public_network: "192.168.3.0/24"
cluster_network: "192.168.4.0/24"
journal_size: 100
devices:
- '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
- '/dev/sdb'
osd_scenario: collocated
osd_objectstore: "bluestore"
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_size: 1
osd:
bluestore block db size = 67108864
bluestore block wal size = 1048576000

@@ -1,4 +0,0 @@
---
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }

@@ -1,8 +0,0 @@
[mons]
mon0 monitor_interface=eth1
[mgrs]
mon0
[osds]
osd0

@@ -1,77 +0,0 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# Deploy RESTAPI on each of the Monitors
restapi: true
# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
ceph_install_source: stable
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.3
cluster_subnet: 192.168.4
# MEMORY
# set 1024 for CentOS
memory: 512
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sdb', '/dev/sdc' ]"
# VM disk size in MB
disk_size: 41000
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }

@@ -0,0 +1,15 @@
---
docker: true
containerized_deployment: true
cluster: test
monitor_interface: eth1
public_network: "192.168.35.0/24"
cluster_network: "192.168.36.0/24"
ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
ceph_conf_overrides:
global:
osd_pool_default_size: 1
osd:
bluestore block db size = 67108864
bluestore block wal size = 1048576000

@@ -0,0 +1,13 @@
[mons]
mon0 monitor_address=192.168.35.10
[mgrs]
mon0
[osds]
osd0 osd_scenario="collocated" osd_objectstore="bluestore" devices="['/dev/disk/by-path/pci-0000:00:01.1-ata-1.0', '/dev/sdb']"
osd1 osd_scenario="collocated" osd_objectstore="bluestore" devices="['/dev/disk/by-path/pci-0000:00:01.1-ata-1.0', '/dev/sdb']" dmcrypt=True
osd2 osd_scenario="non-collocated" osd_objectstore="bluestore" devices="['/dev/disk/by-path/pci-0000:00:01.1-ata-1.0', '/dev/sdb']" dedicated_devices="['/dev/sdc', '/dev/sdc']"
osd3 osd_scenario="non-collocated" osd_objectstore="bluestore" devices="['/dev/disk/by-path/pci-0000:00:01.1-ata-1.0', '/dev/sdb']" dedicated_devices="['/dev/sdc', '/dev/sdc']" dmcrypt=True
osd4 osd_scenario="collocated" osd_objectstore="bluestore" osd_auto_discovery=True
osd5 osd_scenario="collocated" osd_objectstore="bluestore" osd_auto_discovery=True dmcrypt=True

@@ -4,13 +4,13 @@
docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 3
osd_vms: 1
mds_vms: 1
rgw_vms: 1
mon_vms: 1
osd_vms: 6
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 1
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
@@ -27,7 +27,7 @@ cluster_subnet: 192.168.36
# MEMORY
# set 1024 for CentOS
memory: 512
memory: 1024
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial

@@ -0,0 +1,14 @@
---
ceph_origin: repository
ceph_repository: community
cluster: test
monitor_interface: eth1
public_network: "192.168.45.0/24"
cluster_network: "192.168.46.0/24"
ceph_conf_overrides:
global:
osd_pool_default_size: 1
osd:
bluestore block db size = 67108864
bluestore block wal size = 1048576000

@@ -0,0 +1,2 @@
---
copy_admin_key: true

@@ -0,0 +1,8 @@
copy_admin_key: true
create_pools:
foo:
pg_num: 17
bar:
pg_num: 19
rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400

@@ -0,0 +1,14 @@
[mons]
mon0 monitor_address=192.168.45.10
[mgrs]
mon0
[osds]
osd0 osd_scenario="collocated" osd_objectstore="bluestore" devices="['/dev/disk/by-path/pci-0000:00:01.1-ata-1.0', '/dev/sdb']"
osd1 osd_scenario="collocated" osd_objectstore="bluestore" devices="['/dev/disk/by-path/pci-0000:00:01.1-ata-1.0', '/dev/sdb']" dmcrypt=True
osd2 osd_scenario="non-collocated" osd_objectstore="bluestore" devices="['/dev/disk/by-path/pci-0000:00:01.1-ata-1.0', '/dev/sdb']" dedicated_devices="['/dev/sdc', '/dev/sdc']"
osd3 osd_scenario="non-collocated" osd_objectstore="bluestore" devices="['/dev/disk/by-path/pci-0000:00:01.1-ata-1.0', '/dev/sdb']" dedicated_devices="['/dev/sdc', '/dev/sdc']" dmcrypt=True
osd4 osd_scenario="collocated" osd_objectstore="bluestore" osd_auto_discovery=True
osd5 osd_scenario="collocated" osd_objectstore="bluestore" osd_auto_discovery=True dmcrypt=True

@@ -5,7 +5,7 @@ docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
osd_vms: 6
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
@@ -22,12 +22,12 @@ restapi: true
ceph_install_source: stable
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.11
cluster_subnet: 192.168.12
public_subnet: 192.168.45
cluster_subnet: 192.168.46
# MEMORY
# set 1024 for CentOS
memory: 512
memory: 1024
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial

@@ -1 +0,0 @@
../../../../../Vagrantfile

@@ -1 +0,0 @@
../cluster/ceph-override.json

@@ -1,24 +0,0 @@
---
ceph_origin: repository
ceph_repository: community
public_network: "192.168.21.0/24"
cluster_network: "192.168.22.0/24"
journal_size: 100
monitor_interface: eth1
radosgw_interface: eth1
osd_scenario: non-collocated
dmcrypt: true
osd_objectstore: filestore
devices:
- '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
- '/dev/sdb'
dedicated_devices:
- '/dev/sdc'
- '/dev/sdc'
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_size: 1

@@ -1,4 +0,0 @@
---
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }

@@ -1,8 +0,0 @@
[mons]
mon0
[mgrs]
mon0
[osds]
osd0

@@ -1 +0,0 @@
../../../../../Vagrantfile

@@ -1 +0,0 @@
../cluster/ceph-override.json

@@ -1,21 +0,0 @@
---
ceph_origin: repository
ceph_repository: community
public_network: "192.168.25.0/24"
cluster_network: "192.168.26.0/24"
journal_size: 100
monitor_interface: eth1
radosgw_interface: eth1
osd_scenario: collocated
osd_objectstore: filestore
dmcrypt: true
devices:
- '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
- '/dev/sdb'
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_size: 1

@@ -1,4 +0,0 @@
---
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }

@@ -1,8 +0,0 @@
[mons]
mon0
[mgrs]
mon0
[osds]
osd0

@@ -1,74 +0,0 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# Deploy RESTAPI on each of the Monitors
restapi: true
# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
ceph_install_source: stable
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.25
cluster_subnet: 192.168.26
# MEMORY
# set 1024 for CentOS
memory: 512
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sdb', '/dev/sdc' ]"
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }

@@ -1 +0,0 @@
../../../../../Vagrantfile

@@ -1 +0,0 @@
../cluster/ceph-override.json

@@ -1,22 +0,0 @@
---
# this is only here to let the CI tests know
# that this scenario is using docker
docker: True
containerized_deployment: True
cluster: ceph
monitor_interface: eth1
radosgw_interface: eth1
ceph_mon_docker_subnet: "{{ public_network }}"
journal_size: 100
ceph_docker_on_openstack: False
public_network: "192.168.27.0/24"
cluster_network: "192.168.28.0/24"
osd_scenario: collocated
osd_objectstore: filestore
dmcrypt: true
ceph_rgw_civetweb_port: 8080
devices:
- '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
- /dev/sdb
ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1

@@ -1,8 +0,0 @@
[mons]
mon0
[mgrs]
mon0
[osds]
osd0

@@ -1,52 +0,0 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: True
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# Deploy RESTAPI on each of the Monitors
restapi: true
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.27
cluster_subnet: 192.168.28
# MEMORY
# set 1024 for CentOS
memory: 1024
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sda', '/dev/sdb' ]"
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# NOTE(leseb): we use centos for this scenario since we at least need Docker version 1.12.5
# which is not available in Atomic Host.
# There are bug like this one: https://github.com/docker/docker/issues/12694
vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true

@@ -1 +0,0 @@
../../../../../Vagrantfile

@@ -1 +0,0 @@
../cluster/ceph-override.json

@@ -1,25 +0,0 @@
---
# this is only here to let the CI tests know
# that this scenario is using docker
docker: True
containerized_deployment: True
cluster: test
monitor_interface: eth1
radosgw_interface: eth1
ceph_mon_docker_subnet: "{{ public_network }}"
journal_size: 100
ceph_docker_on_openstack: False
public_network: "192.168.29.0/24"
cluster_network: "192.168.30.0/24"
ceph_rgw_civetweb_port: 8080
osd_objectstore: filestore
osd_scenario: non-collocated
devices:
- '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
- /dev/sdb
dedicated_devices:
- /dev/sdc
- /dev/sdc
ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
ceph_osd_docker_run_script_path: /var/tmp

@@ -1,8 +0,0 @@
[mons]
mon0
[mgrs]
mon0
[osds]
osd0

@@ -1,52 +0,0 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: True
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# Deploy RESTAPI on each of the Monitors
restapi: true
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.29
cluster_subnet: 192.168.30
# MEMORY
# set 1024 for CentOS
memory: 1024
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sda', '/dev/sdb' ]"
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# NOTE(leseb): we use centos for this scenario since we at least need Docker version 1.12.5
# which is not available in Atomic Host.
# There are bug like this one: https://github.com/docker/docker/issues/12694
vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true

@@ -0,0 +1,15 @@
---
docker: true
containerized_deployment: true
cluster: test
monitor_interface: eth1
public_network: "192.168.55.0/24"
cluster_network: "192.168.56.0/24"
ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
ceph_conf_overrides:
global:
osd_pool_default_size: 1
osd:
bluestore block db size = 67108864
bluestore block wal size = 1048576000

@@ -0,0 +1,2 @@
---
copy_admin_key: true

@@ -0,0 +1,8 @@
copy_admin_key: true
create_pools:
foo:
pg_num: 17
bar:
pg_num: 19
rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400

@@ -0,0 +1,13 @@
[mons]
mon0 monitor_address=192.168.55.10
[mgrs]
mon0
[osds]
osd0 osd_scenario="collocated" osd_objectstore="filestore" devices="['/dev/disk/by-path/pci-0000:00:01.1-ata-1.0', '/dev/sdb']" journal_size=100
osd1 osd_scenario="collocated" osd_objectstore="filestore" devices="['/dev/disk/by-path/pci-0000:00:01.1-ata-1.0', '/dev/sdb']" journal_size=100 dmcrypt=True
osd2 osd_scenario="non-collocated" osd_objectstore="filestore" devices="['/dev/disk/by-path/pci-0000:00:01.1-ata-1.0', '/dev/sdb']" dedicated_devices="['/dev/sdc', '/dev/sdc']" journal_size=100
osd3 osd_scenario="non-collocated" osd_objectstore="filestore" devices="['/dev/disk/by-path/pci-0000:00:01.1-ata-1.0', '/dev/sdb']" dedicated_devices="['/dev/sdc', '/dev/sdc']" journal_size=100 dmcrypt=True
osd4 osd_scenario="collocated" osd_objectstore="filestore" osd_auto_discovery=True journal_size=100
osd5 osd_scenario="collocated" osd_objectstore="filestore" osd_auto_discovery=True journal_size=100 dmcrypt=True

@@ -5,7 +5,7 @@ docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
osd_vms: 6
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
@@ -22,12 +22,12 @@ restapi: true
ceph_install_source: stable
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.13
cluster_subnet: 192.168.14
public_subnet: 192.168.55
cluster_subnet: 192.168.56
# MEMORY
# set 1024 for CentOS
memory: 512
memory: 1024
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial

@@ -0,0 +1,14 @@
---
ceph_origin: repository
ceph_repository: community
cluster: test
monitor_interface: eth1
public_network: "192.168.65.0/24"
cluster_network: "192.168.66.0/24"
ceph_conf_overrides:
global:
osd_pool_default_size: 1
osd:
bluestore block db size = 67108864
bluestore block wal size = 1048576000

@@ -0,0 +1,2 @@
---
copy_admin_key: true

@@ -0,0 +1,8 @@
copy_admin_key: true
create_pools:
foo:
pg_num: 17
bar:
pg_num: 19
rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400

@@ -0,0 +1,13 @@
[mons]
mon0 monitor_address=192.168.65.10
[mgrs]
mon0
[osds]
osd0 osd_scenario="collocated" osd_objectstore="filestore" devices="['/dev/disk/by-path/pci-0000:00:01.1-ata-1.0', '/dev/sdb']" journal_size=100
osd1 osd_scenario="collocated" osd_objectstore="filestore" devices="['/dev/disk/by-path/pci-0000:00:01.1-ata-1.0', '/dev/sdb']" journal_size=100 dmcrypt=True
osd2 osd_scenario="non-collocated" osd_objectstore="filestore" devices="['/dev/disk/by-path/pci-0000:00:01.1-ata-1.0', '/dev/sdb']" dedicated_devices="['/dev/sdc', '/dev/sdc']" journal_size=100
osd3 osd_scenario="non-collocated" osd_objectstore="filestore" devices="['/dev/disk/by-path/pci-0000:00:01.1-ata-1.0', '/dev/sdb']" dedicated_devices="['/dev/sdc', '/dev/sdc']" journal_size=100 dmcrypt=True
osd4 osd_scenario="collocated" osd_objectstore="filestore" osd_auto_discovery=True journal_size=100
osd5 osd_scenario="collocated" osd_objectstore="filestore" osd_auto_discovery=True journal_size=100 dmcrypt=True

@@ -5,7 +5,7 @@ docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
osd_vms: 6
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
@@ -22,12 +22,12 @@ restapi: true
ceph_install_source: stable
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.21
cluster_subnet: 192.168.22
public_subnet: 192.168.65
cluster_subnet: 192.168.66
# MEMORY
# set 1024 for CentOS
memory: 512
memory: 1024
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial

@@ -1 +0,0 @@
../../../../../Vagrantfile

@@ -1 +0,0 @@
../cluster/ceph-override.json

@@ -1,20 +0,0 @@
---
ceph_origin: repository
ceph_repository: community
cluster: test
public_network: "192.168.37.0/24"
cluster_network: "192.168.38.0/24"
monitor_interface: eth1
radosgw_interface: eth1
journal_size: 100
osd_objectstore: filestore
dmcrypt: true
osd_auto_discovery: true
osd_scenario: collocated
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_size: 1

@@ -1,4 +0,0 @@
---
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }

@@ -1,8 +0,0 @@
[mons]
mon0
[mgrs]
mon0
[osds]
osd0

@@ -1,69 +0,0 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# Deploy RESTAPI on each of the Monitors
restapi: true
# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
ceph_install_source: stable
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.37
cluster_subnet: 192.168.38
# MEMORY
# set 1024 for CentOS
memory: 512
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
eth: 'eth1'
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }

@@ -1 +0,0 @@
../../../../../Vagrantfile

@@ -1 +0,0 @@
../cluster/ceph-override.json

@@ -1,19 +0,0 @@
---
ceph_origin: repository
ceph_repository: community
cluster: test
public_network: "192.168.33.0/24"
cluster_network: "192.168.34.0/24"
monitor_interface: eth1
radosgw_interface: eth1
journal_size: 100
osd_objectstore: filestore
osd_auto_discovery: true
osd_scenario: collocated
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_size: 1

@@ -1,4 +0,0 @@
---
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }

@@ -1,8 +0,0 @@
[mons]
mon0
[mgrs]
mon0
[osds]
osd0

@@ -1,69 +0,0 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# Deploy RESTAPI on each of the Monitors
restapi: true
# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
ceph_install_source: stable
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.33
cluster_subnet: 192.168.34
# MEMORY
# set 1024 for CentOS
memory: 512
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
eth: 'eth1'
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }

@@ -1 +0,0 @@
../../../../../Vagrantfile

@@ -1 +0,0 @@
../cluster/ceph-override.json

@@ -1,21 +0,0 @@
---
ceph_origin: repository
ceph_repository: community
cluster: test
public_network: "192.168.31.0/24"
cluster_network: "192.168.32.0/24"
monitor_interface: eth1
radosgw_interface: eth1
journal_size: 100
osd_objectstore: filestore
devices:
- '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
- '/dev/sdb'
osd_scenario: collocated
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_size: 1

@@ -1,4 +0,0 @@
---
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }

@@ -1,8 +0,0 @@
[mons]
mon0
[mgrs]
mon0
[osds]
osd0

@@ -1,74 +0,0 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# Deploy RESTAPI on each of the Monitors
restapi: true
# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
ceph_install_source: stable
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.31
cluster_subnet: 192.168.32
# MEMORY
# set 1024 for CentOS
memory: 512
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sdb', '/dev/sdc' ]"
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }

tox.ini (38 lines changed)

@@ -1,6 +1,6 @@
[tox]
envlist = {dev,jewel,luminous,rhcs}-{ansible2.2,ansible2.3}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster,purge_dmcrypt,docker_dedicated_journal,docker_dmcrypt_journal_collocation,update_dmcrypt,update_cluster,cluster,purge_docker_cluster,update_docker_cluster,switch_to_containers}
{dev,luminous}-{ansible2.2,ansible2.3}-{bluestore_journal_collocation,bluestore_cluster,bluestore_dmcrypt_journal,bluestore_dmcrypt_journal_collocation,bluestore_docker_dedicated_journal,bluestore_docker_dmcrypt_journal_collocation,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,journal_collocation_auto,journal_collocation_auto_dmcrypt,shrink_mon_container,shrink_osd_container,docker_cluster_collocation}
envlist = {dev,jewel,luminous,rhcs}-{ansible2.2,ansible2.3}-{xenial_cluster,centos7_cluster,docker_cluster,purge_cluster,purge_dmcrypt,update_dmcrypt,update_cluster,cluster,purge_docker_cluster,update_docker_cluster,switch_to_containers}
{dev,luminous}-{ansible2.2,ansible2.3}-{filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation}
skipsdist = True
@@ -127,14 +127,11 @@ setenv=
# only available for ansible >= 2.2
ANSIBLE_STDOUT_CALLBACK = debug
docker_cluster: PLAYBOOK = site-docker.yml.sample
docker_cluster_collocation: PLAYBOOK = site-docker.yml.sample
update_docker_cluster: PLAYBOOK = site-docker.yml.sample
purge_docker_cluster: PLAYBOOK = site-docker.yml.sample
purge_docker_cluster: PURGE_PLAYBOOK = purge-docker-cluster.yml
docker_dedicated_journal: PLAYBOOK = site-docker.yml.sample
docker_dmcrypt_journal_collocation: PLAYBOOK = site-docker.yml.sample
bluestore_docker_dedicated_journal: PLAYBOOK = site-docker.yml.sample
bluestore_docker_dmcrypt_journal_collocation: PLAYBOOK = site-docker.yml.sample
filestore_osds_container: PLAYBOOK = site-docker.yml.sample
bluestore_osds_container: PLAYBOOK = site-docker.yml.sample
shrink_mon_container: PLAYBOOK = site-docker.yml.sample
shrink_mon_container: MON_TO_KILL = mon2
shrink_osd_container: PLAYBOOK = site-docker.yml.sample
@@ -155,18 +152,12 @@ deps=
changedir=
# tests a 1 mon, 1 osd, 1 mds and 1 rgw xenial cluster using non-collocated OSD scenario
xenial_cluster: {toxinidir}/tests/functional/ubuntu/16.04/cluster
# tests a 1 mon 1 osd centos7 cluster using collocated OSD scenario
journal_collocation: {toxinidir}/tests/functional/centos/7/jrn-col
# tests a 1 mon 1 osd centos7 cluster using collocated OSD scenario with disk autodiscovery
journal_collocation_auto: {toxinidir}/tests/functional/centos/7/jrn-col-auto
# tests a 1 mon 1 osd centos7 cluster using collocated OSD scenario with encrypted disk autodiscovery
journal_collocation_auto_dmcrypt: {toxinidir}/tests/functional/centos/7/jrn-col-auto-dm
# tests a 1 mon 1 osd centos7 cluster using dmcrypt non-collocated OSD scenario
dmcrypt_journal: {toxinidir}/tests/functional/centos/7/crypt-ded-jrn
# tests a 1 mon 1 osd centos7 cluster using dmcrypt collocated OSD scenario
dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/crypt-jrn-col
# tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using non-collocated OSD scenario
centos7_cluster: {toxinidir}/tests/functional/centos/7/cluster
filestore_osds_container: {toxinidir}/tests/functional/centos/7/fs-osds-container
bluestore_osds_container: {toxinidir}/tests/functional/centos/7/bs-osds-container
filestore_osds_non_container: {toxinidir}/tests/functional/centos/7/fs-osds-non-container
bluestore_osds_non_container: {toxinidir}/tests/functional/centos/7/bs-osds-non-container
shrink_mon: {toxinidir}/tests/functional/centos/7/cluster
shrink_mon_container: {toxinidir}/tests/functional/centos/7/docker
shrink_osd: {toxinidir}/tests/functional/centos/7/cluster
@@ -178,19 +169,10 @@ changedir=
docker_cluster_collocation: {toxinidir}/tests/functional/centos/7/docker-collocation
update_docker_cluster: {toxinidir}/tests/functional/centos/7/docker
purge_docker_cluster: {toxinidir}/tests/functional/centos/7/docker
docker_dedicated_journal: {toxinidir}/tests/functional/centos/7/docker-ded-jrn
docker_dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/docker-crypt-jrn-col
purge_cluster: {toxinidir}/tests/functional/centos/7/cluster
purge_dmcrypt: {toxinidir}/tests/functional/centos/7/crypt-ded-jrn
update_dmcrypt: {toxinidir}/tests/functional/centos/7/crypt-ded-jrn
purge_cluster: {toxinidir}/tests/functional/centos/7/bs-osds-non-container
update_dmcrypt: {toxinidir}/tests/functional/centos/7/bs-osds-non-container
update_cluster: {toxinidir}/tests/functional/centos/7/cluster
switch_to_containers: {toxinidir}/tests/functional/centos/7/cluster
bluestore_journal_collocation: {toxinidir}/tests/functional/centos/7/bs-jrn-col
bluestore_cluster: {toxinidir}/tests/functional/centos/7/bluestore
bluestore_dmcrypt_journal: {toxinidir}/tests/functional/centos/7/bs-crypt-ded-jrn
bluestore_dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/bs-crypt-jrn-col
bluestore_docker_dedicated_journal: {toxinidir}/tests/functional/centos/7/bs-dock-ded-jrn
bluestore_docker_dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/bs-dock-crypt-jrn-col
lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds
purge_lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds
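
With the updated envlist, any of the new scenarios can be exercised locally through a standard tox invocation, for example (a sketch, assuming the vagrant/libvirt environment the CI relies on is available):

tox -e luminous-ansible2.3-bluestore_osds_non_container

Per the changedir mapping above, tox then switches into tests/functional/centos/7/bs-osds-non-container and deploys the 1 mon / 6 osd vagrant cluster defined there.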