tests: play lvm_setup.yml on all scenarios

We should play lvm_setup.yml on all scenarios except `lvm_batch`.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/3535/head
Guillaume Abrioux 2018-12-19 10:17:49 +01:00 committed by Sébastien Han
parent 6293a98a0c
commit 7d705395fb
29 changed files with 37 additions and 435 deletions
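For context, below is a minimal sketch of the LVM layout that tests/functional/lvm_setup.yml is expected to provide for these scenarios. Only the volume group and logical volume names (test_group, data-lv1, data-lv2, journals, journal1) and the /dev/sdb backing disk come from the configuration changed in this commit; the task structure, the journals backing device, and the sizes are illustrative assumptions, not the playbook's actual contents.

---
# Hypothetical sketch only: creates the VGs/LVs that the updated lvm_volumes entries reference.
- hosts: osds
  gather_facts: false
  become: true
  tasks:
    - name: create the test_group volume group on the test disk
      lvg:
        vg: test_group
        pvs: /dev/sdb          # per the "test-volume is created ... from /dev/sdb" comment
    - name: create the data logical volumes referenced by lvm_volumes
      lvol:
        vg: test_group
        lv: "{{ item }}"
        size: 10g              # size is an assumption
      with_items:
        - data-lv1
        - data-lv2
    - name: create the journals volume group
      lvg:
        vg: journals
        pvs: /dev/sdc          # backing device is an assumption
    - name: create the journal logical volume
      lvol:
        vg: journals
        lv: journal1
        size: 2g               # size is an assumption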

@@ -14,8 +14,12 @@ osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:
-  - data: /dev/sda
-  - data: /dev/sdb
+  - data: data-lv1
+    data_vg: test_group
+  - data: data-lv2
+    data_vg: test_group
+    db: journal1
+    db_vg: journals
 os_tuning_params:
   - { name: fs.file-max, value: 26234859 }
 ceph_conf_overrides:

@@ -12,8 +12,12 @@ osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:
-  - data: /dev/sda
-  - data: /dev/sdb
+  - data: data-lv1
+    data_vg: test_group
+  - data: data-lv2
+    data_vg: test_group
+    db: journal1
+    db_vg: journals
 os_tuning_params:
   - { name: fs.file-max, value: 26234859 }
 ceph_conf_overrides:

@@ -1 +0,0 @@
-../../../../../Vagrantfile

@@ -1,21 +0,0 @@
-{
-  "ceph_conf_overrides": {
-    "global": {
-      "osd_pool_default_pg_num": 12,
-      "osd_pool_default_size": 1
-    }
-  },
-  "cephfs_pools": [
-    {
-      "name": "cephfs_metadata",
-      "pgs": 8,
-      "size": 1
-    },
-    {
-      "name": "cephfs_data",
-      "pgs": 8,
-      "size": 1
-    }
-  ],
-  "ceph_mon_docker_memory_limit": "2g"
-}

@@ -1 +0,0 @@
-../../../../../../Vagrantfile

@@ -1 +0,0 @@
-../../cluster/ceph-override.json

@@ -1,40 +0,0 @@
----
-# this is only here to let the CI tests know
-# that this scenario is using docker
-docker: True
-containerized_deployment: True
-monitor_interface: eth1
-radosgw_interface: eth1
-radosgw_num_instances: 2
-ceph_mon_docker_subnet: "{{ public_network }}"
-ceph_docker_on_openstack: False
-public_network: "192.168.17.0/24"
-cluster_network: "192.168.18.0/24"
-rgw_override_bucket_index_max_shards: 16
-rgw_bucket_default_quota_max_objects: 1638400
-ceph_conf_overrides:
-  global:
-    osd_pool_default_size: 1
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  pg_num: "{{ osd_pool_default_pg_num }}"
-  pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "HDD"
-  type: 1
-  erasure_profile: ""
-  expected_num_objects: ""
-  size: 1
-openstack_cinder_pool:
-  name: "volumes"
-  pg_num: "{{ osd_pool_default_pg_num }}"
-  pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "HDD"
-  type: 1
-  erasure_profile: ""
-  expected_num_objects: ""
-  size: 1
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"

@@ -1,22 +0,0 @@
----
-user_config: True
-copy_admin_key: True
-test:
-  name: "test"
-  pg_num: "{{ osd_pool_default_pg_num }}"
-  pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "HDD"
-  type: 1
-  erasure_profile: ""
-  expected_num_objects: ""
-test2:
-  name: "test2"
-  pg_num: "{{ osd_pool_default_pg_num }}"
-  pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "HDD"
-  type: 1
-  erasure_profile: ""
-  expected_num_objects: ""
-pools:
-  - "{{ test }}"
-  - "{{ test2 }}"

@@ -1,3 +0,0 @@
----
-gateway_ip_list: 192.168.1.90
-generate_crt: True

@@ -1,10 +0,0 @@
----
-create_crush_tree: True
-crush_rule_config: True
-crush_rule_hdd:
-  name: HDD
-  root: HDD
-  type: host
-  default: true
-crush_rules:
-  - "{{ crush_rule_hdd }}"

@@ -1,11 +0,0 @@
----
-ceph_osd_docker_run_script_path: /var/tmp
-osd_objectstore: "bluestore"
-osd_scenario: lvm
-lvm_volumes:
-  - data: data-lv1
-    data_vg: test_group
-  - data: data-lv2
-    data_vg: test_group
-    db: journal1
-    db_vg: journals

@@ -1,7 +0,0 @@
----
-copy_admin_key: True
-rgw_create_pools:
-  foo:
-    pg_num: 17
-  bar:
-    pg_num: 19

@@ -1,27 +0,0 @@
-[mons]
-mon0
-mon1
-mon2
-
-[osds]
-osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
-osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
-
-[mdss]
-mds0
-
-[rgws]
-rgw0
-
-#[nfss]
-#nfs0
-
-[clients]
-client0
-client1
-
-[rbdmirrors]
-rbd-mirror0
-
-[iscsigws]
-iscsi-gw0

@@ -1,60 +0,0 @@
----
-# DEPLOY CONTAINERIZED DAEMONS
-docker: True
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 3
-osd_vms: 2
-mds_vms: 1
-rgw_vms: 1
-nfs_vms: 0
-rbd_mirror_vms: 1
-client_vms: 2
-iscsi_gw_vms: 1
-mgr_vms: 0
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.17
-cluster_subnet: 192.168.18
-# MEMORY
-# set 1024 for CentOS
-memory: 1024
-# Disks
-# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
-# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
-disks: "[ '/dev/sda', '/dev/sdb' ]"
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-#   * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-#   - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-#   - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: centos/atomic-host
-#client_vagrant_box: centos/7
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-vagrant_sync_dir: /vagrant
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box

@@ -1,36 +0,0 @@
----
-ceph_origin: repository
-ceph_repository: community
-public_network: "192.168.1.0/24"
-cluster_network: "192.168.2.0/24"
-radosgw_interface: eth1
-radosgw_num_instances: 2
-ceph_conf_overrides:
-  global:
-    osd_pool_default_size: 1
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  pg_num: "{{ osd_pool_default_pg_num }}"
-  pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "HDD"
-  type: 1
-  erasure_profile: ""
-  expected_num_objects: ""
-  size: 1
-openstack_cinder_pool:
-  name: "volumes"
-  pg_num: "{{ osd_pool_default_pg_num }}"
-  pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "HDD"
-  type: 1
-  erasure_profile: ""
-  expected_num_objects: ""
-  size: 1
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
-nfs_ganesha_stable: true
-nfs_ganesha_dev: false
-nfs_ganesha_stable_branch: V2.5-stable
-nfs_ganesha_flavor: "ceph_master"

@@ -1,24 +0,0 @@
----
-copy_admin_key: True
-user_config: True
-test:
-  name: "test"
-  pg_num: "{{ osd_pool_default_pg_num }}"
-  pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "HDD"
-  type: 1
-  erasure_profile: ""
-  expected_num_objects: ""
-  size: "{{ osd_pool_default_size }}"
-test2:
-  name: "test2"
-  pg_num: "{{ osd_pool_default_pg_num }}"
-  pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "HDD"
-  type: 1
-  erasure_profile: ""
-  expected_num_objects: ""
-  size: "{{ osd_pool_default_size }}"
-pools:
-  - "{{ test }}"
-  - "{{ test2 }}"

@@ -1,3 +0,0 @@
----
-gateway_ip_list: 192.168.1.90
-generate_crt: True

@@ -1,10 +0,0 @@
----
-create_crush_tree: True
-crush_rule_config: True
-crush_rule_hdd:
-  name: HDD
-  root: HDD
-  type: host
-  default: true
-crush_rules:
-  - "{{ crush_rule_hdd }}"

@@ -1,7 +0,0 @@
-copy_admin_key: true
-nfs_file_gw: false
-nfs_obj_gw: true
-ganesha_conf_overrides: |
-  CACHEINODE {
-    Entries_HWMark = 100000;
-  }

@@ -1,12 +0,0 @@
----
-os_tuning_params:
-  - { name: fs.file-max, value: 26234859 }
-osd_scenario: lvm
-osd_objectstore: "bluestore"
-lvm_volumes:
-  - data: data-lv1
-    data_vg: test_group
-  - data: data-lv2
-    data_vg: test_group
-    db: journal1
-    db_vg: journals

@@ -1,8 +0,0 @@
-copy_admin_key: true
-rgw_create_pools:
-  foo:
-    pg_num: 17
-  bar:
-    pg_num: 19
-rgw_override_bucket_index_max_shards: 16
-rgw_bucket_default_quota_max_objects: 1638400

@@ -1,27 +0,0 @@
-[mons]
-ceph-mon0 monitor_address=192.168.1.10
-ceph-mon1 monitor_interface=eth1
-ceph-mon2 monitor_address=192.168.1.12
-
-[osds]
-ceph-osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd0' }"
-ceph-osd1 osd_crush_location="{ 'root': 'default', 'host': 'ceph-osd1' }"
-
-[mdss]
-ceph-mds0
-
-[rgws]
-ceph-rgw0
-
-[clients]
-ceph-client0
-ceph-client1
-
-#[nfss]
-#ceph-nfs0
-
-[rbdmirrors]
-ceph-rbd-mirror0
-
-[iscsigws]
-ceph-iscsi-gw0 ceph_repository="dev"

@@ -1,19 +0,0 @@
-[all:vars]
-docker=True
-
-[mons]
-ceph-mon0 monitor_address=192.168.1.10
-ceph-mon1 monitor_interface=eth1
-ceph-mon2 monitor_address=192.168.1.12
-
-[osds]
-ceph-osd0
-
-[mdss]
-ceph-mds0
-
-[rgws]
-ceph-rgw0
-
-[clients]
-ceph-client0

@@ -1,73 +0,0 @@
----
-# DEPLOY CONTAINERIZED DAEMONS
-docker: false
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 3
-osd_vms: 2
-mds_vms: 1
-rgw_vms: 1
-nfs_vms: 0
-rbd_mirror_vms: 1
-client_vms: 2
-iscsi_gw_vms: 1
-mgr_vms: 0
-# INSTALL SOURCE OF CEPH
-# valid values are 'stable' and 'dev'
-ceph_install_source: stable
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.1
-cluster_subnet: 192.168.2
-# MEMORY
-# set 1024 for CentOS
-memory: 1024
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-# Disks
-# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
-# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
-disks: "[ '/dev/sdb', '/dev/sdc' ]"
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-#   * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-#   - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-#   - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: centos/7
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-vagrant_sync_dir: /vagrant
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-os_tuning_params:
-  - { name: fs.file-max, value: 26234859 }
-# VM prefix name, need to match the hostname
-label_prefix: ceph

@@ -14,8 +14,12 @@ osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:
-  - data: /dev/sda
-  - data: /dev/sdb
+  - data: data-lv1
+    data_vg: test_group
+  - data: data-lv2
+    data_vg: test_group
+    db: journal1
+    db_vg: journals
 os_tuning_params:
   - { name: fs.file-max, value: 26234859 }
 ceph_conf_overrides:

@@ -14,8 +14,12 @@ osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:
-  - data: /dev/sda
-  - data: /dev/sdb
+  - data: data-lv1
+    data_vg: test_group
+  - data: data-lv2
+    data_vg: test_group
+    db: journal1
+    db_vg: journals
 os_tuning_params:
   - { name: fs.file-max, value: 26234859 }
 ceph_conf_overrides:

@@ -12,8 +12,12 @@ osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:
-  - data: /dev/sda
-  - data: /dev/sdb
+  - data: data-lv1
+    data_vg: test_group
+  - data: data-lv2
+    data_vg: test_group
+    db: journal1
+    db_vg: journals
 os_tuning_params:
   - { name: fs.file-max, value: 26234859 }
 ceph_conf_overrides:

@@ -12,8 +12,12 @@ osd_scenario: lvm
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
 lvm_volumes:
-  - data: /dev/sda
-  - data: /dev/sdb
+  - data: data-lv1
+    data_vg: test_group
+  - data: data-lv2
+    data_vg: test_group
+    db: journal1
+    db_vg: journals
 os_tuning_params:
   - { name: fs.file-max, value: 26234859 }
 ceph_conf_overrides:

@@ -153,6 +153,7 @@ commands=
 [add-osds]
 commands=
   ansible-playbook -vv -i {changedir}/hosts-2 --limit osd1 {toxinidir}/tests/functional/setup.yml
+  ansible-playbook -vv -i {changedir}/hosts-2 --limit osd1 {toxinidir}/tests/functional/lvm_setup.yml
   cp {toxinidir}/infrastructure-playbooks/add-osd.yml {toxinidir}/add-osd.yml
   ansible-playbook -vv -i {changedir}/hosts-2 --limit osd1 {toxinidir}/add-osd.yml --extra-vars "\
       ireallymeanit=yes \