test: convert all the tests to use lvm

ceph-disk is now deprecated in ceph-ansible, so let's convert all the CI
tests to use lvm instead of ceph-disk.

Signed-off-by: Sébastien Han <seb@redhat.com>
pull/3186/head
Sébastien Han 2018-10-10 15:29:56 -04:00 committed by Guillaume Abrioux
parent 89e76e5baf
commit 876f6ced74
12 changed files with 48 additions and 46 deletions
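
The change follows the same pattern in every test group_vars file below: the ceph-disk based settings (osd_scenario: collocated or non-collocated, driven by devices and, for non-collocated, dedicated_devices lists of whole disks) are replaced by osd_scenario: lvm and an explicit lvm_volumes list, which ceph-ansible hands to ceph-volume. A minimal before/after sketch of that conversion for a bluestore OSD, using the VG/LV names these tests rely on:

# before: ceph-disk consuming whole devices
osd_scenario: non-collocated
devices:
  - /dev/sdb
dedicated_devices:
  - /dev/sdc

# after: ceph-volume consuming pre-created LVs
osd_scenario: lvm
osd_objectstore: bluestore
lvm_volumes:
  - data: data-lv1        # LV holding the OSD data
    data_vg: test_group   # VG that LV lives in
  - data: data-lv2
    data_vg: test_group
    db: journal1          # optional separate block.db LV
    db_vg: journals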

View File

@@ -1,7 +1,6 @@
---
ceph_origin: repository
ceph_repository: community
cluster: test
public_network: "192.168.1.0/24"
cluster_network: "192.168.2.0/24"
radosgw_interface: eth1
@@ -12,12 +11,6 @@ nfs_ganesha_stable: true
nfs_ganesha_dev: false
nfs_ganesha_stable_branch: V2.5-stable
nfs_ganesha_flavor: "ceph_master"
devices:
- '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
- '/dev/sdb'
dedicated_devices:
- '/dev/sdc'
- '/dev/sdc'
openstack_config: True
openstack_glance_pool:
name: "images"

View File

@@ -1,4 +1,12 @@
---
os_tuning_params:
- { name: fs.file-max, value: 26234859 }
osd_scenario: non-collocated
osd_scenario: lvm
osd_objectstore: "bluestore"
lvm_volumes:
- data: data-lv1
data_vg: test_group
- data: data-lv2
data_vg: test_group
db: journal1
db_vg: journals

View File

@@ -4,7 +4,6 @@
docker: True
containerized_deployment: True
cluster: test
monitor_interface: eth1
radosgw_interface: eth1
ceph_mon_docker_subnet: "{{ public_network }}"
@@ -16,7 +15,4 @@ rgw_bucket_default_quota_max_objects: 1638400
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1
devices:
- /dev/sda
- /dev/sdb
osd_pool_default_size: 1

View File

@@ -1,5 +1,11 @@
---
ceph_osd_docker_run_script_path: /var/tmp
# OSD_FORCE_ZAP is only for Jewel, the function does not exist anymore on Luminous and above
ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
osd_scenario: collocated
osd_scenario: lvm
osd_objectstore: "bluestore"
lvm_volumes:
- data: data-lv1
data_vg: test_group
- data: data-lv2
data_vg: test_group
db: journal1
db_vg: journals

View File

@@ -4,7 +4,6 @@
docker: True
containerized_deployment: True
cluster: test
monitor_interface: eth1
radosgw_interface: eth1
ceph_mon_docker_subnet: "{{ public_network }}"
@@ -16,9 +15,6 @@ rgw_bucket_default_quota_max_objects: 1638400
ceph_conf_overrides:
global:
osd_pool_default_size: 1
devices:
- '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
- /dev/sdb
openstack_config: True
openstack_glance_pool:
name: "images"

View File

@@ -1,5 +1,11 @@
---
# OSD_FORCE_ZAP is only for Jewel, the function does not exist anymore on Luminous and above
ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
ceph_osd_docker_run_script_path: /var/tmp
osd_scenario: collocated
osd_objectstore: "bluestore"
osd_scenario: lvm
lvm_volumes:
- data: data-lv1
data_vg: test_group
- data: data-lv2
data_vg: test_group
db: journal1
db_vg: journals

View File

@@ -6,17 +6,16 @@ docker: True
ceph_origin: repository
ceph_repository: community
cluster: ceph
public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
monitor_interface: eth1
radosgw_interface: eth1
journal_size: 100
osd_objectstore: "filestore"
osd_scenario: lvm
copy_admin_key: true
containerized_deployment: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
osd_scenario: lvm
lvm_volumes:
- data: data-lv1
journal: /dev/sdc1

View File

@@ -2,16 +2,15 @@
ceph_origin: repository
ceph_repository: community
cluster: ceph
public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
monitor_interface: eth1
radosgw_interface: eth1
journal_size: 100
osd_objectstore: "filestore"
osd_scenario: lvm
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
osd_scenario: lvm
lvm_volumes:
- data: data-lv1
journal: /dev/sdc1
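
The lvm_volumes entries above only name existing LVM objects; as the comment says, they are provisioned beforehand by tests/functional/lvm_setup.yml. The tasks below are only an illustration of what such a setup playbook has to create, based on the names referenced in these files (test_group/data-lv1, data-lv2, journals/journal1, /dev/sdc1); they are not the actual contents of lvm_setup.yml:

- hosts: osds
  become: true
  tasks:
    # assumed layout: /dev/sdb backs the data VG, /dev/sdc is partitioned
    # for journals (sdc1 as a raw journal partition, sdc2 for the journals VG)
    - name: create the test_group VG
      lvg:
        vg: test_group
        pvs: /dev/sdb

    - name: create the data LVs referenced by lvm_volumes
      lvol:
        vg: test_group
        lv: "{{ item }}"
        size: 40%VG          # sizes are arbitrary in this sketch
      loop:
        - data-lv1
        - data-lv2

    - name: create the journals VG and its journal1 LV
      lvg:
        vg: journals
        pvs: /dev/sdc2       # hypothetical PV; the real playbook picks what backs this VG
    - lvol:
        vg: journals
        lv: journal1
        size: 100%FREE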

View File

@@ -1,19 +1,10 @@
---
ceph_origin: repository
ceph_repository: community
cluster: test
public_network: "192.168.71.0/24"
cluster_network: "192.168.72.0/24"
journal_size: 100
radosgw_interface: eth1
osd_objectstore: filestore
ceph_conf_overrides:
global:
osd_pool_default_size: 3
devices:
- '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
- '/dev/sdb'
dedicated_devices:
- '/dev/sdc'
- '/dev/sdc'
openstack_config: False

View File

@@ -1,4 +1,12 @@
---
os_tuning_params:
- { name: fs.file-max, value: 26234859 }
osd_scenario: non-collocated
osd_scenario: lvm
lvm_volumes:
- data: data-lv1
journal: /dev/sdc1
data_vg: test_group
- data: data-lv2
journal: journal1
data_vg: test_group
journal_vg: journals
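
For filestore OSDs the same lvm_volumes list carries journal/journal_vg keys instead of the bluestore db/db_vg keys, and as the two entries above show, a journal can be either a bare partition or an LV. Schematically, the accepted shape is roughly:

lvm_volumes:
  - data: <data LV name>
    data_vg: <VG containing the data LV>
    journal: <partition path (e.g. /dev/sdc1) or journal LV name>
    journal_vg: <VG containing the journal LV; omitted when journal is a partition path>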

View File

@@ -4,13 +4,10 @@
docker: True
containerized_deployment: True
cluster: test
monitor_interface: eth1
radosgw_interface: eth1
ceph_mon_docker_subnet: "{{ public_network }}"
journal_size: 100
ceph_docker_on_openstack: False
osd_objectstore: filestore
public_network: "192.168.73.0/24"
cluster_network: "192.168.74.0/24"
rgw_override_bucket_index_max_shards: 16
@@ -18,7 +15,4 @@ rgw_bucket_default_quota_max_objects: 1638400
ceph_conf_overrides:
global:
osd_pool_default_size: 1
devices:
- '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
- /dev/sdb
openstack_config: False

View File

@@ -1,5 +1,11 @@
---
# OSD_FORCE_ZAP is only for Jewel, the function does not exist anymore on Luminous and above
ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
ceph_osd_docker_run_script_path: /var/tmp
osd_scenario: collocated
osd_scenario: lvm
lvm_volumes:
- data: data-lv1
journal: /dev/sdc1
data_vg: test_group
- data: data-lv2
journal: journal1
data_vg: test_group
journal_vg: journals