tests: move erasure pool testing to lvm_osds

This commit moves the erasure pool creation testing from `all_daemons`
to `lvm_osds` so we can decrease the number of osd nodes we spawn, which
keeps the OVH Jenkins slaves from being overwhelmed when an
`all_daemons`-based scenario is being tested.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 8476beb5b1)
pull/5697/head
Guillaume Abrioux 2020-08-11 15:26:16 +02:00
parent 489efd5689
commit 4685b411de
10 changed files with 50 additions and 14 deletions
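
As background for the diffs below (a minimal sketch, not part of the commit itself): in these test group_vars the pool `type` field carries Ceph's numeric pool type, where 1 is a replicated pool and 3 is an erasure-coded pool. That is why the `all_daemons` glance pool switches from `type: 3` back to `type: 1` while the `lvm_osds` scenarios gain a `type: 3` pool, roughly like this:

    # Sketch of an erasure-coded test pool declaration (mirrors the
    # openstack_glance_pool entry added to the lvm_osds group_vars below).
    openstack_glance_pool:
      name: "images"
      pg_num: "{{ osd_pool_default_pg_num }}"
      pgp_num: "{{ osd_pool_default_pg_num }}"
      type: 3          # 3 = erasure-coded, 1 = replicated
      size: 1
      application: rbd
    openstack_pools:
      - "{{ openstack_glance_pool }}"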


@@ -22,7 +22,7 @@ openstack_glance_pool:
   name: "images"
   pg_num: "{{ osd_pool_default_pg_num }}"
   pgp_num: "{{ osd_pool_default_pg_num }}"
-  type: 3
+  type: 1
   size: 1
   pg_autoscale_mode: True
   target_size_ratio: 0.2


@@ -7,12 +7,13 @@ test:
pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
size: 1
test2:
name: "test2"
pg_num: "{{ osd_pool_default_pg_num }}"
pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
size: 1
pools:
- "{{ test }}"
- "{{ test2 }}"


@@ -10,8 +10,6 @@ mgr0
 osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
 osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
 osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }"
-osd3 osd_crush_location="{ 'root': 'default', 'host': 'osd3' }"
-osd4 osd_crush_location="{ 'root': 'default', 'host': 'osd4' }"
 [mdss]
 mds0


@@ -5,7 +5,7 @@ docker: True
 # DEFINE THE NUMBER OF VMS TO RUN
 mon_vms: 3
-osd_vms: 5
+osd_vms: 3
 mds_vms: 3
 rgw_vms: 1
 nfs_vms: 1


@@ -14,7 +14,7 @@ openstack_glance_pool:
   name: "images"
   pg_num: "{{ osd_pool_default_pg_num }}"
   pgp_num: "{{ osd_pool_default_pg_num }}"
-  type: 3
+  type: 1
   size: 1
   application: rbd
   pg_autoscale_mode: True


@@ -7,14 +7,13 @@ test:
   pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
-  size: "{{ osd_pool_default_size }}"
+  size: 1
 test2:
   name: "test2"
   pg_num: "{{ osd_pool_default_pg_num }}"
   pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
-  size: "{{ osd_pool_default_size }}"
+  size: 1
 pools:
   - "{{ test }}"
   - "{{ test2 }}"


@@ -10,8 +10,6 @@ mgr0
 osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
 osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
 osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }"
-osd3 osd_crush_location="{ 'root': 'default', 'host': 'osd3' }"
-osd4 osd_crush_location="{ 'root': 'default', 'host': 'osd4' }"
 [mdss]
 mds0


@@ -5,7 +5,7 @@ docker: false
 # DEFINE THE NUMBER OF VMS TO RUN
 mon_vms: 3
-osd_vms: 5
+osd_vms: 3
 mds_vms: 3
 rgw_vms: 1
 nfs_vms: 0


@@ -22,4 +22,24 @@ ceph_conf_overrides:
     osd_pool_default_size: 1
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
-handler_health_osd_check_delay: 10
+handler_health_osd_check_delay: 10
+openstack_config: True
+openstack_glance_pool:
+  name: "images"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  type: 3
+  size: 1
+  application: rbd
+  pg_autoscale_mode: True
+  target_size_ratio: 0.2
+openstack_cinder_pool:
+  name: "volumes"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  type: 1
+  size: 1
+  application: rbd
+openstack_pools:
+  - "{{ openstack_glance_pool }}"
+  - "{{ openstack_cinder_pool }}"


@@ -17,4 +17,24 @@ ceph_conf_overrides:
     osd_pool_default_size: 1
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
-handler_health_osd_check_delay: 10
+handler_health_osd_check_delay: 10
+openstack_config: True
+openstack_glance_pool:
+  name: "images"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  type: 3
+  size: 1
+  application: rbd
+  pg_autoscale_mode: True
+  target_size_ratio: 0.2
+openstack_cinder_pool:
+  name: "volumes"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  type: 1
+  size: 1
+  application: rbd
+openstack_pools:
+  - "{{ openstack_glance_pool }}"
+  - "{{ openstack_cinder_pool }}"