tests: move erasure pool testing to lvm_osds

This commit moves the erasure pool creation testing from `all_daemons`
to `lvm_osds` so we can decrease the number of OSD nodes we spawn and
the OVH Jenkins slaves are less overwhelmed when an `all_daemons`-based
scenario is being tested.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 8476beb5b1)
pull/5698/head
Guillaume Abrioux 2020-08-11 15:26:16 +02:00
parent 5b73af9c34
commit 2754895b89
10 changed files with 52 additions and 28 deletions
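
A note on the pool definitions touched below: the `type` field uses Ceph's pool type codes, 1 for replicated and 3 for erasure-coded, and an empty `erasure_profile` makes Ceph fall back to its default erasure-code profile. The two shapes involved, as they appear in this diff:

openstack_glance_pool:    # erasure-coded pool, the one this commit moves to lvm_osds
  name: "images"
  pg_num: "{{ osd_pool_default_pg_num }}"
  pgp_num: "{{ osd_pool_default_pg_num }}"
  type: 3
  size: 1
  erasure_profile: ""
  application: rbd

openstack_cinder_pool:    # plain replicated pool, for comparison
  name: "volumes"
  pg_num: "{{ osd_pool_default_pg_num }}"
  pgp_num: "{{ osd_pool_default_pg_num }}"
  type: 1
  size: 1
  application: rbd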


@@ -20,10 +20,7 @@ openstack_glance_pool:
   name: "images"
   pg_num: "{{ osd_pool_default_pg_num }}"
   pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "HDD"
-  type: 3
-  erasure_profile: ""
-  expected_num_objects: ""
+  type: 1
   size: 1
   pg_autoscale_mode: True
   target_size_ratio: 0.2


@@ -7,16 +7,13 @@ test:
   pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
-  erasure_profile: ""
-  expected_num_objects: ""
+  size: 1
 test2:
   name: "test2"
   pg_num: "{{ osd_pool_default_pg_num }}"
   pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "HDD"
   type: 1
-  erasure_profile: ""
-  expected_num_objects: ""
+  size: 1
 pools:
   - "{{ test }}"
   - "{{ test2 }}"


@@ -10,8 +10,6 @@ mgr0
 osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
 osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
 osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }"
-osd3 osd_crush_location="{ 'root': 'default', 'host': 'osd3' }"
-osd4 osd_crush_location="{ 'root': 'default', 'host': 'osd4' }"
 
 [mdss]
 mds0


@@ -5,7 +5,7 @@ docker: True
 # DEFINE THE NUMBER OF VMS TO RUN
 mon_vms: 3
-osd_vms: 5
+osd_vms: 3
 mds_vms: 3
 rgw_vms: 1
 nfs_vms: 1


@@ -12,10 +12,7 @@ openstack_glance_pool:
   name: "images"
   pg_num: "{{ osd_pool_default_pg_num }}"
   pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "HDD"
-  type: 3
-  erasure_profile: ""
-  expected_num_objects: ""
+  type: 1
   size: 1
   application: rbd
   pg_autoscale_mode: True


@@ -7,18 +7,13 @@ test:
   pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
-  erasure_profile: ""
-  expected_num_objects: ""
-  size: "{{ osd_pool_default_size }}"
+  size: 1
 test2:
   name: "test2"
   pg_num: "{{ osd_pool_default_pg_num }}"
   pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "HDD"
   type: 1
-  erasure_profile: ""
-  expected_num_objects: ""
-  size: "{{ osd_pool_default_size }}"
+  size: 1
 pools:
   - "{{ test }}"
   - "{{ test2 }}"


@@ -10,8 +10,6 @@ mgr0
 osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
 osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
 osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }"
-osd3 osd_crush_location="{ 'root': 'default', 'host': 'osd3' }"
-osd4 osd_crush_location="{ 'root': 'default', 'host': 'osd4' }"
 
 [mdss]
 mds0


@@ -5,7 +5,7 @@ docker: false
 # DEFINE THE NUMBER OF VMS TO RUN
 mon_vms: 3
-osd_vms: 5
+osd_vms: 3
 mds_vms: 3
 rgw_vms: 1
 nfs_vms: 1


@@ -20,4 +20,25 @@ ceph_conf_overrides:
     osd_pool_default_size: 1
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
+openstack_config: True
+openstack_glance_pool:
+  name: "images"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  type: 3
+  size: 1
+  erasure_profile: ""
+  application: rbd
+  pg_autoscale_mode: True
+  target_size_ratio: 0.2
+openstack_cinder_pool:
+  name: "volumes"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  type: 1
+  size: 1
+  application: rbd
+openstack_pools:
+  - "{{ openstack_glance_pool }}"
+  - "{{ openstack_cinder_pool }}"


@@ -15,4 +15,25 @@ ceph_conf_overrides:
     osd_pool_default_size: 1
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
+openstack_config: True
+openstack_glance_pool:
+  name: "images"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  type: 3
+  size: 1
+  erasure_profile: ""
+  application: rbd
+  pg_autoscale_mode: True
+  target_size_ratio: 0.2
+openstack_cinder_pool:
+  name: "volumes"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  type: 1
+  size: 1
+  application: rbd
+openstack_pools:
+  - "{{ openstack_glance_pool }}"
+  - "{{ openstack_cinder_pool }}"