tests: increase `mon_max_pg_per_osd`

we aren't deploying enough OSD daemons, so it fails with the following error:

```
  stderr: 'Error ERANGE: pool id 10 pg_num 256 size 2 would mean 1536 total pgs, which exceeds max 1500 (mon_max_pg_per_osd 250 * num_in_osds 6)'
```

Let's increase the value of `mon_max_pg_per_osd` to work around this
issue in the CI.
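
For the record, the arithmetic behind the check: the new pool adds
pg_num (256) * size (2) = 512 PGs on top of the 1024 already allocated,
hence the 1536 total in the error above.

```
old limit: mon_max_pg_per_osd (250) * num_in_osds (6) = 1500 < 1536  -> ERANGE
new limit: mon_max_pg_per_osd (300) * num_in_osds (6) = 1800 >= 1536 -> OK
```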

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 682116023d)

```
@@ -20,6 +20,7 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
+    mon_max_pg_per_osd: 300
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 rgw_create_pools:
```
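
This and the following YAML hunks make the same one-line change. For
context, ceph-ansible renders these `ceph_conf_overrides` keys into the
`[global]` section of the generated ceph.conf, roughly as follows (a
sketch of the resulting config, not part of this diff):

```
[global]
mon_allow_pool_size_one = true
mon_warn_on_pool_no_redundancy = false
osd_pool_default_size = 1
mon_max_pg_per_osd = 300
```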

```
@@ -17,6 +17,7 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
+    mon_max_pg_per_osd: 300
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 osd_objectstore: "bluestore"
```

```
@@ -4,7 +4,8 @@
       "osd_pool_default_pg_num": 12,
       "osd_pool_default_size": 1,
       "mon_allow_pool_size_one": true,
-      "mon_warn_on_pool_no_redundancy": false
+      "mon_warn_on_pool_no_redundancy": false,
+      "mon_max_pg_per_osd": 300
     }
   },
   "cephfs_pools": [
```

```
@@ -16,6 +16,7 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
+    mon_max_pg_per_osd: 300
 openstack_config: True
 openstack_glance_pool:
   name: "images"
```

```
@@ -9,6 +9,7 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
+    mon_max_pg_per_osd: 300
 openstack_config: True
 openstack_glance_pool:
   name: "images"
```

```
@@ -18,6 +18,7 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
+    mon_max_pg_per_osd: 300
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 dashboard_admin_password: $sX!cD$rYU6qR^B!
```

```
@@ -15,6 +15,7 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
+    mon_max_pg_per_osd: 300
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 dashboard_admin_password: $sX!cD$rYU6qR^B!
```
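
For reference, the same limit can also be raised on a running cluster
without redeploying, via the standard `ceph` CLI (a sketch, assuming a
Nautilus-or-later cluster with the centralized config database):

```
# raise the per-OSD PG limit cluster-wide at runtime
ceph config set global mon_max_pg_per_osd 300
# confirm the value as seen by the monitors
ceph config get mon mon_max_pg_per_osd
```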