mirror of https://github.com/ceph/ceph-ansible.git
tests: leave an OSD node in default crush root
Jewel used to create a default `rbd` pool in the default crush root `default`. We need at least one OSD under that root to satisfy the PGs of this created pool, otherwise the cluster ends up in HEALTH_ERR state because of `pgs stuck unclean`/`pgs stuck inactive`.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>

pull/2954/head
parent a1ca2c8fd3
commit 578aa5c2d5
@@ -8,6 +8,7 @@ ceph-mgr0
 
 [osds]
 ceph-osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd0' }"
+ceph-osd1 osd_crush_location="{ 'root': 'default', 'host': 'ceph-osd1' }"
 
 [mdss]
 ceph-mds0
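The hunk above relies on `osd_crush_location`, the inventory host variable ceph-ansible uses to place each OSD host into CRUSH buckets (keys are bucket types, values are bucket names). A minimal sketch of the pattern, using placeholder hostnames and a placeholder custom root rather than the real test values:

[osds]
# OSD placed under a custom CRUSH root; pools whose rule targets root
# 'default' cannot map their PGs onto it.
osd-a osd_crush_location="{ 'root': 'custom-root', 'host': 'osd-a' }"
# OSD kept under the default root so the automatically created 'rbd'
# pool has at least one OSD to place its PGs on.
osd-b osd_crush_location="{ 'root': 'default', 'host': 'osd-b' }"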
@@ -5,7 +5,7 @@ docker: false
 
 # DEFINE THE NUMBER OF VMS TO RUN
 mon_vms: 3
-osd_vms: 1
+osd_vms: 2
 mds_vms: 1
 rgw_vms: 1
 nfs_vms: 1
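In the hunk above, `osd_vms` is the vagrant_variables.yml setting that controls how many OSD VMs the test scenario boots, so it is bumped to cover both hosts now listed under `[osds]`. A minimal sketch of the relevant line, assuming the surrounding counts stay unchanged:

osd_vms: 2   # one VM per [osds] inventory entry, so ceph-osd1 actually exists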