debug

Signed-off-by: Guillaume Abrioux <guits@hive1.elisheba.io>

Commit: c1092dca84 (branch 3.2_debug)
Parent: b84739812f
Author: Guillaume Abrioux
Date: 2021-04-08 03:50:13 +02:00
11 changed files with 41 additions and 111 deletions


@@ -15,7 +15,7 @@ rgw_bucket_default_quota_max_objects: 1638400
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
-openstack_config: True
+openstack_config: false
 openstack_glance_pool:
   name: "images"
   pg_num: "{{ osd_pool_default_pg_num }}"
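Note: in ceph-ansible 3.2 this flag controls whether the playbook creates the OpenStack pools and keys defined alongside it, so flipping it to false skips that whole step during the debug run. As a hedged illustration only (openstack_pools and its contents are assumed from the project defaults, not taken from this diff), the gated structure looks roughly like:

# Illustration, assuming ceph-ansible 3.2 defaults; not part of this commit.
openstack_config: false            # skip OpenStack pool/keyring creation
openstack_pools:
  - "{{ openstack_glance_pool }}"  # pool definition shown in the hunk above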


@@ -1,6 +1,6 @@
 ---
-create_crush_tree: True
-crush_rule_config: True
+create_crush_tree: false
+crush_rule_config: false
 crush_rule_hdd:
   name: HDD
   root: default


@@ -1,11 +1,2 @@
 ---
 ceph_osd_docker_run_script_path: /var/tmp
-osd_objectstore: "bluestore"
-osd_scenario: lvm
-lvm_volumes:
-  - data: data-lv1
-    data_vg: test_group
-  - data: data-lv2
-    data_vg: test_group
-    db: journal1
-    db_vg: journals
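Note: the lvm_volumes layout removed here is replaced by a collocated-OSD setup passed as inline inventory variables further down in this commit. A minimal group_vars sketch of that equivalent configuration, assuming the same devices as the new [osds] entry:

# Sketch only: group_vars equivalent of the inline host vars added to the osd0 inventory line in this commit.
osd_scenario: collocated
osd_objectstore: bluestore
devices:
  - /dev/sda
  - /dev/sdb
  - /dev/sdc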


@@ -1,32 +1,8 @@
 [mons]
 mon0
-mon1
-mon2
 [mgrs]
-mgr0
 mon0
 [osds]
-osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
-osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
-[mdss]
-mds0
-mds1
-mds2
-[rgws]
-rgw0
-[nfss]
-nfs0
-[clients]
-client0
-client1
-[rbdmirrors]
-rbd-mirror0
-[iscsigws]
-iscsi-gw0
+osd0 osd_objectstore=bluestore osd_scenario=collocated devices="['/dev/sda', '/dev/sdb', '/dev/sdc']"


@@ -4,15 +4,15 @@
 docker: True

 # DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 3
-osd_vms: 2
-mds_vms: 3
-rgw_vms: 1
-nfs_vms: 1
-rbd_mirror_vms: 1
-client_vms: 2
-iscsi_gw_vms: 1
-mgr_vms: 1
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0

 # Deploy RESTAPI on each of the Monitors
 restapi: true


@@ -7,7 +7,7 @@ radosgw_interface: eth1
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
-openstack_config: True
+openstack_config: false
 openstack_glance_pool:
   name: "images"
   pg_num: "{{ osd_pool_default_pg_num }}"


@@ -1,6 +1,6 @@
 ---
-create_crush_tree: True
-crush_rule_config: True
+create_crush_tree: false
+crush_rule_config: false
 crush_rule_hdd:
   name: HDD
   root: default
@@ -8,4 +8,4 @@ crush_rule_hdd:
   class: hdd
   default: true
 crush_rules:
-  - "{{ crush_rule_hdd }}"
+  - "{{ crush_rule_hdd }}"


@@ -1,12 +1,3 @@
 ---
 os_tuning_params:
   - { name: fs.file-max, value: 26234859 }
-osd_scenario: lvm
-osd_objectstore: "bluestore"
-lvm_volumes:
-  - data: data-lv1
-    data_vg: test_group
-  - data: data-lv2
-    data_vg: test_group
-    db: journal1
-    db_vg: journals


@@ -1,38 +1,8 @@
 [mons]
 mon0 monitor_address=192.168.1.10
-mon1 monitor_interface=eth1
-mon2 monitor_address=192.168.1.12
 [mgrs]
-mgr0
 mon0
 [osds]
-osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
-osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
-[mdss]
-mds0
-mds1
-mds2
-[rgws]
-rgw0
-[clients]
-client0
-client1
-[nfss]
-nfs0
-[rbdmirrors]
-rbd-mirror0
-[iscsigws]
-iscsi-gw0
-[all:vars]
-nfs_ganesha_stable=True
-nfs_ganesha_dev=False
-nfs_ganesha_stable_branch="V2.7-stable"
-nfs_ganesha_flavor="ceph_master"
+osd0 osd_scenario=collocated osd_objectstore=bluestore devices="['/dev/sda', '/dev/sdb', '/dev/sdc']"


@@ -4,15 +4,15 @@
 docker: false

 # DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 3
-osd_vms: 2
-mds_vms: 3
-rgw_vms: 1
-nfs_vms: 1
-rbd_mirror_vms: 1
-client_vms: 2
-iscsi_gw_vms: 1
-mgr_vms: 1
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0

 # INSTALL SOURCE OF CEPH
 # valid values are 'stable' and 'dev'

tox.ini

@@ -221,7 +221,7 @@ commands=
   bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
   # configure lvm
-  !lvm_batch-!lvm_auto_discovery: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
+  # !lvm_batch-!lvm_auto_discovery: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
   rhcs: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"
@@ -237,24 +237,26 @@ commands=
   # wait 30sec for services to be ready
   sleep 30
   # test cluster state using ceph-ansible tests
-  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+  # py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
   # reboot all vms
   all_daemons: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml
   # wait 30sec for services to be ready
   # retest to ensure cluster came back up correctly after rebooting
-  all_daemons: py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+  # all_daemons: py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
   # handlers/idempotency test
   all_daemons,all_in_one: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-luminous} copy_admin_key={env:COPY_ADMIN_KEY:False} " --extra-vars @ceph-override.json
-  purge: {[purge]commands}
-  switch_to_containers: {[switch-to-containers]commands}
-  shrink_mon: {[shrink-mon]commands}
-  shrink_osd: {[shrink-osd]commands}
-  shrink_osd_legacy: {[shrink-osd-legacy]commands}
-  add_osds: {[add-osds]commands}
-  rgw_multisite: {[rgw-multisite]commands}
+  #
+  # purge: {[purge]commands}
+  # switch_to_containers: {[switch-to-containers]commands}
+  # shrink_mon: {[shrink-mon]commands}
+  # shrink_osd: {[shrink-osd]commands}
+  # shrink_osd_legacy: {[shrink-osd-legacy]commands}
+  # add_osds: {[add-osds]commands}
+  # rgw_multisite: {[rgw-multisite]commands}
+  sleep 365d
   vagrant destroy --force