Merge pull request #1255 from ceph/backport-1250

Backport: 'CI testing updates'
pull/1260/head
Andrew Schoen 2017-01-27 11:22:45 -06:00 committed by GitHub
commit 1b97602776
3 changed files with 25 additions and 10 deletions

View File

@@ -407,7 +407,7 @@
- name: delete dm-crypt devices if any
command: dmsetup remove {{ item }}
with_items: encrypted_ceph_partuuid.stdout_lines
with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
when: "{{ encrypted_ceph_partuuid.stdout_lines | length > 0 }}"
- name: zap osd disks
@@ -439,6 +439,7 @@
with_items: "{{ ceph_journal_partition_to_erase_path.stdout_lines }}"
when:
- ceph_journal_partlabels.rc == 0
- raw_multi_journal is defined
- raw_multi_journal

View File

@@ -12,3 +12,7 @@ journal_collocation: True
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1

28
tox.ini
View File

@@ -1,7 +1,20 @@
[tox]
envlist = {ansible2.2}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster_collocated}
envlist = {ansible2.2}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster,purge_dmcrypt}
skipsdist = True
# extra commands for purging clusters
# that purge the cluster and then set it up again to
# ensure that a purge can clear nodes well enough that they
# can be redeployed to.
[purge]
commands=
cp {toxinidir}/infrastructure-playbooks/purge-cluster.yml {toxinidir}/purge-cluster.yml
ansible-playbook -vv -i {changedir}/hosts {toxinidir}/purge-cluster.yml --extra-vars="ireallymeanit=yes fetch_directory={changedir}/fetch"
# set up the cluster again
ansible-playbook -vv -i {changedir}/hosts {toxinidir}/site.yml.sample --extra-vars="fetch_directory={changedir}/fetch"
# test that the cluster can be redeployed in a healthy state
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
[testenv]
whitelist_externals =
vagrant
@@ -33,8 +46,8 @@ changedir=
centos7_cluster: {toxinidir}/tests/functional/centos/7/cluster
# tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using docker
docker_cluster: {toxinidir}/tests/functional/centos/7/docker-cluster
# creates a cluster, purges the cluster and then brings the cluster back up
purge_cluster_collocated: {toxinidir}/tests/functional/centos/7/journal-collocation
purge_cluster: {toxinidir}/tests/functional/ubuntu/16.04/cluster
purge_dmcrypt: {toxinidir}/tests/functional/centos/7/dmcrypt-dedicated-journal
commands=
vagrant up --no-provision {posargs:--provider=virtualbox}
bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
@@ -44,10 +57,7 @@ commands=
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
purge_cluster_collocated: cp {toxinidir}/infrastructure-playbooks/purge-cluster.yml {toxinidir}/purge-cluster.yml
purge_cluster_collocated: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/purge-cluster.yml --extra-vars="ireallymeanit=yes fetch_directory={changedir}/fetch"
# set up the cluster again
purge_cluster_collocated: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars="fetch_directory={changedir}/fetch"
# test that the cluster can be redeployed in a healthy state
purge_cluster_collocated: testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
purge_cluster: {[purge]commands}
purge_dmcrypt: {[purge]commands}
vagrant destroy --force