Merge pull request #1215 from ceph/purge-cluster-tests

tests: adds a purge_cluster_collocated scenario
Alfredo Deza 2017-01-03 17:18:29 -05:00 committed by GitHub
commit 0d1104c48e
2 changed files with 41 additions and 32 deletions

infrastructure-playbooks/purge-cluster.yml

@@ -74,9 +74,9 @@
   - include_vars: roles/ceph-common/defaults/main.yml
   - include_vars: roles/ceph-mds/defaults/main.yml
   - include_vars: group_vars/all.yml
-    failed_when: false
+    ignore_errors: true
   - include_vars: group_vars/{{ mds_group_name }}.yml
-    failed_when: false
+    ignore_errors: true
   - name: stop ceph.target with systemd
     service:
@@ -125,9 +125,9 @@
   - include_vars: roles/ceph-common/defaults/main.yml
   - include_vars: roles/ceph-rgw/defaults/main.yml
   - include_vars: group_vars/all.yml
-    failed_when: false
+    ignore_errors: true
   - include_vars: group_vars/{{ rgw_group_name }}.yml
-    failed_when: false
+    ignore_errors: true
   - name: stop ceph.target with systemd
     service:
@@ -176,9 +176,9 @@
   - include_vars: roles/ceph-common/defaults/main.yml
   - include_vars: roles/ceph-rbd-mirror/defaults/main.yml
   - include_vars: group_vars/all.yml
-    failed_when: false
+    ignore_errors: true
   - include_vars: group_vars/{{ rbdmirror_group_name }}.yml
-    failed_when: false
+    ignore_errors: true
   - name: stop ceph.target with systemd
     service:
@@ -221,9 +221,9 @@
   - include_vars: roles/ceph-common/defaults/main.yml
   - include_vars: roles/ceph-nfs/defaults/main.yml
   - include_vars: group_vars/all.yml
-    failed_when: false
+    ignore_errors: true
   - include_vars: group_vars/{{ nfs_group_name }}.yml
-    failed_when: false
+    ignore_errors: true
   - name: stop ceph.target with systemd
     service:
@@ -290,9 +290,9 @@
   - include_vars: roles/ceph-common/defaults/main.yml
   - include_vars: roles/ceph-osd/defaults/main.yml
   - include_vars: group_vars/all.yml
-    failed_when: false
+    ignore_errors: true
   - include_vars: group_vars/{{ osd_group_name }}.yml
-    failed_when: false
+    ignore_errors: true
   - name: check for a device list
     fail:
@ -357,14 +357,6 @@
failed_when: false failed_when: false
register: ceph_journal_partlabels register: ceph_journal_partlabels
- name: get ceph journal partitions
shell: |
blkid | awk '/ceph journal/ { sub (":", "", $1); print $1 }'
when:
- ceph_journal_partlabels.rc == 0
failed_when: false
register: ceph_journal_partition_to_erase_path
- name: get osd data mount points - name: get osd data mount points
shell: "(grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'" shell: "(grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'"
register: mounted_osd register: mounted_osd
@@ -408,6 +400,14 @@
       ceph_data_partlabels.rc == 0 and
       zap_block_devs
+  - name: get ceph journal partitions
+    shell: |
+      blkid | awk '/ceph journal/ { sub (":", "", $1); print $1 }'
+    when:
+      - ceph_journal_partlabels.rc == 0
+    failed_when: false
+    register: ceph_journal_partition_to_erase_path
   - name: zap ceph journal partitions
     shell: |
       # if the disk passed is a raw device AND the boot system disk
@@ -422,8 +422,8 @@
       sgdisk --delete $partition_nb $raw_device
     with_items: "{{ ceph_journal_partition_to_erase_path.stdout_lines }}"
     when:
-      ceph_journal_partlabels.rc == 0 and
-      zap_block_devs
+      - ceph_journal_partlabels.rc == 0
+      - zap_block_devs
 - name: purge ceph mon cluster
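Note: in the two hunks above, the "get ceph journal partitions" task moves, unchanged, from before the "get osd data mount points" task to just before the journal zap, presumably so the registered blkid result reflects the disks' state after the data partitions have been zapped, and the "when:" on the journal zap is rewritten as a YAML list, where every item must hold (an implicit AND). A small stand-in task illustrating the list form; the debug task is only an illustration, the real task shells out to sgdisk as shown above:

    - name: show what the journal zap would act on (illustration only)
      debug:
        msg: "would zap {{ item }}"
      with_items: "{{ ceph_journal_partition_to_erase_path.stdout_lines }}"
      when:
        - ceph_journal_partlabels.rc == 0
        - zap_block_devs
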
@@ -444,11 +444,11 @@
   - include_vars: roles/ceph-mon/defaults/main.yml
   - include_vars: roles/ceph-restapi/defaults/main.yml
   - include_vars: group_vars/all.yml
-    failed_when: false
+    ignore_errors: true
   - include_vars: group_vars/{{ mon_group_name }}.yml
-    failed_when: false
+    ignore_errors: true
   - include_vars: group_vars/{{ restapi_group_name }}.yml
-    failed_when: false
+    ignore_errors: true
   - name: stop ceph.target with systemd
     service:
@@ -671,21 +671,21 @@
   tasks:
   - include_vars: roles/ceph-common/defaults/main.yml
   - include_vars: group_vars/all.yml
-    failed_when: false
+    ignore_errors: true
   - include_vars: group_vars/{{ mds_group_name }}.yml
-    failed_when: false
+    ignore_errors: true
   - include_vars: group_vars/{{ rgw_group_name }}.yml
-    failed_when: false
+    ignore_errors: true
   - include_vars: group_vars/{{ rbdmirror_group_name }}.yml
-    failed_when: false
+    ignore_errors: true
   - include_vars: group_vars/{{ nfs_group_name }}.yml
-    failed_when: false
+    ignore_errors: true
   - include_vars: group_vars/{{ osd_group_name }}.yml
-    failed_when: false
+    ignore_errors: true
   - include_vars: group_vars/{{ mon_group_name }}.yml
-    failed_when: false
+    ignore_errors: true
   - include_vars: group_vars/{{ restapi_group_name }}.yml
-    failed_when: false
+    ignore_errors: true
   - name: purge fetch directory for localhost
     file:

tox.ini

@@ -1,5 +1,5 @@
 [tox]
-envlist = {ansible2.2}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster}
+envlist = {ansible2.2}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster_collocated}
 skipsdist = True

 [testenv]
@@ -31,6 +31,8 @@ changedir=
   centos7_cluster: {toxinidir}/tests/functional/centos/7/cluster
   # tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using docker
   docker_cluster: {toxinidir}/tests/functional/centos/7/docker-cluster
+  # creates a cluster, purges the cluster and then brings the cluster back up
+  purge_cluster_collocated: {toxinidir}/tests/functional/centos/7/journal-collocation
 commands=
   vagrant up --no-provision {posargs:--provider=virtualbox}
   bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
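Note that purge_cluster_collocated points changedir at the existing journal-collocation scenario directory, so the purge test reuses that scenario's Vagrant configuration, hosts inventory and fetch directory rather than adding a new functional-test tree.
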
@@ -39,4 +41,11 @@ commands=
   ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/setup.yml
   testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
+  # use infrastructure-playbooks/purge-cluster.yml to purge the cluster
+  purge_cluster_collocated: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/purge-cluster.yml --extra-vars="ireallymeanit=yes fetch_directory={changedir}/fetch"
+  # set up the cluster again
+  purge_cluster_collocated: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars="fetch_directory={changedir}/fetch"
+  # test that the cluster can be redeployed in a healthy state
+  purge_cluster_collocated: testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
   vagrant destroy --force
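Taken together, the factor-conditional commands make the new scenario deploy the cluster, run the functional tests, purge everything with infrastructure-playbooks/purge-cluster.yml, redeploy with the site playbook, and run testinfra a second time to confirm the redeployed cluster comes back healthy. Locally it should be runnable as "tox -e ansible2.2-purge_cluster_collocated" (the env name is assembled from the envlist factors above), assuming VirtualBox is available for the vagrant step.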