tests: use shaman to test against ceph pacific

Since there are no Pacific packages available yet at
https://download.ceph.com, let's use shaman repositories in order to
test against Ceph Pacific.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/6279/head
Guillaume Abrioux 2021-02-10 14:42:27 +01:00
parent 9102d6c090
commit 7dd4a8a059
35 changed files with 94 additions and 85 deletions
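
For context, this is roughly what the shaman-based setup boils down to in
group_vars (a minimal sketch using standard ceph-ansible variables; the file
path and values below are illustrative only, the real values are passed per
scenario via --extra-vars in the hunks that follow):

    # group_vars/all.yml (sketch) -- install Pacific from shaman instead of download.ceph.com
    ceph_origin: repository
    ceph_repository: dev        # "dev" makes ceph-ansible resolve repositories through shaman
    ceph_dev_branch: pacific    # branch built and published by shaman
    ceph_dev_sha1: latest       # use the latest build of that branch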

@@ -555,7 +555,7 @@ dummy:
 # DOCKER #
 ##########
 #ceph_docker_image: "ceph/daemon"
-#ceph_docker_image_tag: latest-master
+#ceph_docker_image_tag: latest-pacific
 #ceph_docker_registry: docker.io
 #ceph_docker_registry_auth: false
 #ceph_docker_registry_username:

@@ -547,7 +547,7 @@ ceph_tcmalloc_max_total_thread_cache: 0
 # DOCKER #
 ##########
 ceph_docker_image: "ceph/daemon"
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific
 ceph_docker_registry: docker.io
 ceph_docker_registry_auth: false
 #ceph_docker_registry_username:

@@ -29,4 +29,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific

@@ -29,4 +29,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific

@@ -29,4 +29,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific

@@ -29,4 +29,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific

@@ -29,4 +29,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific

@@ -31,4 +31,4 @@ rgw_bucket_default_quota_max_objects: 1638400
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific

@@ -44,4 +44,4 @@ lvm_volumes:
 db_vg: journals
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific

@@ -36,7 +36,7 @@ dashboard_admin_password: $sX!cD$rYU6qR^B!
 grafana_admin_password: +xFRe+RES@7vg24n
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific
 node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
 prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
 alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"

@@ -8,3 +8,4 @@ ganesha_conf_overrides: |
 nfs_ganesha_stable: true
 nfs_ganesha_dev: false
 nfs_ganesha_flavor: "ceph_master"
+nfs_ganesha_stable_branch: "V3.5-stable"

@@ -5,4 +5,4 @@ cluster_network: "192.168.31.0/24"
 dashboard_admin_password: $sX!cD$rYU6qR^B!
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon-base
-ceph_docker_image_tag: latest-master-devel
+ceph_docker_image_tag: latest-pacific-devel

@@ -25,7 +25,7 @@ dashboard_admin_user_ro: true
 grafana_admin_password: +xFRe+RES@7vg24n
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific
 node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
 prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
 alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"

@@ -30,7 +30,9 @@
 regexp: "nfs_ganesha_dev:.*"
 replace: "nfs_ganesha_dev: true"
 dest: "{{ group_vars_path }}/nfss"
-when: "'all_daemons' in group_vars_path.split('/')"
+when:
+  - setup_nfs_dev_repo | default(True) | bool
+  - "'all_daemons' in group_vars_path.split('/')"
 when: change_dir is defined
 - name: print contents of {{ group_vars_path }}/all

@@ -35,7 +35,7 @@ dashboard_admin_password: $sX!cD$rYU6qR^B!
 grafana_admin_password: +xFRe+RES@7vg24n
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific
 node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
 prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
 alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"

@@ -39,4 +39,4 @@ fsid: 40358a87-ab6e-4bdc-83db-1d909147861c
 generate_fsid: false
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific

@@ -25,4 +25,4 @@ handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific

@@ -29,4 +29,4 @@ handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific

@@ -31,4 +31,4 @@ handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific

@@ -39,4 +39,4 @@ openstack_pools:
 - "{{ openstack_cinder_pool }}"
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific

@@ -11,7 +11,7 @@ all:
 rgw_keystone_url: 'http://192.168.95.10:5000', rgw_s3_auth_use_keystone: 'true', rgw_keystone_revocation_interval: 0}
 cluster: mycluster
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific
 ceph_docker_registry: quay.ceph.io
 cephfs_data_pool:
 name: 'manila_data'

@@ -34,7 +34,7 @@ dashboard_admin_password: $sX!cD$rYU6qR^B!
 grafana_admin_password: +xFRe+RES@7vg24n
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific
 node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
 prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
 alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"

@@ -30,4 +30,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific

@@ -30,4 +30,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific

@@ -18,4 +18,4 @@ dashboard_enabled: False
 copy_admin_key: True
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific

@@ -17,4 +17,4 @@ openstack_config: False
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific

@@ -17,4 +17,4 @@ openstack_config: False
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific

@@ -18,4 +18,4 @@ dashboard_enabled: False
 copy_admin_key: True
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific

@@ -17,4 +17,4 @@ dashboard_enabled: False
 copy_admin_key: True
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific

@@ -19,4 +19,4 @@ dashboard_enabled: False
 copy_admin_key: True
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-pacific

@@ -38,10 +38,10 @@ commands=
 # configure lvm
 ansible-playbook -vv -i {changedir}/inventory/hosts {toxinidir}/tests/functional/lvm_setup.yml
-non_container: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch=master ceph_dev_sha1=latest" --tags "vagrant_setup"
+non_container: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch=pacific ceph_dev_sha1=latest" --tags "vagrant_setup"
 ansible-playbook -vv -i {changedir}/inventory/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit 'all:!clients' --extra-vars "\
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
-ceph_dev_branch=master \
+ceph_dev_branch=pacific \
 ceph_dev_sha1=latest \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
@@ -55,7 +55,7 @@ commands=
 fsid=40358a87-ab6e-4bdc-83db-1d909147861c \
 external_cluster_mon_ips=192.168.31.10,192.168.31.11,192.168.31.12 \
 generate_fsid=false \
-ceph_dev_branch=master \
+ceph_dev_branch=pacific \
 ceph_dev_sha1=latest \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
@@ -69,7 +69,7 @@ commands=
 fsid=40358a87-ab6e-4bdc-83db-1d909147861c \
 external_cluster_mon_ips=192.168.31.10,192.168.31.11,192.168.31.12 \
 generate_fsid=false \
-ceph_dev_branch=master \
+ceph_dev_branch=pacific \
 ceph_dev_sha1=latest \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \

@@ -30,8 +30,8 @@ setenv=
 non_container: PLAYBOOK = site.yml.sample
 non_container: DEV_SETUP = True
-CEPH_DOCKER_IMAGE_TAG = latest-master
-CEPH_STABLE_RELEASE = octopus
+CEPH_DOCKER_IMAGE_TAG = latest-pacific
+CEPH_STABLE_RELEASE = pacific
 deps= -r{toxinidir}/tests/requirements.txt
 changedir={toxinidir}/tests/functional/filestore-to-bluestore{env:CONTAINER_DIR:}
@@ -41,7 +41,7 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
-ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
+ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch=pacific ceph_dev_sha1=latest" --tags "vagrant_setup"
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osd0:osd1'
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osd3:osd4' --tags partitions
@@ -49,20 +49,20 @@ commands=
 # deploy the cluster
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
-ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
+ceph_stable_release=pacific \
+ceph_dev_branch=pacific \
+ceph_dev_sha1=latest \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
 "
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/filestore-to-bluestore.yml --limit osds --extra-vars "\
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
-ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
+ceph_stable_release=pacific \
+ceph_dev_branch=pacific \
+ceph_dev_sha1=latest \
 "
-bash -c "CEPH_STABLE_RELEASE={env:UPDATE_CEPH_STABLE_RELEASE:octopus} py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
+bash -c "CEPH_STABLE_RELEASE=pacific py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
 vagrant destroy --force

@@ -69,9 +69,9 @@ setenv=
 container: PURGE_PLAYBOOK = purge-container-cluster.yml
 non_container: PLAYBOOK = site.yml.sample
-CEPH_DOCKER_IMAGE_TAG = latest-master
-CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-master
-UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-master
+CEPH_DOCKER_IMAGE_TAG = latest-pacific
+CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-pacific
+UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-pacific
 CEPH_STABLE_RELEASE = pacific
 deps= -r{toxinidir}/tests/requirements.txt
@@ -81,7 +81,7 @@ changedir=
 commands=
-ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
+ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch=pacific ceph_dev_sha1=latest" --tags "vagrant_setup"
 bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
 bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
@@ -93,8 +93,8 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
+ceph_dev_branch=pacific \
+ceph_dev_sha1=latest \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -110,8 +110,8 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit osds --extra-vars "\
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
+ceph_dev_branch=pacific \
+ceph_dev_sha1=latest \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \

@@ -28,8 +28,8 @@ setenv=
 container: PLAYBOOK = site-container.yml.sample
 non_container: PLAYBOOK = site.yml.sample
-UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-master
-UPDATE_CEPH_DEV_BRANCH = master
+UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-bis-pacific
+UPDATE_CEPH_DEV_BRANCH = pacific
 UPDATE_CEPH_DEV_SHA1 = latest
 ROLLING_UPDATE = True
 deps= -r{toxinidir}/tests/requirements.txt
@@ -38,30 +38,37 @@ commands=
 bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
 bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
-ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
+# use the stable-5.0 branch to deploy an octopus cluster
+git clone -b stable-5.0 --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
+pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
+bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml'
 # configure lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file)
-ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osds:!osd2'
-non_container: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
-ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml --extra-vars "osd_scenario=lvm"'
+# deploy the cluster
+bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
-ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
+ceph_stable_release=octopus \
+ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} \
+ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph-ci/daemon} \
+ceph_docker_image_tag=latest-octopus \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
-"
+"'
+non_container: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch=pacific ceph_dev_sha1=latest setup_nfs_dev_repo=false" --tags "vagrant_setup"
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --extra-vars "\
 ireallymeanit=yes \
-ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
+ceph_dev_branch=pacific \
+ceph_dev_sha1=latest \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
 "
-bash -c "CEPH_STABLE_RELEASE={env:UPDATE_CEPH_STABLE_RELEASE:octopus} py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
+bash -c "CEPH_STABLE_RELEASE=pacific py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
 vagrant destroy --force

tox.ini

@@ -45,7 +45,7 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rbd_map_devices.yml --extra-vars "\
 ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} \
 ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph-ci/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
+ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-pacific} \
 "
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
@@ -53,7 +53,7 @@ commands=
 remove_packages=yes \
 ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} \
 ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph-ci/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
+ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-pacific} \
 "
 # re-setup lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file)
@@ -61,7 +61,7 @@ commands=
 # set up the cluster again
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
+ceph_dev_branch=pacific \
 ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
@@ -84,7 +84,7 @@ commands=
 # set up the cluster again
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
+ceph_dev_branch=pacific \
 ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
 "
 # test that the cluster can be redeployed in a healthy state
@@ -135,7 +135,7 @@ commands=
 commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml --extra-vars "\
 ireallymeanit=yes \
-ceph_docker_image_tag=latest-master-devel \
+ceph_docker_image_tag=latest-pacific-devel \
 ceph_docker_registry=quay.ceph.io \
 ceph_docker_image=ceph-ci/daemon \
 ceph_docker_registry_auth=True \
@@ -154,7 +154,7 @@ commands=
 ansible-playbook -vv -i {changedir}/hosts-2 --limit mon1 {toxinidir}/tests/functional/setup.yml
 ansible-playbook -vv -i {changedir}/hosts-2 {toxinidir}/infrastructure-playbooks/add-mon.yml --extra-vars "\
 ireallymeanit=yes \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
+ceph_dev_branch=pacific \
 ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
 "
 py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
@@ -164,7 +164,7 @@ commands=
 ansible-playbook -vv -i {changedir}/hosts-2 --limit mgrs {toxinidir}/tests/functional/setup.yml
 ansible-playbook -vv -i {changedir}/hosts-2 --limit mgrs {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 ireallymeanit=yes \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
+ceph_dev_branch=pacific \
 ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
@@ -177,7 +177,7 @@ commands=
 ansible-playbook -vv -i {changedir}/hosts-2 --limit mdss {toxinidir}/tests/functional/setup.yml
 ansible-playbook -vv -i {changedir}/hosts-2 --limit mdss {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 ireallymeanit=yes \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
+ceph_dev_branch=pacific \
 ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
@@ -190,7 +190,7 @@ commands=
 ansible-playbook -vv -i {changedir}/hosts-2 --limit rbdmirrors {toxinidir}/tests/functional/setup.yml
 ansible-playbook -vv -i {changedir}/hosts-2 --limit rbdmirrors {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 ireallymeanit=yes \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
+ceph_dev_branch=pacific \
 ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
@@ -203,7 +203,7 @@ commands=
 ansible-playbook -vv -i {changedir}/hosts-2 --limit rgws {toxinidir}/tests/functional/setup.yml
 ansible-playbook -vv -i {changedir}/hosts-2 --limit rgws {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 ireallymeanit=yes \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
+ceph_dev_branch=pacific \
 ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
@@ -216,21 +216,21 @@ commands=
 bash -c "cd {changedir}/secondary && bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}"
 bash -c "cd {changedir}/secondary && bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}/secondary"
 ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/setup.yml
-ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir}/secondary ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
+ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir}/secondary ceph_dev_branch=pacific ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
 ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/lvm_setup.yml
 # ensure the rule isn't already present
 ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1 state=absent'
 ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1 state=present'
 ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 ireallymeanit=yes \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
+ceph_dev_branch=pacific \
 ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
 "
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit rgws --extra-vars "\
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
+ceph_dev_branch=pacific \
 ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
@@ -247,7 +247,7 @@ commands=
 [storage-inventory]
 commands=
 ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/storage-inventory.yml --extra-vars "\
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
+ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-pacific} \
 "
 [cephadm-adopt]
@@ -295,12 +295,11 @@ setenv=
 shrink_rbdmirror: RBDMIRROR_TO_KILL = rbd-mirror0
 shrink_rgw: RGW_TO_KILL = rgw0.rgw0
-CEPH_DOCKER_IMAGE_TAG = latest-master
-CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-master
-UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-master
+CEPH_DOCKER_IMAGE_TAG = latest-pacific
+CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-pacific
 CEPH_STABLE_RELEASE = pacific
-switch_to_containers: CEPH_DOCKER_IMAGE_TAG = latest-master-devel
+switch_to_containers: CEPH_DOCKER_IMAGE_TAG = latest-pacific-devel
 deps= -r{toxinidir}/tests/requirements.txt
 changedir=
@@ -332,7 +331,7 @@ changedir=
 commands=
 rhcs: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "change_dir={changedir}" --tags "vagrant_setup"
-ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
+ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch=pacific ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
 bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
 bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
@@ -347,8 +346,8 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 deploy_secondary_zones=False \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
+ceph_dev_branch=pacific \
+ceph_dev_sha1=latest \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -364,7 +363,7 @@ commands=
 all_daemons,collocation: py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
 # handlers/idempotency test
-all_daemons,all_in_one,collocation: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-master} ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --extra-vars @ceph-override.json
+all_daemons,all_in_one,collocation: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-pacific} ceph_dev_branch=pacific ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --extra-vars @ceph-override.json
 purge: {[purge]commands}
 switch_to_containers: {[switch-to-containers]commands}