stable-7.0: support Ceph Quincy

This adds the changes required to support Ceph Quincy on the
`stable-7.0` branch.
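
For containerized deployments, the change boils down to repointing the default image tags from latest-master to latest-quincy. As a rough sketch (illustrative, not part of this commit), a Quincy deployment pinned through group_vars would carry values matching the defaults changed below; the registry and image names are the existing project defaults:

# group_vars/all.yml -- illustrative pin of the Quincy container image
ceph_docker_registry: quay.io
ceph_docker_image: "ceph/daemon"
ceph_docker_image_tag: latest-quincy
containerized_deployment: true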

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/7216/head
Guillaume Abrioux 2022-06-15 08:29:20 +02:00
parent 1c740c424a
commit c698859617
36 changed files with 74 additions and 129 deletions


@@ -561,7 +561,7 @@ dummy:
 # DOCKER #
 ##########
 #ceph_docker_image: "ceph/daemon"
-#ceph_docker_image_tag: latest-master
+#ceph_docker_image_tag: latest-quincy
 #ceph_docker_registry: quay.io
 #ceph_docker_registry_auth: false
 #ceph_docker_registry_username:


@@ -553,7 +553,7 @@ ceph_tcmalloc_max_total_thread_cache: 134217728
 # DOCKER #
 ##########
 ceph_docker_image: "ceph/daemon"
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy
 ceph_docker_registry: quay.io
 ceph_docker_registry_auth: false
 #ceph_docker_registry_username:


@@ -29,4 +29,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy


@@ -29,4 +29,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy


@@ -29,4 +29,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy


@@ -29,4 +29,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy


@@ -29,4 +29,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy


@@ -31,4 +31,4 @@ rgw_bucket_default_quota_max_objects: 1638400
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy


@@ -45,4 +45,4 @@ lvm_volumes:
 db_vg: journals
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy


@@ -38,7 +38,7 @@ dashboard_admin_password: $sX!cD$rYU6qR^B!
 grafana_admin_password: +xFRe+RES@7vg24n
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy
 node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
 prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
 alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"


@@ -5,5 +5,5 @@ cluster_network: "192.168.31.0/24"
 dashboard_admin_password: $sX!cD$rYU6qR^B!
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon-base
-ceph_docker_image_tag: latest-master-devel
+ceph_docker_image_tag: latest-quincy-devel
 containerized_deployment: true


@@ -27,7 +27,7 @@ dashboard_admin_user_ro: true
 grafana_admin_password: +xFRe+RES@7vg24n
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy
 node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
 prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
 alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"


@@ -35,7 +35,7 @@ dashboard_admin_password: $sX!cD$rYU6qR^B!
 grafana_admin_password: +xFRe+RES@7vg24n
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy
 node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
 prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
 alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"


@@ -39,4 +39,4 @@ fsid: 40358a87-ab6e-4bdc-83db-1d909147861c
 generate_fsid: false
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy


@@ -25,4 +25,4 @@ handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy


@@ -29,4 +29,4 @@ handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy


@@ -31,4 +31,4 @@ handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy


@@ -39,4 +39,4 @@ openstack_pools:
 - "{{ openstack_cinder_pool }}"
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy


@@ -11,7 +11,7 @@ all:
 rgw_keystone_url: 'http://192.168.95.10:5000', rgw_s3_auth_use_keystone: 'true', rgw_keystone_revocation_interval: 0}
 cluster: mycluster
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy
 ceph_docker_registry: quay.ceph.io
 cephfs_data_pool:
 name: 'manila_data'


@@ -34,7 +34,7 @@ dashboard_admin_password: $sX!cD$rYU6qR^B!
 grafana_admin_password: +xFRe+RES@7vg24n
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy
 node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
 prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
 alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"


@@ -30,4 +30,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy


@@ -30,4 +30,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy


@@ -18,4 +18,4 @@ dashboard_enabled: False
 copy_admin_key: True
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy


@@ -17,4 +17,4 @@ openstack_config: False
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy


@@ -17,4 +17,4 @@ openstack_config: False
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy


@@ -18,4 +18,4 @@ dashboard_enabled: False
 copy_admin_key: True
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy


@@ -17,4 +17,4 @@ dashboard_enabled: False
 copy_admin_key: True
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy


@@ -19,4 +19,4 @@ dashboard_enabled: False
 copy_admin_key: True
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy


@@ -29,7 +29,7 @@ dashboard_admin_password: $sX!cD$rYU6qR^B!
 grafana_admin_password: +xFRe+RES@7vg24n
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
-ceph_docker_image_tag: latest-master
+ceph_docker_image_tag: latest-quincy
 node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
 prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
 alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"


@@ -4,7 +4,7 @@ import ca_test_common
 import cephadm_bootstrap
 fake_fsid = '0f1e0605-db0b-485c-b366-bd8abaa83f3b'
-fake_image = 'quay.ceph.io/ceph/daemon-base:latest-master-devel'
+fake_image = 'quay.ceph.io/ceph/daemon-base:latest-quincy-devel'
 fake_ip = '192.168.42.1'
 fake_registry = 'quay.ceph.io'
 fake_registry_user = 'foo'


@@ -38,11 +38,8 @@ commands=
 # configure lvm
 ansible-playbook -vv -i {changedir}/inventory/hosts {toxinidir}/tests/functional/lvm_setup.yml
 non_container: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch=master ceph_dev_sha1=latest" --tags "vagrant_setup"
 ansible-playbook -vv -i {changedir}/inventory/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit 'all:!clients' --extra-vars "\
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
-ceph_dev_branch=master \
-ceph_dev_sha1=latest \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -55,8 +52,6 @@ commands=
 fsid=40358a87-ab6e-4bdc-83db-1d909147861c \
 external_cluster_mon_ips=192.168.31.10,192.168.31.11,192.168.31.12 \
 generate_fsid=false \
-ceph_dev_branch=master \
-ceph_dev_sha1=latest \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -69,8 +64,6 @@ commands=
 fsid=40358a87-ab6e-4bdc-83db-1d909147861c \
 external_cluster_mon_ips=192.168.31.10,192.168.31.11,192.168.31.12 \
 generate_fsid=false \
-ceph_dev_branch=master \
-ceph_dev_sha1=latest \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \


@@ -30,7 +30,7 @@ setenv=
 non_container: PLAYBOOK = site.yml.sample
 non_container: DEV_SETUP = True
-CEPH_DOCKER_IMAGE_TAG = latest-master
+CEPH_DOCKER_IMAGE_TAG = latest-quincy
 deps= -r{toxinidir}/tests/requirements.txt
 changedir={toxinidir}/tests/functional/filestore-to-bluestore{env:CONTAINER_DIR:}
@@ -40,25 +40,17 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
 ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osd0:osd1'
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osd3:osd4' --tags partitions
 # deploy the cluster
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
 "
-ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/filestore-to-bluestore.yml --limit osds --extra-vars "\
-delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
-"
+ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/filestore-to-bluestore.yml --limit osds --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True}"
 bash -c "CEPH_STABLE_RELEASE=quincy py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"


@@ -69,9 +69,9 @@ setenv=
 container: PURGE_PLAYBOOK = purge-container-cluster.yml
 non_container: PLAYBOOK = site.yml.sample
-CEPH_DOCKER_IMAGE_TAG = latest-master
-CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-master
-UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-master
+CEPH_DOCKER_IMAGE_TAG = latest-quincy
+CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-quincy
+UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-quincy
 deps= -r{toxinidir}/tests/requirements.txt
 changedir=
@@ -80,8 +80,6 @@ changedir=
 commands=
 ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
 bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
 bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
@@ -92,8 +90,6 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -101,7 +97,7 @@ commands=
 # test cluster state using ceph-ansible tests
 py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
 shrink_osd_single: {[shrink-osd-single]commands}
 shrink_osd_multiple: {[shrink-osd-multiple]commands}
@@ -109,8 +105,6 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit osds --extra-vars "\
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \


@@ -28,9 +28,7 @@ setenv=
 container: PLAYBOOK = site-container.yml.sample
 non_container: PLAYBOOK = site.yml.sample
-UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-master
-UPDATE_CEPH_DEV_BRANCH = master
-UPDATE_CEPH_DEV_SHA1 = latest
+UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-quincy
 ROLLING_UPDATE = True
 deps= -r{toxinidir}/tests/requirements.txt
 changedir={toxinidir}/tests/functional/subset_update{env:CONTAINER_DIR:}
@@ -40,11 +38,8 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
 non_container: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
-ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -54,8 +49,6 @@ commands=
 # mon1
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit mon1 --tags=mons --extra-vars "\
 ireallymeanit=yes \
-ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -63,8 +56,6 @@ commands=
 # mon0 and mon2
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit 'mons:!mon1' --tags=mons --extra-vars "\
 ireallymeanit=yes \
-ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -72,8 +63,6 @@ commands=
 # upgrade mgrs
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=mgrs --extra-vars "\
 ireallymeanit=yes \
-ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -81,8 +70,6 @@ commands=
 # upgrade osd1
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit=osd1 --tags=osds --extra-vars "\
 ireallymeanit=yes \
-ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -90,8 +77,6 @@ commands=
 # upgrade remaining osds (serially)
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit='osds:!osd1' --tags=osds --extra-vars "\
 ireallymeanit=yes \
-ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -99,8 +84,6 @@ commands=
 # upgrade rgws
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=rgws --extra-vars "\
 ireallymeanit=yes \
-ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -108,8 +91,6 @@ commands=
 # post upgrade actions
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=post_upgrade --extra-vars "\
 ireallymeanit=yes \
-ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \


@@ -28,35 +28,46 @@ setenv=
 container: PLAYBOOK = site-container.yml.sample
 non_container: PLAYBOOK = site.yml.sample
-UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-master
-UPDATE_CEPH_DEV_BRANCH = master
-UPDATE_CEPH_DEV_SHA1 = latest
+UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-quincy
 ROLLING_UPDATE = True
 deps= -r{toxinidir}/tests/requirements.txt
 changedir={toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:}
 commands=
 bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
 bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
+# use the stable-6.0 branch to deploy a pacific cluster
+git clone -b stable-6.0 --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
+pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
+ansible-galaxy install -r {envdir}/tmp/ceph-ansible/requirements.yml -v
+bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml'
 # configure lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file)
-ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osds:!osd2'
+bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml --extra-vars "osd_scenario=lvm" --limit "osds:!osd2"'
 non_container: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
-ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+# deploy the cluster
+# passing ceph_nfs_rgw_access_key and ceph_nfs_rgw_secret_key because of a weird behavior in the CI:
+# when rendering the ganesha.conf.j2 template, it complains because of undefined variables in the block "{% if nfs_obj_gw | bool %}" although we explicitly set this variable to false (see below)
+bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
-ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
+ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} \
+ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph-ci/daemon} \
+ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-pacific} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
-"
+nfs_file_gw=True \
+nfs_obj_gw=False \
+ceph_nfs_rgw_access_key=fake_access_key \
+ceph_nfs_rgw_secret_key=fake_secret_key \
+"'
+pip uninstall -y ansible
+pip install -r {toxinidir}/tests/requirements.txt
+ansible-galaxy install -r {toxinidir}/requirements.yml -v -f
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --extra-vars "\
 ireallymeanit=yes \
-ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
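
The update scenario above first deploys a Pacific cluster from the cloned stable-6.0 branch, then reinstalls this branch's requirements and runs rolling_update.yml to upgrade it to Quincy. As a sketch, the image-related variables effectively in play during the upgrade step (values taken from the tox defaults in this diff; the file name is purely illustrative):

# upgrade-vars.yml -- illustrative summary of the upgrade-step variables
ireallymeanit: yes
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_image_tag: latest-quincy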

tox.ini

@@ -45,7 +45,7 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rbd_map_devices.yml --extra-vars "\
 ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} \
 ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph-ci/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
+ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-quincy} \
 "
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
@@ -53,7 +53,7 @@ commands=
 remove_packages=yes \
 ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} \
 ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph-ci/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
+ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-quincy} \
 "
 # re-setup lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file)
@@ -61,8 +61,6 @@ commands=
 # set up the cluster again
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -76,13 +74,11 @@ commands=
 ireallymeanit=yes \
 ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} \
 ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph-ci/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
+ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-quincy} \
 "
 # set up the cluster again
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -103,10 +99,7 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
 # set up the cluster again
-ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
-"
+ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample}
 # test that the cluster can be redeployed in a healthy state
 py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
@@ -155,7 +148,7 @@ commands=
 commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml --extra-vars "\
 ireallymeanit=yes \
-ceph_docker_image_tag=latest-master-devel \
+ceph_docker_image_tag=latest-quincy-devel \
 ceph_docker_registry=quay.ceph.io \
 ceph_docker_image=ceph-ci/daemon \
 ceph_docker_registry_auth=True \
@@ -172,11 +165,7 @@ commands=
 [add-mons]
 commands=
 ansible-playbook -vv -i {changedir}/hosts-2 --limit mon1 {toxinidir}/tests/functional/setup.yml
-ansible-playbook -vv -i {changedir}/hosts-2 {toxinidir}/infrastructure-playbooks/add-mon.yml --extra-vars "\
-ireallymeanit=yes \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
-"
+ansible-playbook -vv -i {changedir}/hosts-2 {toxinidir}/infrastructure-playbooks/add-mon.yml --extra-vars "ireallymeanit=yes"
 py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
 [add-mgrs]
@@ -184,8 +173,6 @@ commands=
 ansible-playbook -vv -i {changedir}/hosts-2 --limit mgrs {toxinidir}/tests/functional/setup.yml
 ansible-playbook -vv -i {changedir}/hosts-2 --limit mgrs {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 ireallymeanit=yes \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -197,8 +184,6 @@ commands=
 ansible-playbook -vv -i {changedir}/hosts-2 --limit mdss {toxinidir}/tests/functional/setup.yml
 ansible-playbook -vv -i {changedir}/hosts-2 --limit mdss {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 ireallymeanit=yes \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -210,8 +195,6 @@ commands=
 ansible-playbook -vv -i {changedir}/hosts-2 --limit rbdmirrors {toxinidir}/tests/functional/setup.yml
 ansible-playbook -vv -i {changedir}/hosts-2 --limit rbdmirrors {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 ireallymeanit=yes \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -223,8 +206,6 @@ commands=
 ansible-playbook -vv -i {changedir}/hosts-2 --limit rgws {toxinidir}/tests/functional/setup.yml
 ansible-playbook -vv -i {changedir}/hosts-2 --limit rgws {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 ireallymeanit=yes \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -236,22 +217,18 @@ commands=
 bash -c "cd {changedir}/secondary && bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}"
 bash -c "cd {changedir}/secondary && bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}/secondary"
 ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/setup.yml
-ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir}/secondary ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
+ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir}/secondary" --tags "vagrant_setup"
 ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/lvm_setup.yml
 # ensure the rule isn't already present
 ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1 state=absent'
 ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1 state=present'
 ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 ireallymeanit=yes \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
 "
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit rgws --extra-vars "\
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
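
The back-to-back iptables ad-hoc calls above are a small idempotence trick: first remove the FORWARD rule if it already exists, then insert it at position 1, so repeated runs never stack duplicate rules. A sketch of the same pattern as playbook tasks (hypothetical task file; the module arguments simply mirror the ad-hoc calls):

# forward-rule.yml -- illustrative tasks mirroring the ad-hoc iptables pair
- name: Ensure the FORWARD accept rule isn't already present
  iptables:
    chain: FORWARD
    protocol: tcp
    source: 192.168.0.0/16
    destination: 192.168.0.0/16
    jump: ACCEPT
    action: insert
    rule_num: 1
    state: absent

- name: Insert the FORWARD accept rule at position 1
  iptables:
    chain: FORWARD
    protocol: tcp
    source: 192.168.0.0/16
    destination: 192.168.0.0/16
    jump: ACCEPT
    action: insert
    rule_num: 1
    state: present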
@@ -267,7 +244,7 @@ commands=
 [storage-inventory]
 commands=
 ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/storage-inventory.yml --extra-vars "\
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
+ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-quincy} \
 "
 [cephadm-adopt]
@@ -317,11 +294,11 @@ setenv=
 shrink_rbdmirror: RBDMIRROR_TO_KILL = rbd-mirror0
 shrink_rgw: RGW_TO_KILL = rgw0.rgw0
-CEPH_DOCKER_IMAGE_TAG = latest-master
-CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-master
-UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-master
+CEPH_DOCKER_IMAGE_TAG = latest-quincy
+CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-quincy
+UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-quincy
-switch_to_containers: CEPH_DOCKER_IMAGE_TAG = latest-master-devel
+switch_to_containers: CEPH_DOCKER_IMAGE_TAG = latest-quincy-devel
 deps= -r{toxinidir}/tests/requirements.txt
 changedir=
@@ -355,7 +332,6 @@ changedir=
 commands=
 ansible-galaxy install -r {toxinidir}/requirements.yml -v
 rhcs: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "change_dir={changedir}" --tags "vagrant_setup"
-non_container: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
 bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
 bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
@@ -370,8 +346,6 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 deploy_secondary_zones=False \
-ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
-ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -387,7 +361,7 @@ commands=
 all_daemons,collocation: py.test --reruns 20 --reruns-delay 3 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
 # handlers/idempotency test
-all_daemons,all_in_one,collocation: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-master} ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --extra-vars @ceph-override.json
+all_daemons,all_in_one,collocation: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-quincy}" --extra-vars @ceph-override.json
 purge: {[purge]commands}
 purge_dashboard: {[purge-dashboard]commands}