tests: migrate to quay.ceph.io registry

in order to avoid docker.io rate limiting

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 218aedaab6)
pull/5756/head
Guillaume Abrioux 2020-09-04 16:50:26 +02:00
parent 858e50da6b
commit 2001039c0e
37 changed files with 162 additions and 175 deletions

@@ -30,9 +30,9 @@ The following environent variables are available for use:
 * ``UPDATE_CEPH_STABLE_RELEASE``: (default: ``kraken``) This would configure the ``ceph-ansible`` variable ``ceph_stable_relese`` during an ``update``
 scenario. This is set automatically when using the ``jewel-*`` or ``kraken-*`` testing scenarios.
-* ``CEPH_DOCKER_REGISTRY``: (default: ``docker.io``) This would configure the ``ceph-ansible`` variable ``ceph_docker_registry``.
-* ``CEPH_DOCKER_IMAGE``: (default: ``ceph/daemon``) This would configure the ``ceph-ansible`` variable ``ceph_docker_image``.
+* ``CEPH_DOCKER_REGISTRY``: (default: ``quay.ceph.io``) This would configure the ``ceph-ansible`` variable ``ceph_docker_registry``.
+* ``CEPH_DOCKER_IMAGE``: (default: ``ceph-ci/daemon``) This would configure the ``ceph-ansible`` variable ``ceph_docker_image``.
 * ``CEPH_DOCKER_IMAGE_TAG``: (default: ``latest``) This would configure the ``ceph-ansible`` variable ``ceph_docker_image_name``.
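Spelled out, the new defaults above correspond to the container override block that the containerized scenario group_vars files gain in the hunks that follow. This is a sketch assembled from this diff, not a verbatim copy of any single file; in this diff the monitoring image pins only show up in the scenarios that configure the dashboard:

    # container source after the migration (group_vars sketch)
    ceph_docker_registry: quay.ceph.io        # previously docker.io
    ceph_docker_image: ceph-ci/daemon         # previously ceph/daemon
    ceph_docker_image_tag: latest-nautilus
    # dashboard-enabled scenarios additionally pin the monitoring images:
    node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
    prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
    alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"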

@@ -25,3 +25,6 @@ ceph_conf_overrides:
 global:
 osd_pool_default_size: 1
 dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -26,3 +26,6 @@ ceph_conf_overrides:
 global:
 osd_pool_default_size: 1
 dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -26,3 +26,6 @@ ceph_conf_overrides:
 global:
 osd_pool_default_size: 1
 dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -25,3 +25,6 @@ ceph_conf_overrides:
 global:
 osd_pool_default_size: 1
 dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -26,3 +26,6 @@ ceph_conf_overrides:
 global:
 osd_pool_default_size: 1
 dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -28,3 +28,6 @@ ceph_conf_overrides:
 rgw_override_bucket_index_max_shards: 16
 rgw_bucket_default_quota_max_objects: 1638400
 dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -41,3 +41,6 @@ lvm_volumes:
 data_vg: test_group
 db: journal1
 db_vg: journals
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -42,3 +42,9 @@ handler_health_osd_check_delay: 10
 mds_max_mds: 2
 dashboard_admin_password: $sX!cD$rYU6qR^B!
 grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus
+node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
+prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
+alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"

@@ -35,3 +35,9 @@ handler_health_osd_check_delay: 10
 mds_max_mds: 2
 dashboard_admin_password: $sX!cD$rYU6qR^B!
 grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus
+node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
+prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
+alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"

@@ -22,3 +22,9 @@ handler_health_osd_check_delay: 10
 dashboard_admin_password: $sX!cD$rYU6qR^B!
 dashboard_admin_user_ro: true
 grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus
+node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
+prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
+alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"

@@ -19,3 +19,9 @@ handler_health_osd_check_delay: 10
 dashboard_admin_password: $sX!cD$rYU6qR^B!
 dashboard_admin_user_ro: true
 grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus
+node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
+prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
+alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"

@@ -42,3 +42,6 @@ handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 dashboard_admin_password: $sX!cD$rYU6qR^B!
 grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -37,3 +37,6 @@ lvm_volumes:
 db_vg: journals
 fsid: 40358a87-ab6e-4bdc-83db-1d909147861c
 generate_fsid: false
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -22,3 +22,6 @@ ceph_conf_overrides:
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -25,3 +25,6 @@ ceph_conf_overrides:
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -27,3 +27,6 @@ ceph_conf_overrides:
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -42,3 +42,6 @@ openstack_cinder_pool:
 openstack_pools:
 - "{{ openstack_glance_pool }}"
 - "{{ openstack_cinder_pool }}"
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -9,9 +9,9 @@ all:
 rgw_keystone_url: 'http://192.168.95.10:5000', rgw_s3_auth_use_keystone: 'true', rgw_keystone_revocation_interval: 0}
 ceph_mgr_docker_extra_env: '-e MGR_DASHBOARD=0'
 cluster: mycluster
-ceph_docker_image: ceph/daemon
-ceph_docker_image_tag: v3.0.3-stable-3.0-luminous-centos-7-x86_64
-ceph_docker_registry: docker.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus
+ceph_docker_registry: quay.ceph.io
 cephfs_data_pool:
 name: 'manila_data'
 pg_num: "{{ osd_pool_default_pg_num }}"

@@ -41,3 +41,6 @@ handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 dashboard_admin_password: $sX!cD$rYU6qR^B!
 grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -25,3 +25,6 @@ ceph_conf_overrides:
 global:
 osd_pool_default_size: 1
 dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -25,3 +25,6 @@ ceph_conf_overrides:
 global:
 osd_pool_default_size: 1
 dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -15,3 +15,6 @@ ceph_conf_overrides:
 openstack_config: False
 dashboard_enabled: False
 copy_admin_key: True
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -14,3 +14,6 @@ ceph_conf_overrides:
 osd_pool_default_size: 1
 openstack_config: False
 dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -14,3 +14,6 @@ ceph_conf_overrides:
 osd_pool_default_size: 1
 openstack_config: False
 dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -15,3 +15,6 @@ ceph_conf_overrides:
 openstack_config: False
 dashboard_enabled: False
 copy_admin_key: True
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -14,3 +14,6 @@ ceph_conf_overrides:
 openstack_config: False
 dashboard_enabled: False
 copy_admin_key: True
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -16,3 +16,6 @@ ceph_conf_overrides:
 openstack_config: False
 dashboard_enabled: False
 copy_admin_key: True
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-nautilus

@@ -75,7 +75,7 @@ class TestCephKeyModule(object):
 fake_args = ['arg']
 fake_user = "fake-user"
 fake_key = "/tmp/my-key"
-fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
 expected_command_list = ['docker',
 'run',
 '--rm',
@@ -84,7 +84,7 @@ class TestCephKeyModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph',
-'docker.io/ceph/daemon:latest-luminous',
+'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
 '-n',
 "fake-user",
 '-k',
@@ -138,7 +138,7 @@ class TestCephKeyModule(object):
 fake_dest = "/fake/ceph"
 fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
 fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
-fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
 expected_command_list = ['docker',
 'run',
 '--rm',
@@ -147,7 +147,7 @@ class TestCephKeyModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph-authtool',
-'docker.io/ceph/daemon:latest-luminous',
+'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
 '--create-keyring',
 fake_file_destination,
 '--name',
@@ -202,7 +202,7 @@ class TestCephKeyModule(object):
 fake_import_key = True
 fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
 fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
-fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
 expected_command_list = [
 ['docker', # noqa E128
 'run',
@@ -212,7 +212,7 @@ class TestCephKeyModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph-authtool',
-'docker.io/ceph/daemon:latest-luminous',
+'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
 '--create-keyring', fake_file_destination,
 '--name', fake_name,
 '--add-key', fake_secret,
@@ -226,7 +226,7 @@ class TestCephKeyModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph',
-'docker.io/ceph/daemon:latest-luminous',
+'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
 '-n', 'client.admin',
 '-k', '/etc/ceph/fake.client.admin.keyring',
 '--cluster', fake_cluster,
@@ -286,7 +286,7 @@ class TestCephKeyModule(object):
 fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
 fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
 # create_key passes (one for ceph-authtool and one for itself) itw own array so the expected result is an array within an array # noqa E501
-fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
 expected_command_list = [['docker', # noqa E128
 'run',
 '--rm',
@@ -295,7 +295,7 @@ class TestCephKeyModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph-authtool',
-'docker.io/ceph/daemon:latest-luminous',
+'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
 '--create-keyring',
 fake_file_destination,
 '--name',
@@ -325,7 +325,7 @@ class TestCephKeyModule(object):
 def test_delete_key_container(self):
 fake_cluster = "fake"
 fake_name = "client.fake"
-fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
 expected_command_list = [['docker', # noqa E128
 'run',
 '--rm',
@@ -334,7 +334,7 @@ class TestCephKeyModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph',
-'docker.io/ceph/daemon:latest-luminous',
+'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
 '-n', 'client.admin',
 '-k', '/etc/ceph/fake.client.admin.keyring',
 '--cluster', fake_cluster,
@@ -364,7 +364,7 @@ class TestCephKeyModule(object):
 fake_user = "fake-user"
 fake_key = "/tmp/my-key"
 fake_output_format = "json"
-fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
 expected_command_list = [['docker', # noqa E128
 'run',
 '--rm',
@@ -373,7 +373,7 @@ class TestCephKeyModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph',
-'docker.io/ceph/daemon:latest-luminous',
+'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
 '-n', "fake-user",
 '-k', "/tmp/my-key",
 '--cluster', fake_cluster,
@@ -398,7 +398,7 @@ class TestCephKeyModule(object):
 def test_get_key_container(self):
 fake_cluster = "fake"
 fake_name = "client.fake"
-fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
 fake_dest = "/fake/ceph"
 fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
 fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
@@ -410,7 +410,7 @@ class TestCephKeyModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph',
-'docker.io/ceph/daemon:latest-luminous',
+'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
 '-n', "client.admin",
 '-k', "/etc/ceph/fake.client.admin.keyring", # noqa E501
 '--cluster', fake_cluster,
@@ -454,7 +454,7 @@ class TestCephKeyModule(object):
 fake_user = "mon."
 fake_keyring_dirname = fake_cluster + "-" + fake_hostname
 fake_key = os.path.join("/var/lib/ceph/mon/", fake_keyring_dirname, 'keyring') # noqa E501
-fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
 expected_command_list = [['docker', # noqa E128
 'run',
 '--rm',
@@ -463,7 +463,7 @@ class TestCephKeyModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph',
-'docker.io/ceph/daemon:latest-luminous',
+'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
 '-n', "mon.",
 '-k', "/var/lib/ceph/mon/fake-mon01/keyring", # noqa E501
 '--cluster', fake_cluster,
@@ -477,7 +477,7 @@ class TestCephKeyModule(object):
 fake_cluster = "fake"
 fake_user = "fake-user"
 fake_key = "/tmp/my-key"
-fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
 expected_command_list = [['docker', # noqa E128
 'run',
 '--rm',
@@ -486,7 +486,7 @@ class TestCephKeyModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph',
-'docker.io/ceph/daemon:latest-luminous',
+'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
 '-n', "fake-user",
 '-k', "/tmp/my-key",
 '--cluster', fake_cluster,

@@ -53,7 +53,7 @@ class TestCephVolumeModule(object):
 def test_container_exec(self):
 fake_binary = "ceph-volume"
-fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
 expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host', # noqa E501
 '--ulimit', 'nofile=1024:4096',
 '-v', '/run/lock/lvm:/run/lock/lvm:z',
@@ -63,14 +63,14 @@ class TestCephVolumeModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph-volume',
-'docker.io/ceph/daemon:latest-luminous']
+'quay.ceph.io/ceph-ci/daemon:latest-nautilus']
 result = ceph_volume.container_exec(fake_binary, fake_container_image)
 assert result == expected_command_list
 def test_zap_osd_container(self):
 fake_module = MagicMock()
 fake_module.params = {'data': '/dev/sda'}
-fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
 expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host', # noqa E501
 '--ulimit', 'nofile=1024:4096',
 '-v', '/run/lock/lvm:/run/lock/lvm:z',
@@ -80,7 +80,7 @@ class TestCephVolumeModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph-volume',
-'docker.io/ceph/daemon:latest-luminous',
+'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
 '--cluster',
 'ceph',
 'lvm',
@@ -147,7 +147,7 @@ class TestCephVolumeModule(object):
 def test_list_osd_container(self):
 fake_module = MagicMock()
 fake_module.params = {'cluster': 'ceph', 'data': '/dev/sda'}
-fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
 expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host', # noqa E501
 '--ulimit', 'nofile=1024:4096',
 '-v', '/run/lock/lvm:/run/lock/lvm:z',
@@ -157,7 +157,7 @@ class TestCephVolumeModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph-volume',
-'docker.io/ceph/daemon:latest-luminous',
+'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
 '--cluster',
 'ceph',
 'lvm',
@@ -182,7 +182,7 @@ class TestCephVolumeModule(object):
 def test_list_storage_inventory_container(self):
 fake_module = MagicMock()
-fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
 expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host', # noqa E501
 '--ulimit', 'nofile=1024:4096',
 '-v', '/run/lock/lvm:/run/lock/lvm:z',
@@ -192,7 +192,7 @@ class TestCephVolumeModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph-volume',
-'docker.io/ceph/daemon:latest-luminous',
+'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
 '--cluster',
 'ceph',
 'inventory',
@@ -208,7 +208,7 @@ class TestCephVolumeModule(object):
 'cluster': 'ceph', }
 fake_action = "create"
-fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
 expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host', # noqa E501
 '--ulimit', 'nofile=1024:4096',
 '-v', '/run/lock/lvm:/run/lock/lvm:z',
@@ -218,7 +218,7 @@ class TestCephVolumeModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph-volume',
-'docker.io/ceph/daemon:latest-luminous',
+'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
 '--cluster',
 'ceph',
 'lvm',
@@ -257,7 +257,7 @@ class TestCephVolumeModule(object):
 'cluster': 'ceph', }
 fake_action = "prepare"
-fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
 expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host', # noqa E501
 '--ulimit', 'nofile=1024:4096',
 '-v', '/run/lock/lvm:/run/lock/lvm:z',
@@ -267,7 +267,7 @@ class TestCephVolumeModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph-volume',
-'docker.io/ceph/daemon:latest-luminous',
+'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
 '--cluster',
 'ceph',
 'lvm',
@@ -307,7 +307,7 @@ class TestCephVolumeModule(object):
 'cluster': 'ceph',
 'batch_devices': ["/dev/sda", "/dev/sdb"]}
-fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
 expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host', # noqa E501
 '--ulimit', 'nofile=1024:4096',
 '-v', '/run/lock/lvm:/run/lock/lvm:z',
@@ -317,7 +317,7 @@ class TestCephVolumeModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph-volume',
-'docker.io/ceph/daemon:latest-luminous',
+'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
 '--cluster',
 'ceph',
 'lvm',

@@ -39,9 +39,6 @@ commands=
 ansible-playbook -vv -i {changedir}/hosts {toxinidir}/site-container.yml.sample --extra-vars "\
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -50,9 +47,6 @@ commands=
 ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/docker-to-podman.yml --extra-vars "\
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 "
 py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests

@@ -42,9 +42,6 @@ commands=
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
 ceph_stable_release=nautilus \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image=ceph/daemon \
-ceph_docker_image_tag=latest-nautilus \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -59,9 +56,6 @@ commands=
 external_cluster_mon_ips=192.168.31.10,192.168.31.11,192.168.31.12 \
 generate_fsid=false \
 ceph_stable_release=nautilus \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image=ceph/daemon \
-ceph_docker_image_tag=latest-nautilus \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \

@@ -51,9 +51,6 @@ commands=
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
 ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -62,9 +59,6 @@ commands=
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
 ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 "
 bash -c "CEPH_STABLE_RELEASE={env:UPDATE_CEPH_STABLE_RELEASE:nautilus} py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"

@@ -45,9 +45,6 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 ceph_docker_registry_auth=True \
 container_binary=podman \
 container_package_name=podman \

@@ -7,58 +7,34 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
 ireallymeanit=yes \
 osd_to_kill=0 \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 "
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
 ireallymeanit=yes \
 osd_to_kill=1 \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 "
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
 ireallymeanit=yes \
 osd_to_kill=2 \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 "
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
 ireallymeanit=yes \
 osd_to_kill=3 \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 "
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
 ireallymeanit=yes \
 osd_to_kill=4 \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 "
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
 ireallymeanit=yes \
 osd_to_kill=5 \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 "
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
 ireallymeanit=yes \
 osd_to_kill=6 \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 "
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
 ireallymeanit=yes \
 osd_to_kill=7 \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 "
 [shrink-osd-multiple]
@@ -66,9 +42,6 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
 ireallymeanit=yes \
 osd_to_kill=0,1,2,3,4,5,6,7 \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 "
 [testenv]
@@ -121,9 +94,6 @@ commands=
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
 ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -143,9 +113,6 @@ commands=
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
 ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \

@@ -54,8 +54,8 @@ commands=
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
 ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
+ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} \
+ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph-ci/daemon} \
 ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-luminous} \
 "'
@@ -64,9 +64,6 @@ commands=
 ireallymeanit=yes \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
 ceph_stable_release={env:UPDATE_CEPH_STABLE_RELEASE:nautilus} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:UPDATE_CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:UPDATE_CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \

tox.ini
@@ -44,7 +44,7 @@ commands=
 ceph_stable_release={env:CEPH_STABLE_RELEASE:luminous} \
 ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
 ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
+ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 "
 # wait 30sec for services to be ready
@@ -61,7 +61,7 @@ commands=
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
 ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
 ceph_docker_image={env:UPDATE_CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:UPDATE_CEPH_DOCKER_IMAGE_TAG:latest-master} \
+ceph_docker_image_tag={env:UPDATE_CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 ceph_stable_release=nautilus \
 osd_scenario=lvm \
 "
@@ -113,20 +113,13 @@ commands=
 # can be redployed to.
 [purge]
 commands=
-ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rbd_map_devices.yml --extra-vars "\
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
-"
+ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rbd_map_devices.yml
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
 ireallymeanit=yes \
 remove_packages=yes \
 ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 "
 # re-setup lvm
@@ -136,9 +129,6 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\
 ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -153,9 +143,6 @@ commands=
 remove_packages=yes \
 ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -167,9 +154,6 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 "
 # test that the cluster can be redeployed in a healthy state
 py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
@@ -185,9 +169,6 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
 ireallymeanit=yes \
 osd_to_kill={env:OSD_TO_KILL:0} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 "
 [shrink-mgr]
@@ -223,9 +204,6 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml --extra-vars "\
 ireallymeanit=yes \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -240,9 +218,6 @@ commands=
 ireallymeanit=yes \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
 ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 "
 py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
@@ -254,9 +229,6 @@ commands=
 ireallymeanit=yes \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
 ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -270,9 +242,6 @@ commands=
 ireallymeanit=yes \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
 ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -286,9 +255,6 @@ commands=
 ireallymeanit=yes \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
 ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -302,9 +268,6 @@ commands=
 ireallymeanit=yes \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
 ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -318,9 +281,6 @@ commands=
 ireallymeanit=yes \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
 ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -340,15 +300,12 @@ commands=
 ireallymeanit=yes \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/secondary/fetch} \
 ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
 "
-ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rgw_multisite.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest}"
-ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/rgw_multisite.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest}"
+ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rgw_multisite.yml
+ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/rgw_multisite.yml
 bash -c "cd {changedir}/secondary && vagrant destroy --force"
 # clean rule after the scenario is complete
 ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1 state=absent'
@@ -447,9 +404,6 @@ commands=
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
 ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -469,7 +423,7 @@ commands=
 all_daemons: py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
 # handlers/idempotency test
-all_daemons,all_in_one: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-nautilus} " --extra-vars @ceph-override.json
+all_daemons,all_in_one: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-nautilus}" --extra-vars @ceph-override.json
 purge: {[purge]commands}
 switch_to_containers: {[switch-to-containers]commands}