tests: use quay.io instead of quay.ceph.io

This makes the CI use quay.io instead of quay.ceph.io

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit e55ca01881)
pull/7372/head
Guillaume Abrioux 2022-12-06 13:14:07 +01:00 committed by Teoman ONAY
parent fc202a7598
commit b368ea1c33
38 changed files with 146 additions and 151 deletions

View File

@ -29,8 +29,8 @@ You can configure your own container register, image and tag by using the ``ceph
.. code-block:: yaml
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest
.. note::

View File

@ -8,8 +8,3 @@ ceph-ansible can deploy Ceph either in a non-containerized context (via packages
non-containerized
containerized
The difference here is that you don't have the rbd command on the host when using the containerized deployment so everything related to ceph needs to be executed within a container. So in the case there is software like e.g. Open Nebula which requires that the rbd command is accessible directly on the host (non-containerized) then you have to install the rbd command by yourself on those servers outside of containers (or make sure that this software somehow runs within containers as well and that it can access rbd).

View File

@ -21,9 +21,9 @@ runs of ``ceph-ansible``.
The following environment variables are available for use:
* ``CEPH_DOCKER_REGISTRY``: (default: ``quay.ceph.io``) This would configure the ``ceph-ansible`` variable ``ceph_docker_registry``.
* ``CEPH_DOCKER_REGISTRY``: (default: ``quay.io``) This would configure the ``ceph-ansible`` variable ``ceph_docker_registry``.
* ``CEPH_DOCKER_IMAGE``: (default: ``ceph-ci/daemon``) This would configure the ``ceph-ansible`` variable ``ceph_docker_image``.
* ``CEPH_DOCKER_IMAGE``: (default: ``ceph/daemon``) This would configure the ``ceph-ansible`` variable ``ceph_docker_image``.
* ``CEPH_DOCKER_IMAGE_TAG``: (default: ``latest``) This would configure the ``ceph-ansible`` variable ``ceph_docker_image_tag``.

View File

@ -25,6 +25,6 @@ ceph_conf_overrides:
global:
osd_pool_default_size: 1
dashboard_enabled: False
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus

View File

@ -26,6 +26,6 @@ ceph_conf_overrides:
global:
osd_pool_default_size: 1
dashboard_enabled: False
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus

View File

@ -26,6 +26,6 @@ ceph_conf_overrides:
global:
osd_pool_default_size: 1
dashboard_enabled: False
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus

View File

@ -25,6 +25,6 @@ ceph_conf_overrides:
global:
osd_pool_default_size: 1
dashboard_enabled: False
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus

View File

@ -26,6 +26,6 @@ ceph_conf_overrides:
global:
osd_pool_default_size: 1
dashboard_enabled: False
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus

View File

@ -28,6 +28,6 @@ ceph_conf_overrides:
rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400
dashboard_enabled: False
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus

View File

@ -41,6 +41,6 @@ lvm_volumes:
data_vg: test_group
db: journal1
db_vg: journals
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus

View File

@ -42,10 +42,10 @@ handler_health_osd_check_delay: 10
mds_max_mds: 2
dashboard_admin_password: $sX!cD$rYU6qR^B!
grafana_admin_password: +xFRe+RES@7vg24n
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus
node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.ceph.io/app-sre/grafana:5.4.3"
node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.io/ceph/ceph-grafana:5.4.3"

View File

@ -35,10 +35,10 @@ handler_health_osd_check_delay: 10
mds_max_mds: 2
dashboard_admin_password: $sX!cD$rYU6qR^B!
grafana_admin_password: +xFRe+RES@7vg24n
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus
node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.ceph.io/app-sre/grafana:5.4.3"
node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.io/ceph/ceph-grafana:5.4.3"

View File

@ -22,10 +22,10 @@ handler_health_osd_check_delay: 10
dashboard_admin_password: $sX!cD$rYU6qR^B!
dashboard_admin_user_ro: true
grafana_admin_password: +xFRe+RES@7vg24n
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus
node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.ceph.io/app-sre/grafana:5.4.3"
node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.io/ceph/ceph-grafana:5.4.3"

View File

@ -19,10 +19,10 @@ handler_health_osd_check_delay: 10
dashboard_admin_password: $sX!cD$rYU6qR^B!
dashboard_admin_user_ro: true
grafana_admin_password: +xFRe+RES@7vg24n
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus
node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.ceph.io/app-sre/grafana:5.4.3"
node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.io/ceph/ceph-grafana:5.4.3"

View File

@ -42,10 +42,10 @@ handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
dashboard_admin_password: $sX!cD$rYU6qR^B!
grafana_admin_password: +xFRe+RES@7vg24n
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus
node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.ceph.io/app-sre/grafana:5.4.3"
node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.io/ceph/ceph-grafana:5.4.3"

View File

@ -37,6 +37,6 @@ lvm_volumes:
db_vg: journals
fsid: 40358a87-ab6e-4bdc-83db-1d909147861c
generate_fsid: false
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus

View File

@ -22,6 +22,6 @@ ceph_conf_overrides:
dashboard_enabled: False
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus

View File

@ -25,6 +25,6 @@ ceph_conf_overrides:
dashboard_enabled: False
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus

View File

@ -27,6 +27,6 @@ ceph_conf_overrides:
dashboard_enabled: False
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus

View File

@ -42,6 +42,6 @@ openstack_cinder_pool:
openstack_pools:
- "{{ openstack_glance_pool }}"
- "{{ openstack_cinder_pool }}"
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus

View File

@ -9,9 +9,9 @@ all:
rgw_keystone_url: 'http://192.168.95.10:5000', rgw_s3_auth_use_keystone: 'true', rgw_keystone_revocation_interval: 0}
ceph_mgr_docker_extra_env: '-e MGR_DASHBOARD=0'
cluster: mycluster
ceph_docker_image: ceph-ci/daemon
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus
ceph_docker_registry: quay.ceph.io
ceph_docker_registry: quay.io
cephfs_data_pool:
name: 'manila_data'
pg_num: "{{ osd_pool_default_pg_num }}"

View File

@ -41,10 +41,10 @@ handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
dashboard_admin_password: $sX!cD$rYU6qR^B!
grafana_admin_password: +xFRe+RES@7vg24n
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus
node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.ceph.io/app-sre/grafana:5.4.3"
node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.io/ceph/ceph-grafana:5.4.3"

View File

@ -27,6 +27,6 @@ ceph_conf_overrides:
osd_pool_default_size: 1
mon_max_pg_per_osd: 512
dashboard_enabled: False
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_image_tag: latest-main
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-pacific

View File

@ -27,6 +27,6 @@ ceph_conf_overrides:
osd_pool_default_size: 1
mon_max_pg_per_osd: 512
dashboard_enabled: False
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_image_tag: latest-main
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-pacific

View File

@ -26,6 +26,6 @@ ceph_conf_overrides:
osd_pool_default_size: 1
mon_max_pg_per_osd: 512
dashboard_enabled: False
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus

View File

@ -26,6 +26,6 @@ ceph_conf_overrides:
osd_pool_default_size: 1
mon_max_pg_per_osd: 512
dashboard_enabled: False
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus

View File

@ -15,6 +15,6 @@ ceph_conf_overrides:
openstack_config: False
dashboard_enabled: False
copy_admin_key: True
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus

View File

@ -14,6 +14,6 @@ ceph_conf_overrides:
osd_pool_default_size: 1
openstack_config: False
dashboard_enabled: False
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus

View File

@ -14,6 +14,6 @@ ceph_conf_overrides:
osd_pool_default_size: 1
openstack_config: False
dashboard_enabled: False
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus

View File

@ -15,6 +15,6 @@ ceph_conf_overrides:
openstack_config: False
dashboard_enabled: False
copy_admin_key: True
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus

View File

@ -14,6 +14,6 @@ ceph_conf_overrides:
openstack_config: False
dashboard_enabled: False
copy_admin_key: True
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus

View File

@ -16,6 +16,6 @@ ceph_conf_overrides:
openstack_config: False
dashboard_enabled: False
copy_admin_key: True
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus

View File

@ -27,10 +27,10 @@ mds_max_mds: 2
dashboard_enabled: false
dashboard_admin_password: $sX!cD$rYU6qR^B!
grafana_admin_password: +xFRe+RES@7vg24n
ceph_docker_registry: quay.ceph.io
ceph_docker_image: ceph-ci/daemon
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon
ceph_docker_image_tag: latest-nautilus
node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"
node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.io/ceph/ceph-grafana:5.4.3"

View File

@ -19,9 +19,9 @@ mds_max_mds: 2
dashboard_enabled: false
dashboard_admin_password: $sX!cD$rYU6qR^B!
grafana_admin_password: +xFRe+RES@7vg24n
ceph_docker_registry: quay.ceph.io
node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"
ceph_docker_registry: quay.io
node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"
grafana_server_group_name: ceph_monitoring

View File

@ -101,8 +101,8 @@ class TestCephKeyModule(object):
fake_cluster = "fake"
fake_args = ['arg']
fake_user = "fake-user"
fake_key = "/tmp/my-key"
fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
fake_user_key = "/tmp/my-key"
fake_container_image = "quay.io/ceph/daemon:latest-nautilus"
expected_command_list = ['docker',
'run',
'--rm',
@ -111,7 +111,7 @@ class TestCephKeyModule(object):
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph',
'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
'quay.io/ceph/daemon:latest-nautilus',
'-n',
"fake-user",
'-k',
@ -121,7 +121,7 @@ class TestCephKeyModule(object):
'auth',
'arg']
result = ceph_key.generate_ceph_cmd(
fake_cluster, fake_args, fake_user, fake_key, fake_container_image)
fake_cluster, fake_args, fake_user, fake_user_key, fake_container_image)
assert result == expected_command_list
def test_generate_ceph_authtool_cmd_non_container_no_auid(self):
@ -165,7 +165,7 @@ class TestCephKeyModule(object):
fake_dest = "/fake/ceph"
fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
fake_container_image = "quay.io/ceph/daemon:latest-nautilus"
expected_command_list = ['docker',
'run',
'--rm',
@ -174,7 +174,7 @@ class TestCephKeyModule(object):
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph-authtool',
'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
'quay.io/ceph/daemon:latest-nautilus',
'--create-keyring',
fake_file_destination,
'--name',
@ -233,7 +233,7 @@ class TestCephKeyModule(object):
fake_import_key = True
fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
fake_container_image = "quay.io/ceph/daemon:latest-nautilus"
expected_command_list = [
['docker',
'run',
@ -243,7 +243,7 @@ class TestCephKeyModule(object):
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph-authtool',
'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
'quay.io/ceph/daemon:latest-nautilus',
'--create-keyring', fake_file_destination,
'--name', fake_name,
'--add-key', fake_secret,
@ -257,7 +257,7 @@ class TestCephKeyModule(object):
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph',
'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
'quay.io/ceph/daemon:latest-nautilus',
'-n', 'client.admin',
'-k', '/etc/ceph/fake.client.admin.keyring',
'--cluster', fake_cluster,
@ -320,8 +320,8 @@ class TestCephKeyModule(object):
fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
# create_key passes (one for ceph-authtool and one for itself) itw own array so the expected result is an array within an array # noqa E501
fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
expected_command_list = [['docker',
fake_container_image = "quay.io/ceph/daemon:latest-nautilus"
expected_command_list = [['docker', # noqa E128
'run',
'--rm',
'--net=host',
@ -329,7 +329,7 @@ class TestCephKeyModule(object):
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph-authtool',
'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
'quay.io/ceph/daemon:latest-nautilus',
'--create-keyring',
fake_file_destination,
'--name',
@ -363,7 +363,7 @@ class TestCephKeyModule(object):
fake_user_key = '/etc/ceph/fake.client.admin.keyring'
fake_cluster = "fake"
fake_name = "client.fake"
fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
fake_container_image = "quay.io/ceph/daemon:latest-nautilus"
expected_command_list = [['docker',
'run',
'--rm',
@ -372,7 +372,7 @@ class TestCephKeyModule(object):
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph',
'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
'quay.io/ceph/daemon:latest-nautilus',
'-n', 'client.admin',
'-k', '/etc/ceph/fake.client.admin.keyring',
'--cluster', fake_cluster,
@ -402,7 +402,7 @@ class TestCephKeyModule(object):
fake_name = "client.fake"
fake_user = 'client.admin'
fake_user_key = '/etc/ceph/fake.client.admin.keyring'
fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
fake_container_image = "quay.io/ceph/daemon:latest-nautilus"
expected_command_list = [['docker', # noqa E128
'run',
'--rm',
@ -411,7 +411,7 @@ class TestCephKeyModule(object):
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph',
'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
'quay.io/ceph/daemon:latest-nautilus',
'-n', fake_user,
'-k', fake_user_key,
'--cluster', fake_cluster,
@ -437,7 +437,7 @@ class TestCephKeyModule(object):
fake_user = 'client.admin'
fake_user_key = '/etc/ceph/fake.client.admin.keyring'
fake_name = "client.fake"
fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
fake_container_image = "quay.io/ceph/daemon:latest-nautilus"
fake_dest = "/fake/ceph"
fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
@ -449,7 +449,7 @@ class TestCephKeyModule(object):
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph',
'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
'quay.io/ceph/daemon:latest-nautilus',
'-n', fake_user,
'-k', fake_user_key,
'--cluster', fake_cluster,
@ -494,7 +494,7 @@ class TestCephKeyModule(object):
fake_user = "mon."
fake_keyring_dirname = fake_cluster + "-" + fake_hostname
fake_key = os.path.join("/var/lib/ceph/mon/", fake_keyring_dirname, 'keyring')
fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
fake_container_image = "quay.io/ceph/daemon:latest-nautilus"
expected_command_list = [['docker',
'run',
'--rm',
@ -503,7 +503,7 @@ class TestCephKeyModule(object):
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph',
'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
'quay.io/ceph/daemon:latest-nautilus',
'-n', "mon.",
'-k', "/var/lib/ceph/mon/fake-mon01/keyring",
'--cluster', fake_cluster,
@ -516,7 +516,7 @@ class TestCephKeyModule(object):
fake_cluster = "fake"
fake_user = "fake-user"
fake_key = "/tmp/my-key"
fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-nautilus"
fake_container_image = "quay.io/ceph/daemon:latest-nautilus"
expected_command_list = [['docker',
'run',
'--rm',
@ -525,7 +525,7 @@ class TestCephKeyModule(object):
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph',
'quay.ceph.io/ceph-ci/daemon:latest-nautilus',
'quay.io/ceph/daemon:latest-nautilus',
'-n', "fake-user",
'-k', "/tmp/my-key",
'--cluster', fake_cluster,

View File

@ -8,7 +8,7 @@ import ceph_osd_flag
fake_cluster = 'ceph'
fake_container_binary = 'podman'
fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
fake_container_image = 'quay.io/ceph/daemon:latest'
fake_flag = 'noup'
fake_user = 'client.admin'
fake_keyring = '/etc/ceph/{}.{}.keyring'.format(fake_cluster, fake_user)

View File

@ -50,8 +50,8 @@ commands=
bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts-upgrade-to-nautilus {envdir}/tmp/ceph-ansible/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
ceph_stable_release=luminous \
ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} \
ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph-ci/daemon} \
ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.io} \
ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-luminous} \
"'

View File

@ -40,7 +40,7 @@ commands=
ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
ceph_stable_release=luminous \
ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.io} \
ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
"
@ -56,7 +56,7 @@ commands=
# migrate osds to ceph-volume and upgrade to nautilus
ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/rolling_update.yml --extra-vars "\
ireallymeanit=yes \
ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.io} \
ceph_docker_image={env:UPDATE_CEPH_DOCKER_IMAGE:ceph/daemon} \
ceph_docker_image_tag=latest-nautilus \
osd_scenario=lvm \
@ -132,8 +132,8 @@ commands=
commands=
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/purge-dashboard.yml --extra-vars "\
ireallymeanit=yes \
ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} \
ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph-ci/daemon} \
ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.io} \
ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
"