tests: use quay.io instead of quay.ceph.io

This makes the CI use quay.io instead of quay.ceph.io

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit e55ca01881)
pull/7368/head
Guillaume Abrioux 2022-12-06 13:14:07 +01:00 committed by Guillaume Abrioux
parent fb6098fa7c
commit d464f82bad
56 changed files with 182 additions and 182 deletions

View File

@@ -29,8 +29,8 @@ You can configure your own container register, image and tag by using the ``ceph
 .. code-block:: yaml
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest
 .. note::
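A minimal sketch of how these variables are typically overridden at run time, mirroring the --extra-vars pattern used by the tox scenarios later in this diff (the inventory path and playbook name here are placeholders, not taken from this change):

    # Hypothetical invocation; adjust inventory and playbook to your setup.
    ansible-playbook -vv -i hosts site-container.yml.sample \
        --extra-vars "ceph_docker_registry=quay.io \
                      ceph_docker_image=ceph/daemon \
                      ceph_docker_image_tag=latest"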

View File

@@ -21,9 +21,9 @@ runs of ``ceph-ansible``.
 The following environent variables are available for use:
-* ``CEPH_DOCKER_REGISTRY``: (default: ``quay.ceph.io``) This would configure the ``ceph-ansible`` variable ``ceph_docker_registry``.
+* ``CEPH_DOCKER_REGISTRY``: (default: ``quay.io``) This would configure the ``ceph-ansible`` variable ``ceph_docker_registry``.
-* ``CEPH_DOCKER_IMAGE``: (default: ``ceph-ci/daemon``) This would configure the ``ceph-ansible`` variable ``ceph_docker_image``.
+* ``CEPH_DOCKER_IMAGE``: (default: ``ceph/daemon``) This would configure the ``ceph-ansible`` variable ``ceph_docker_image``.
 * ``CEPH_DOCKER_IMAGE_TAG``: (default: ``latest``) This would configure the ``ceph-ansible`` variable ``ceph_docker_image_name``.
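As a quick illustration of the environment variables documented above, a CI-style run could be pointed at quay.io before invoking tox; the scenario name below is only an assumption, not part of this change:

    # Assumed scenario name; any tox environment honours these variables.
    export CEPH_DOCKER_REGISTRY=quay.io
    export CEPH_DOCKER_IMAGE=ceph/daemon
    export CEPH_DOCKER_IMAGE_TAG=latest
    tox -e centos-container-all_daemons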

View File

@@ -84,7 +84,7 @@ EXAMPLES = '''
 cephadm_adopt:
 name: mon.foo
 style: legacy
-image: quay.ceph.io/ceph/daemon-base:latest-master-devel
+image: quay.io/ceph/daemon-base:latest-main-devel
 pull: false
 firewalld: false
@@ -93,7 +93,7 @@ EXAMPLES = '''
 name: mon.foo
 style: legacy
 environment:
-CEPHADM_IMAGE: quay.ceph.io/ceph/daemon-base:latest-master-devel
+CEPHADM_IMAGE: quay.io/ceph/daemon-base:latest-main-devel
 '''
 RETURN = '''# '''

View File

@@ -124,7 +124,7 @@ EXAMPLES = '''
 cephadm_bootstrap:
 mon_ip: 192.168.42.1
 fsid: 3c9ba63a-c7df-4476-a1e7-317dfc711f82
-image: quay.ceph.io/ceph/daemon-base:latest-master-devel
+image: quay.io/ceph/daemon-base:latest-main-devel
 dashboard: false
 monitoring: false
 firewalld: false
@@ -133,7 +133,7 @@ EXAMPLES = '''
 cephadm_bootstrap:
 mon_ip: 192.168.42.1
 environment:
-CEPHADM_IMAGE: quay.ceph.io/ceph/daemon-base:latest-master-devel
+CEPHADM_IMAGE: quay.io/ceph/daemon-base:latest-main-devel
 '''
 RETURN = '''# '''

View File

@@ -27,6 +27,6 @@ ceph_conf_overrides:
 mon_warn_on_pool_no_redundancy: false
 osd_pool_default_size: 1
 dashboard_enabled: False
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific

View File

@@ -27,6 +27,6 @@ ceph_conf_overrides:
 mon_warn_on_pool_no_redundancy: false
 osd_pool_default_size: 1
 dashboard_enabled: False
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific

View File

@@ -27,6 +27,6 @@ ceph_conf_overrides:
 mon_warn_on_pool_no_redundancy: false
 osd_pool_default_size: 1
 dashboard_enabled: False
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific

View File

@@ -27,6 +27,6 @@ ceph_conf_overrides:
 mon_warn_on_pool_no_redundancy: false
 osd_pool_default_size: 1
 dashboard_enabled: False
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific

View File

@@ -27,6 +27,6 @@ ceph_conf_overrides:
 mon_warn_on_pool_no_redundancy: false
 osd_pool_default_size: 1
 dashboard_enabled: False
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific

View File

@@ -29,6 +29,6 @@ ceph_conf_overrides:
 rgw_override_bucket_index_max_shards: 16
 rgw_bucket_default_quota_max_objects: 1638400
 dashboard_enabled: False
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific

View File

@@ -43,6 +43,6 @@ lvm_volumes:
 data_vg: test_group
 db: journal1
 db_vg: journals
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific

View File

@@ -35,10 +35,10 @@ handler_health_osd_check_delay: 10
 mds_max_mds: 2
 dashboard_admin_password: $sX!cD$rYU6qR^B!
 grafana_admin_password: +xFRe+RES@7vg24n
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific
-node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
+node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
-prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
+prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
-alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
+alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
-grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"
+grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"

View File

@@ -29,9 +29,9 @@ handler_health_osd_check_delay: 10
 mds_max_mds: 2
 dashboard_admin_password: $sX!cD$rYU6qR^B!
 grafana_admin_password: +xFRe+RES@7vg24n
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
+node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
-prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
+prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
-alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
+alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
-grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"
+grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"
 grafana_server_group_name: ceph_monitoring

View File

@@ -3,7 +3,7 @@ monitor_interface: eth1
 public_network: "192.168.30.0/24"
 cluster_network: "192.168.31.0/24"
 dashboard_admin_password: $sX!cD$rYU6qR^B!
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon-base
+ceph_docker_image: ceph/daemon-base
 ceph_docker_image_tag: latest-pacific-devel
 containerized_deployment: true

View File

@@ -24,10 +24,10 @@ handler_health_osd_check_delay: 10
 dashboard_admin_password: $sX!cD$rYU6qR^B!
 dashboard_admin_user_ro: true
 grafana_admin_password: +xFRe+RES@7vg24n
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific
-node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
+node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
-prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
+prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
-alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
+alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
-grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"
+grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"

View File

@@ -21,8 +21,8 @@ handler_health_osd_check_delay: 10
 dashboard_admin_password: $sX!cD$rYU6qR^B!
 dashboard_admin_user_ro: true
 grafana_admin_password: +xFRe+RES@7vg24n
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
+node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
-prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
+prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
-alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
+alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
-grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"
+grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"

View File

@@ -33,10 +33,10 @@ handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 dashboard_admin_password: $sX!cD$rYU6qR^B!
 grafana_admin_password: +xFRe+RES@7vg24n
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific
-node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
+node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
-prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
+prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
-alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
+alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
-grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"
+grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"

View File

@@ -37,6 +37,6 @@ lvm_volumes:
 db_vg: journals
 fsid: 40358a87-ab6e-4bdc-83db-1d909147861c
 generate_fsid: false
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific

View File

@@ -23,6 +23,6 @@ ceph_conf_overrides:
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific

View File

@@ -27,6 +27,6 @@ ceph_conf_overrides:
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific

View File

@@ -29,6 +29,6 @@ ceph_conf_overrides:
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific

View File

@@ -37,6 +37,6 @@ openstack_cinder_pool:
 openstack_pools:
 - "{{ openstack_glance_pool }}"
 - "{{ openstack_cinder_pool }}"
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific

View File

@@ -10,9 +10,9 @@ all:
 rgw_keystone_admin_user: swift, rgw_keystone_api_version: 3, rgw_keystone_implicit_tenants: 'true',
 rgw_keystone_url: 'http://192.168.95.10:5000', rgw_s3_auth_use_keystone: 'true', rgw_keystone_revocation_interval: 0}
 cluster: mycluster
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
 cephfs_data_pool:
 name: 'manila_data'
 application: "cephfs"

View File

@@ -32,10 +32,10 @@ handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 dashboard_admin_password: $sX!cD$rYU6qR^B!
 grafana_admin_password: +xFRe+RES@7vg24n
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific
-node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
+node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
-prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
+prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
-alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
+alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
-grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"
+grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"

View File

@@ -27,6 +27,6 @@ ceph_conf_overrides:
 osd_pool_default_size: 1
 mon_max_pg_per_osd: 512
 dashboard_enabled: False
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
-ceph_docker_image_tag: latest-main
+ceph_docker_image_tag: latest-pacific

View File

@@ -27,6 +27,6 @@ ceph_conf_overrides:
 osd_pool_default_size: 1
 mon_max_pg_per_osd: 512
 dashboard_enabled: False
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
-ceph_docker_image_tag: latest-main
+ceph_docker_image_tag: latest-pacific

View File

@@ -28,6 +28,6 @@ ceph_conf_overrides:
 osd_pool_default_size: 1
 mon_max_pg_per_osd: 512
 dashboard_enabled: False
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific

View File

@@ -28,6 +28,6 @@ ceph_conf_overrides:
 osd_pool_default_size: 1
 mon_max_pg_per_osd: 512
 dashboard_enabled: False
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific

View File

@@ -16,6 +16,6 @@ ceph_conf_overrides:
 openstack_config: False
 dashboard_enabled: False
 copy_admin_key: True
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific

View File

@@ -15,6 +15,6 @@ ceph_conf_overrides:
 osd_pool_default_size: 1
 openstack_config: False
 dashboard_enabled: False
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific

View File

@@ -15,6 +15,6 @@ ceph_conf_overrides:
 osd_pool_default_size: 1
 openstack_config: False
 dashboard_enabled: False
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific

View File

@@ -16,6 +16,6 @@ ceph_conf_overrides:
 openstack_config: False
 dashboard_enabled: False
 copy_admin_key: True
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific

View File

@@ -15,6 +15,6 @@ ceph_conf_overrides:
 openstack_config: False
 dashboard_enabled: False
 copy_admin_key: True
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific

View File

@@ -17,6 +17,6 @@ ceph_conf_overrides:
 openstack_config: False
 dashboard_enabled: False
 copy_admin_key: True
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific

View File

@@ -27,10 +27,10 @@ mds_max_mds: 2
 dashboard_enabled: false
 dashboard_admin_password: $sX!cD$rYU6qR^B!
 grafana_admin_password: +xFRe+RES@7vg24n
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-ceph_docker_image: ceph-ci/daemon
+ceph_docker_image: ceph/daemon
 ceph_docker_image_tag: latest-pacific
-node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
+node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
-prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
+prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
-alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
+alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
-grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"
+grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"

View File

@@ -19,9 +19,9 @@ mds_max_mds: 2
 dashboard_enabled: false
 dashboard_admin_password: $sX!cD$rYU6qR^B!
 grafana_admin_password: +xFRe+RES@7vg24n
-ceph_docker_registry: quay.ceph.io
+ceph_docker_registry: quay.io
-node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
+node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
-prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
+prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
-alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
+alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
-grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"
+grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"
 grafana_server_group_name: ceph_monitoring

View File

@@ -6,7 +6,7 @@ import ceph_crush_rule
 fake_cluster = 'ceph'
 fake_container_binary = 'podman'
-fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
+fake_container_image = 'quay.io/ceph/daemon:latest'
 fake_name = 'foo'
 fake_bucket_root = 'default'
 fake_bucket_type = 'host'

View File

@@ -70,8 +70,8 @@ class TestCephKeyModule(object):
 fake_cluster = "fake"
 fake_args = ['arg']
 fake_user = "fake-user"
-fake_key = "/tmp/my-key"
+fake_user_key = "/tmp/my-key"
-fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
+fake_container_image = "quay.io/ceph/daemon:latest-luminous"
 expected_command_list = ['docker',
 'run',
 '--rm',
@@ -80,7 +80,7 @@ class TestCephKeyModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph',
-'quay.ceph.io/ceph-ci/daemon:latest-luminous',
+'quay.io/ceph/daemon:latest-luminous',
 '-n',
 "fake-user",
 '-k',
@@ -90,7 +90,7 @@ class TestCephKeyModule(object):
 'auth',
 'arg']
 result = ceph_key.generate_ceph_cmd(
-fake_cluster, fake_args, fake_user, fake_key, fake_container_image)
+fake_cluster, fake_args, fake_user, fake_user_key, fake_container_image)
 assert result == expected_command_list
 def test_generate_ceph_authtool_cmd_non_container_no_auid(self):
@@ -134,7 +134,7 @@ class TestCephKeyModule(object):
 fake_dest = "/fake/ceph"
 fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
 fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
-fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
+fake_container_image = "quay.io/ceph/daemon:latest-luminous"
 expected_command_list = ['docker',
 'run',
 '--rm',
@@ -143,7 +143,7 @@ class TestCephKeyModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph-authtool',
-'quay.ceph.io/ceph-ci/daemon:latest-luminous',
+'quay.io/ceph/daemon:latest-luminous',
 '--create-keyring',
 fake_file_destination,
 '--name',
@@ -202,7 +202,7 @@ class TestCephKeyModule(object):
 fake_import_key = True
 fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
 fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
-fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
+fake_container_image = "quay.io/ceph/daemon:latest-luminous"
 expected_command_list = [
 ['docker',
 'run',
@@ -212,7 +212,7 @@ class TestCephKeyModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph-authtool',
-'quay.ceph.io/ceph-ci/daemon:latest-luminous',
+'quay.io/ceph/daemon:latest-luminous',
 '--create-keyring', fake_file_destination,
 '--name', fake_name,
 '--add-key', fake_secret,
@@ -226,7 +226,7 @@ class TestCephKeyModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph',
-'quay.ceph.io/ceph-ci/daemon:latest-luminous',
+'quay.io/ceph/daemon:latest-luminous',
 '-n', 'client.admin',
 '-k', '/etc/ceph/fake.client.admin.keyring',
 '--cluster', fake_cluster,
@@ -289,7 +289,7 @@ class TestCephKeyModule(object):
 fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
 fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
 # create_key passes (one for ceph-authtool and one for itself) itw own array so the expected result is an array within an array # noqa E501
-fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
+fake_container_image = "quay.io/ceph/daemon:latest-luminous"
 expected_command_list = [['docker', # noqa E128
 'run',
 '--rm',
@@ -298,7 +298,7 @@ class TestCephKeyModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph-authtool',
-'quay.ceph.io/ceph-ci/daemon:latest-luminous',
+'quay.io/ceph/daemon:latest-luminous',
 '--create-keyring',
 fake_file_destination,
 '--name',
@@ -332,7 +332,7 @@ class TestCephKeyModule(object):
 fake_user_key = '/etc/ceph/fake.client.admin.keyring'
 fake_cluster = "fake"
 fake_name = "client.fake"
-fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
+fake_container_image = "quay.io/ceph/daemon:latest-luminous"
 expected_command_list = [['docker',
 'run',
 '--rm',
@@ -341,7 +341,7 @@ class TestCephKeyModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph',
-'quay.ceph.io/ceph-ci/daemon:latest-luminous',
+'quay.io/ceph/daemon:latest-luminous',
 '-n', 'client.admin',
 '-k', '/etc/ceph/fake.client.admin.keyring',
 '--cluster', fake_cluster,
@@ -371,7 +371,7 @@ class TestCephKeyModule(object):
 fake_name = "client.fake"
 fake_user = 'client.admin'
 fake_user_key = '/etc/ceph/fake.client.admin.keyring'
-fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
+fake_container_image = "quay.io/ceph/daemon:latest-luminous"
 expected_command_list = [['docker', # noqa E128
 'run',
 '--rm',
@@ -380,7 +380,7 @@ class TestCephKeyModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph',
-'quay.ceph.io/ceph-ci/daemon:latest-luminous',
+'quay.io/ceph/daemon:latest-luminous',
 '-n', fake_user,
 '-k', fake_user_key,
 '--cluster', fake_cluster,
@@ -406,7 +406,7 @@ class TestCephKeyModule(object):
 fake_user = 'client.admin'
 fake_user_key = '/etc/ceph/fake.client.admin.keyring'
 fake_name = "client.fake"
-fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
+fake_container_image = "quay.io/ceph/daemon:latest-luminous"
 fake_dest = "/fake/ceph"
 fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
 fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
@@ -418,7 +418,7 @@ class TestCephKeyModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph',
-'quay.ceph.io/ceph-ci/daemon:latest-luminous',
+'quay.io/ceph/daemon:latest-luminous',
 '-n', fake_user,
 '-k', fake_user_key,
 '--cluster', fake_cluster,
@@ -463,7 +463,7 @@ class TestCephKeyModule(object):
 fake_user = "mon."
 fake_keyring_dirname = fake_cluster + "-" + fake_hostname
 fake_key = os.path.join("/var/lib/ceph/mon/", fake_keyring_dirname, 'keyring')
-fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
+fake_container_image = "quay.io/ceph/daemon:latest-luminous"
 expected_command_list = [['docker',
 'run',
 '--rm',
@@ -472,7 +472,7 @@ class TestCephKeyModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph',
-'quay.ceph.io/ceph-ci/daemon:latest-luminous',
+'quay.io/ceph/daemon:latest-luminous',
 '-n', "mon.",
 '-k', "/var/lib/ceph/mon/fake-mon01/keyring",
 '--cluster', fake_cluster,
@@ -485,7 +485,7 @@ class TestCephKeyModule(object):
 fake_cluster = "fake"
 fake_user = "fake-user"
 fake_key = "/tmp/my-key"
-fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
+fake_container_image = "quay.io/ceph/daemon:latest-luminous"
 expected_command_list = [['docker',
 'run',
 '--rm',
@@ -494,7 +494,7 @@ class TestCephKeyModule(object):
 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
 '-v', '/var/log/ceph/:/var/log/ceph/:z',
 '--entrypoint=ceph',
-'quay.ceph.io/ceph-ci/daemon:latest-luminous',
+'quay.io/ceph/daemon:latest-luminous',
 '-n', "fake-user",
 '-k', "/tmp/my-key",
 '--cluster', fake_cluster,

View File

@@ -6,7 +6,7 @@ import ceph_mgr_module
 fake_cluster = 'ceph'
 fake_container_binary = 'podman'
-fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
+fake_container_image = 'quay.io/ceph/daemon:latest'
 fake_module = 'noup'
 fake_user = 'client.admin'
 fake_keyring = '/etc/ceph/{}.{}.keyring'.format(fake_cluster, fake_user)

View File

@@ -6,7 +6,7 @@ import ceph_osd
 fake_cluster = 'ceph'
 fake_container_binary = 'podman'
-fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
+fake_container_image = 'quay.io/ceph/daemon:latest'
 fake_id = '42'
 fake_ids = ['0', '7', '13']
 fake_user = 'client.admin'

View File

@@ -6,7 +6,7 @@ import ceph_osd_flag
 fake_cluster = 'ceph'
 fake_container_binary = 'podman'
-fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
+fake_container_image = 'quay.io/ceph/daemon:latest'
 fake_flag = 'noup'
 fake_user = 'client.admin'
 fake_keyring = '/etc/ceph/{}.{}.keyring'.format(fake_cluster, fake_user)

View File

@@ -8,7 +8,7 @@ fake_user = 'client.admin'
 fake_user_key = '/etc/ceph/ceph.client.admin.keyring'
 fake_pool_name = 'foo'
 fake_cluster_name = 'ceph'
-fake_container_image_name = 'quay.ceph.io/ceph-ci/daemon:latest-luminous'
+fake_container_image_name = 'quay.io/ceph/daemon:latest-luminous'
 @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': 'podman'})

View File

@@ -76,7 +76,7 @@ class TestCephVolumeModule(object):
 def test_container_exec(self):
 fake_binary = "ceph-volume"
-fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
+fake_container_image = "quay.io/ceph/daemon:latest"
 expected_command_list = get_container_cmd() + [fake_container_image]
 result = ceph_volume.container_exec(fake_binary, fake_container_image)
 assert result == expected_command_list
@@ -84,7 +84,7 @@ class TestCephVolumeModule(object):
 def test_zap_osd_container(self):
 fake_module = MagicMock()
 fake_module.params = {'data': '/dev/sda'}
-fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
+fake_container_image = "quay.io/ceph/daemon:latest"
 expected_command_list = get_container_cmd() + \
 [fake_container_image,
 '--cluster',
@@ -167,7 +167,7 @@ class TestCephVolumeModule(object):
 def test_list_osd_container(self):
 fake_module = MagicMock()
 fake_module.params = {'cluster': 'ceph', 'data': '/dev/sda'}
-fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
+fake_container_image = "quay.io/ceph/daemon:latest"
 expected_command_list = get_container_cmd(
 {
 '/var/lib/ceph': '/var/lib/ceph:ro'
@@ -196,7 +196,7 @@ class TestCephVolumeModule(object):
 def test_list_storage_inventory_container(self):
 fake_module = MagicMock()
-fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
+fake_container_image = "quay.io/ceph/daemon:latest"
 expected_command_list = get_container_cmd() + \
 [fake_container_image,
 '--cluster',
@@ -214,7 +214,7 @@ class TestCephVolumeModule(object):
 'cluster': 'ceph', }
 fake_action = "create"
-fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
+fake_container_image = "quay.io/ceph/daemon:latest"
 expected_command_list = get_container_cmd() + \
 [fake_container_image,
 '--cluster',
@@ -257,7 +257,7 @@ class TestCephVolumeModule(object):
 'cluster': 'ceph', }
 fake_action = "prepare"
-fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
+fake_container_image = "quay.io/ceph/daemon:latest"
 expected_command_list = get_container_cmd() + \
 [fake_container_image,
 '--cluster',
@@ -302,7 +302,7 @@ class TestCephVolumeModule(object):
 'cluster': 'ceph',
 'batch_devices': ["/dev/sda", "/dev/sdb"]}
-fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
+fake_container_image = "quay.io/ceph/daemon:latest"
 expected_command_list = get_container_cmd() + \
 [fake_container_image,
 '--cluster',

View File

@@ -6,7 +6,7 @@ import ceph_volume_simple_activate
 fake_cluster = 'ceph'
 fake_container_binary = 'podman'
-fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
+fake_container_image = 'quay.io/ceph/daemon:latest'
 fake_id = '42'
 fake_uuid = '0c4a7eca-0c2a-4c12-beff-08a80f064c52'
 fake_path = '/etc/ceph/osd/{}-{}.json'.format(fake_id, fake_uuid)

View File

@@ -6,7 +6,7 @@ import ceph_volume_simple_scan
 fake_cluster = 'ceph'
 fake_container_binary = 'podman'
-fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
+fake_container_image = 'quay.io/ceph/daemon:latest'
 fake_path = '/var/lib/ceph/osd/ceph-0'

View File

@@ -4,7 +4,7 @@ import ca_test_common
 import cephadm_adopt
 fake_cluster = 'ceph'
-fake_image = 'quay.ceph.io/ceph/daemon-base:latest'
+fake_image = 'quay.io/ceph/daemon-base:latest'
 fake_name = 'mon.foo01'

View File

@@ -4,9 +4,9 @@ import ca_test_common
 import cephadm_bootstrap
 fake_fsid = '0f1e0605-db0b-485c-b366-bd8abaa83f3b'
-fake_image = 'quay.ceph.io/ceph/daemon-base:latest-master-devel'
+fake_image = 'quay.io/ceph/daemon-base:latest-pacific-devel'
 fake_ip = '192.168.42.1'
-fake_registry = 'quay.ceph.io'
+fake_registry = 'quay.io'
 fake_registry_user = 'foo'
 fake_registry_pass = 'bar'
 fake_registry_json = 'registry.json'

View File

@@ -4,7 +4,7 @@ envlist = centos-container-cephadm
 skipsdist = True
 [testenv]
-whitelist_externals =
+allowlist_externals =
 vagrant
 bash
 pip

View File

@@ -4,7 +4,7 @@ envlist = centos-container-docker_to_podman
 skipsdist = True
 [testenv]
-whitelist_externals =
+allowlist_externals =
 vagrant
 bash
 pip

View File

@@ -4,7 +4,7 @@ envlist = centos-{container,non_container}-external_clients
 skipsdist = True
 [testenv]
-whitelist_externals =
+allowlist_externals =
 vagrant
 bash
 git

View File

@@ -4,7 +4,7 @@ envlist = centos-{container,non_container}-filestore_to_bluestore
 skipsdist = True
 [testenv]
-whitelist_externals =
+allowlist_externals =
 vagrant
 bash
 git

View File

@@ -4,7 +4,7 @@ envlist = centos-container-podman
 skipsdist = True
 [testenv]
-whitelist_externals =
+allowlist_externals =
 vagrant
 bash
 pip

View File

@@ -45,7 +45,7 @@ commands=
 "
 [testenv]
-whitelist_externals =
+allowlist_externals =
 vagrant
 bash
 passenv=*

View File

@@ -4,7 +4,7 @@ envlist = centos-{container,non_container}-subset_update
 skipsdist = True
 [testenv]
-whitelist_externals =
+allowlist_externals =
 vagrant
 bash
 git
@@ -44,8 +44,8 @@ commands=
 # deploy the cluster
 bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {changedir}/{env:INVENTORY} {envdir}/tmp/ceph-ansible/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
-ceph_docker_registry=quay.ceph.io \
+ceph_docker_registry=quay.io \
-ceph_docker_image=ceph-ci/daemon \
+ceph_docker_image=ceph/daemon \
 ceph_docker_image_tag=latest-octopus \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \

View File

@@ -4,7 +4,7 @@ envlist = centos-{container,non_container}-update
 skipsdist = True
 [testenv]
-whitelist_externals =
+allowlist_externals =
 vagrant
 bash
 git
@@ -50,8 +50,8 @@ commands=
 # When rendering the ganesha.conf.j2 template, it complains because of undefined variables in the block "{% if nfs_obj_gw | bool %}" although we explicitly set this variable to false (see below).
 bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} \
+ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph-ci/daemon} \
+ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
 ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \

tox.ini (22 changed lines)
View File

@@ -8,7 +8,7 @@ skipsdist = True
 # a test scenario for the lv-create.yml and lv-teardown playbooks
 [testenv:infra_lv_create]
-whitelist_externals =
+allowlist_externals =
 vagrant
 bash
 mkdir
@@ -43,16 +43,16 @@ commands=
 [purge]
 commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rbd_map_devices.yml --extra-vars "\
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} \
+ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph-ci/daemon} \
+ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
 ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-pacific} \
 "
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
 ireallymeanit=yes \
 remove_packages=yes \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} \
+ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph-ci/daemon} \
+ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
 ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-pacific} \
 "
@@ -72,8 +72,8 @@ commands=
 commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/purge-dashboard.yml --extra-vars "\
 ireallymeanit=yes \
-ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} \
+ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.io} \
-ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph-ci/daemon} \
+ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
 ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-pacific} \
 "
@@ -149,8 +149,8 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml --extra-vars "\
 ireallymeanit=yes \
 ceph_docker_image_tag=latest-pacific-devel \
-ceph_docker_registry=quay.ceph.io \
+ceph_docker_registry=quay.io \
-ceph_docker_image=ceph-ci/daemon \
+ceph_docker_image=ceph/daemon \
 ceph_docker_registry_auth=True \
 ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
 ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
@@ -259,7 +259,7 @@ commands=
 "
 [testenv]
-whitelist_externals =
+allowlist_externals =
 vagrant
 bash
 pip
@@ -334,7 +334,7 @@ commands=
 !lvm_batch-!lvm_auto_discovery: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osds:!osd2'
 lvm_osds,all_in_one: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit osd2
-rhcs: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"
+rhcs: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.io} repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml