tests: set copy_admin_key at group_vars level

Setting it at the extra vars level prevents setting it per node.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 5bb6a4da42)
pull/4475/head
Guillaume Abrioux 2019-09-24 19:13:31 +02:00
parent e1d06f498c
commit b1e61be9c6
9 changed files with 34 additions and 25 deletions
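
Background for the rationale above: Ansible gives variables passed via --extra-vars the highest precedence, so a value injected that way by tox cannot be overridden from the inventory. With copy_admin_key defined in group_vars instead, a more specific group or a single host can still change it. A minimal sketch of that layout, using illustrative file and host names that are not part of this change:

# group_vars/all.yml -- scenario-wide default
copy_admin_key: True

# host_vars/client1.yml -- hypothetical per-node override
copy_admin_key: False

Passing the variable as an extra var (ansible-playbook site.yml -e copy_admin_key=False) would still win over both files, which is why the COPY_ADMIN_KEY extra var is dropped from the tox commands below.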

@@ -1,11 +1,24 @@
 ---
 - name: copy ceph admin keyring
+  block:
+    - name: get keys from monitors
+      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
+      register: _client_keys
+      with_items:
+        - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
+      delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+      when:
+        - cephx | bool
+        - item.copy_key | bool
+    - name: copy ceph key(s) if needed
       copy:
-    src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.client.admin.keyring"
-    dest: "/etc/ceph/"
+        dest: "{{ item.item.path }}"
+        content: "{{ item.stdout + '\n' }}"
         owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
         group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
         mode: "{{ ceph_keyring_permissions }}"
+      with_items: "{{ _client_keys.results }}"
       when:
         - cephx | bool
-    - copy_admin_key | bool
+        - item.item.copy_key | bool
+  when: cephx | bool

@@ -14,3 +14,4 @@ ceph_conf_overrides:
     osd_pool_default_size: 1
 openstack_config: False
 dashboard_enabled: False
+copy_admin_key: True

@@ -14,3 +14,4 @@ ceph_conf_overrides:
     osd_pool_default_size: 1
 openstack_config: False
 dashboard_enabled: False
+copy_admin_key: True

@@ -8,3 +8,4 @@ ceph_conf_overrides:
     osd_pool_default_size: 3
 openstack_config: False
 dashboard_enabled: False
+copy_admin_key: True

@@ -13,3 +13,4 @@ ceph_conf_overrides:
     osd_pool_default_size: 1
 openstack_config: False
 dashboard_enabled: False
+copy_admin_key: True

@@ -15,3 +15,4 @@ ceph_conf_overrides:
     osd_pool_default_size: 1
 openstack_config: False
 dashboard_enabled: False
+copy_admin_key: True

@@ -51,7 +51,6 @@ commands=
 ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
 ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
 ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
-copy_admin_key={env:COPY_ADMIN_KEY:False} \
 container_binary=podman \
 container_package_name=podman \
 container_service_name=podman \

@@ -61,7 +61,6 @@ commands=
 ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
 ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
 ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-luminous} \
-copy_admin_key={env:COPY_ADMIN_KEY:False} \
 "'
 pip install -r {toxinidir}/tests/requirements.txt

tox.ini

@@ -44,7 +44,6 @@ commands=
 ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
 ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
 ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
-copy_admin_key={env:COPY_ADMIN_KEY:False} \
 "
 # wait 30sec for services to be ready
@@ -378,14 +377,9 @@ setenv=
 container: CONTAINER_DIR = /container
 container: PLAYBOOK = site-docker.yml.sample
 container: PURGE_PLAYBOOK = purge-docker-cluster.yml
-storage_inventory: COPY_ADMIN_KEY = True
 non_container: PLAYBOOK = site.yml.sample
 shrink_mon: MON_TO_KILL = mon2
-shrink_osd: COPY_ADMIN_KEY = True
 shrink_mgr: MGR_TO_KILL = mgr1
-shrink_mds: COPY_ADMIN_KEY = True
-shrink_rbdmirror: COPY_ADMIN_KEY = True
-shrink_rgw: COPY_ADMIN_KEY = True
 rhcs: CEPH_STABLE_RELEASE = luminous
 lvm_osds: CEPH_STABLE_RELEASE = nautilus
@@ -444,8 +438,7 @@ commands=
 ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
 ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
 ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
-copy_admin_key={env:COPY_ADMIN_KEY:False} \
+ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
 "
 # wait 30sec for services to be ready
@@ -462,7 +455,7 @@ commands=
 all_daemons: py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
 # handlers/idempotency test
-all_daemons: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-master} copy_admin_key={env:COPY_ADMIN_KEY:False} " --extra-vars @ceph-override.json
+all_daemons: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-nautilus} " --extra-vars @ceph-override.json
 purge: {[purge]commands}
 switch_to_containers: {[switch-to-containers]commands}