tests: set copy_admin_key at group_vars level

Setting it at the extra-vars level prevents setting it per node.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/4450/head
Guillaume Abrioux 2019-09-24 19:13:31 +02:00
parent ab370b6ad8
commit 5bb6a4da42
9 changed files with 33 additions and 24 deletions
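
Context on the motivation: values passed on the ansible-playbook command line with --extra-vars take the highest precedence, so a copy_admin_key injected by tox applied to the whole cluster and could not be overridden for a single group or node from the inventory. Declaring it in each scenario's group_vars keeps normal variable precedence, so a host can still opt out. A minimal sketch of that layering (hypothetical file names, not part of this commit):

# group_vars/all -- scenario-wide default, as this commit does
copy_admin_key: True

# host_vars/client0 -- hypothetical per-node override, only effective now that
# the value is no longer forced through --extra-vars
copy_admin_key: False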

View File

@@ -1,11 +1,24 @@
 ---
 - name: copy ceph admin keyring
-  copy:
-    src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.client.admin.keyring"
-    dest: "/etc/ceph/"
-    owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
-    group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
-    mode: "{{ ceph_keyring_permissions }}"
-  when:
-    - cephx | bool
-    - copy_admin_key | bool
+  block:
+    - name: get keys from monitors
+      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
+      register: _client_keys
+      with_items:
+        - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
+      delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+      when:
+        - cephx | bool
+        - item.copy_key | bool
+
+    - name: copy ceph key(s) if needed
+      copy:
+        dest: "{{ item.item.path }}"
+        content: "{{ item.stdout + '\n' }}"
+        owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
+        group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
+        mode: "{{ ceph_keyring_permissions }}"
+      with_items: "{{ _client_keys.results }}"
+      when:
+        - item.item.copy_key | bool
+  when: cephx | bool
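
A note on the task pair introduced above: register combined with with_items stores one entry per loop item under .results, and each entry carries the original loop item as .item, which is why the second task can read item.item.path and item.stdout. A standalone sketch of the same mechanism (hypothetical play, not part of this commit):

---
# Hypothetical illustration of the register + with_items pattern used above.
- hosts: localhost
  gather_facts: false
  tasks:
    - name: run one command per item
      command: "echo secret-for-{{ item.name }}"
      register: _client_keys
      with_items:
        - { name: "client.admin", path: "/tmp/client.admin.keyring", copy_key: true }
      when: item.copy_key | bool

    - name: write each captured stdout to the path kept in the original item
      copy:
        dest: "{{ item.item.path }}"
        content: "{{ item.stdout + '\n' }}"
      with_items: "{{ _client_keys.results }}"
      when: item.item.copy_key | bool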

View File

@@ -13,4 +13,5 @@ ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
 openstack_config: False
-dashboard_enabled: False
+dashboard_enabled: False
+copy_admin_key: True

View File

@@ -13,4 +13,5 @@ ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
 openstack_config: False
-dashboard_enabled: False
+dashboard_enabled: False
+copy_admin_key: True

View File

@@ -7,4 +7,5 @@ ceph_conf_overrides:
   global:
     osd_pool_default_size: 3
 openstack_config: False
-dashboard_enabled: False
+dashboard_enabled: False
+copy_admin_key: True

View File

@@ -12,4 +12,5 @@ ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
 openstack_config: False
-dashboard_enabled: False
+dashboard_enabled: False
+copy_admin_key: True

View File

@@ -15,3 +15,4 @@ ceph_conf_overrides:
     osd_pool_default_size: 1
 openstack_config: False
 dashboard_enabled: False
+copy_admin_key: True

View File

@@ -51,7 +51,6 @@ commands=
 ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
 ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
 ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
-copy_admin_key={env:COPY_ADMIN_KEY:False} \
 container_binary=podman \
 container_package_name=podman \
 container_service_name=podman \
@@ -71,4 +70,4 @@ commands=
 # retest to ensure cluster came back up correctly after rebooting
 py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
-vagrant destroy -f
+vagrant destroy -f

View File

@@ -63,7 +63,6 @@ commands=
 ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
 ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
 ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-mimic} \
-copy_admin_key={env:COPY_ADMIN_KEY:False} \
 "'
 pip install -r {toxinidir}/tests/requirements.txt

View File

@@ -44,7 +44,6 @@ commands=
 ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
 ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
 ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
-copy_admin_key={env:COPY_ADMIN_KEY:False} \
 "
 # wait 30sec for services to be ready
@@ -391,14 +390,9 @@ setenv=
 container: CONTAINER_DIR = /container
 container: PLAYBOOK = site-docker.yml.sample
 container: PURGE_PLAYBOOK = purge-docker-cluster.yml
-storage_inventory: COPY_ADMIN_KEY = True
 non_container: PLAYBOOK = site.yml.sample
 shrink_mon: MON_TO_KILL = mon2
-shrink_osd: COPY_ADMIN_KEY = True
 shrink_mgr: MGR_TO_KILL = mgr1
-shrink_mds: COPY_ADMIN_KEY = True
-shrink_rbdmirror: COPY_ADMIN_KEY = True
-shrink_rgw: COPY_ADMIN_KEY = True
 rhcs: CEPH_STABLE_RELEASE = luminous
 lvm_osds: CEPH_STABLE_RELEASE = luminous
@@ -466,7 +460,6 @@ commands=
 ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
 ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
 ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
-copy_admin_key={env:COPY_ADMIN_KEY:False} \
 "
 # wait 30sec for services to be ready
@@ -483,7 +476,7 @@ commands=
 all_daemons: py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
 # handlers/idempotency test
-all_daemons: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-master} ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} copy_admin_key={env:COPY_ADMIN_KEY:False}" --extra-vars @ceph-override.json
+all_daemons: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-master} ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --extra-vars @ceph-override.json
 purge: {[purge]commands}
 switch_to_containers: {[switch-to-containers]commands}