tests: set copy_admin_key at group_vars level

Setting it at the extra-vars level prevents setting it per node.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 5bb6a4da42)
pull/4475/head
Guillaume Abrioux 2019-09-24 19:13:31 +02:00
parent e1d06f498c
commit b1e61be9c6
9 changed files with 34 additions and 25 deletions

View File

@@ -1,11 +1,24 @@
 ---
-- name: copy ceph admin keyring
-  copy:
-    src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.client.admin.keyring"
-    dest: "/etc/ceph/"
-    owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
-    group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
-    mode: "{{ ceph_keyring_permissions }}"
-  when:
-    - cephx | bool
-    - copy_admin_key | bool
+- name: copy ceph admin keyring
+  block:
+    - name: get keys from monitors
+      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
+      register: _client_keys
+      with_items:
+        - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
+      delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+      when:
+        - cephx | bool
+        - item.copy_key | bool
+
+    - name: copy ceph key(s) if needed
+      copy:
+        dest: "{{ item.item.path }}"
+        content: "{{ item.stdout + '\n' }}"
+        owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
+        group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
+        mode: "{{ ceph_keyring_permissions }}"
+      with_items: "{{ _client_keys.results }}"
+      when:
+        - item.item.copy_key | bool
+  when: cephx | bool

View File

@@ -13,4 +13,5 @@ ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
 openstack_config: False
 dashboard_enabled: False
+copy_admin_key: True

View File

@@ -13,4 +13,5 @@ ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
 openstack_config: False
 dashboard_enabled: False
+copy_admin_key: True

View File

@@ -7,4 +7,5 @@ ceph_conf_overrides:
   global:
     osd_pool_default_size: 3
 openstack_config: False
 dashboard_enabled: False
+copy_admin_key: True

View File

@@ -12,4 +12,5 @@ ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
 openstack_config: False
 dashboard_enabled: False
+copy_admin_key: True

View File

@@ -15,3 +15,4 @@ ceph_conf_overrides:
     osd_pool_default_size: 1
 openstack_config: False
 dashboard_enabled: False
+copy_admin_key: True

View File

@@ -51,7 +51,6 @@ commands=
           ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
           ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
           ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
-          copy_admin_key={env:COPY_ADMIN_KEY:False} \
           container_binary=podman \
           container_package_name=podman \
           container_service_name=podman \
@@ -71,4 +70,4 @@ commands=
   # retest to ensure cluster came back up correctly after rebooting
   py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
   vagrant destroy -f

View File

@@ -61,7 +61,6 @@ commands=
           ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
           ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
           ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-luminous} \
-          copy_admin_key={env:COPY_ADMIN_KEY:False} \
       "'
   pip install -r {toxinidir}/tests/requirements.txt

11
tox.ini
View File

@@ -44,7 +44,6 @@ commands=
           ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
           ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
           ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
-          copy_admin_key={env:COPY_ADMIN_KEY:False} \
       "
   # wait 30sec for services to be ready
@@ -378,14 +377,9 @@ setenv=
   container: CONTAINER_DIR = /container
   container: PLAYBOOK = site-docker.yml.sample
   container: PURGE_PLAYBOOK = purge-docker-cluster.yml
-  storage_inventory: COPY_ADMIN_KEY = True
   non_container: PLAYBOOK = site.yml.sample
   shrink_mon: MON_TO_KILL = mon2
-  shrink_osd: COPY_ADMIN_KEY = True
   shrink_mgr: MGR_TO_KILL = mgr1
-  shrink_mds: COPY_ADMIN_KEY = True
-  shrink_rbdmirror: COPY_ADMIN_KEY = True
-  shrink_rgw: COPY_ADMIN_KEY = True
   rhcs: CEPH_STABLE_RELEASE = luminous
   lvm_osds: CEPH_STABLE_RELEASE = nautilus
@@ -444,8 +438,7 @@ commands=
           ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
           ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
           ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
-          ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
-          copy_admin_key={env:COPY_ADMIN_KEY:False} \
+          ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
       "
   # wait 30sec for services to be ready
@@ -462,7 +455,7 @@ commands=
   all_daemons: py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
   # handlers/idempotency test
-  all_daemons: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-master} copy_admin_key={env:COPY_ADMIN_KEY:False} " --extra-vars @ceph-override.json
+  all_daemons: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-nautilus} " --extra-vars @ceph-override.json
   purge: {[purge]commands}
   switch_to_containers: {[switch-to-containers]commands}