tests: add more coverage in external_clients scenario

Run create_users_keys.yml in external_clients scenario

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/5227/head
Guillaume Abrioux 2020-03-27 17:56:26 +01:00 committed by Dimitri Savineau
parent 5b0476385c
commit 8c1c34b201
14 changed files with 84 additions and 11 deletions

View File

@@ -1,6 +1,7 @@
 ---
 - name: include pre_requisite.yml
   include_tasks: pre_requisite.yml
+  when: groups.get(mon_group_name, []) | length > 0

 - name: include create_users_keys.yml
   include_tasks: create_users_keys.yml

View File

@@ -39,7 +39,7 @@ def setup(host):
     cluster_interface = "ens7"
     subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
-    num_mons = len(ansible_vars["groups"]["mons"])
+    num_mons = len(ansible_vars["groups"].get('mons', []))
     if osd_auto_discovery:
         num_osds = 3
     else:

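Note on the hunk above: switching from ansible_vars["groups"]["mons"] to .get('mons', []) keeps the test setup from raising a KeyError when the inventory in use may not define a mons group, as can happen in the external_clients run. A minimal illustrative sketch, not part of the commit:

    # Illustration only: defensive group lookup when a group may be absent.
    ansible_vars = {"groups": {"clients": ["client0"]}}  # no 'mons' key here

    # len(ansible_vars["groups"]["mons"])  -> would raise KeyError
    num_mons = len(ansible_vars["groups"].get("mons", []))  # falls back to 0
    print(num_mons)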
View File

@@ -9,7 +9,7 @@
   - block:
       - name: set_fact group_vars_path
        set_fact:
-          group_vars_path: "{{ change_dir + '/hosts' if 'ooo-collocation' in change_dir.split('/') else change_dir + '/group_vars' }}"
+          group_vars_path: "{{ change_dir + '/hosts' if 'ooo-collocation' in change_dir.split('/') else change_dir + '/inventory/group_vars' if 'external_clients' in change_dir.split('/') else change_dir + '/group_vars' }}"

   - block:
       - name: change ceph_repository to 'dev'

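The new group_vars_path expression chains two Jinja inline conditionals; they associate to the right, so 'ooo-collocation' is checked first, then 'external_clients' selects inventory/group_vars, and everything else falls back to group_vars. A rough Python equivalent of that evaluation order, with a hypothetical change_dir, purely for illustration:

    # Rough Python equivalent of the chained Jinja conditional (illustration only).
    def group_vars_path(change_dir: str) -> str:
        parts = change_dir.split("/")
        return (change_dir + "/hosts" if "ooo-collocation" in parts
                else change_dir + "/inventory/group_vars" if "external_clients" in parts
                else change_dir + "/group_vars")

    # Hypothetical scenario directory, just to show which branch is taken:
    print(group_vars_path("tests/functional/external_clients/container"))
    # -> tests/functional/external_clients/container/inventory/group_vars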
View File

@@ -0,0 +1,24 @@
+---
+copy_admin_key: True
+user_config: True
+test:
+  name: "test"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "replicated_rule"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  size: "{{ osd_pool_default_size }}"
+test2:
+  name: "test2"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "replicated_rule"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  size: "{{ osd_pool_default_size }}"
+pools:
+  - "{{ test }}"
+  - "{{ test2 }}"

View File

@@ -0,0 +1,23 @@
+---
+copy_admin_key: True
+user_config: True
+test:
+  name: "test"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "replicated_rule"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  size: "{{ osd_pool_default_size }}"
+test2:
+  name: "test2"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  size: "{{ osd_pool_default_size }}"
+pools:
+  - "{{ test }}"
+  - "{{ test2 }}"

View File

@@ -0,0 +1,27 @@
+---
+- hosts: clients
+  gather_facts: false
+  become: yes
+  tasks:
+    - name: get keys from monitors
+      command: "{{ 'podman exec ceph-mon-mon0' if containerized_deployment | bool else '' }} ceph --cluster ceph auth get client.admin"
+      register: _key
+      delegate_to: "{{ groups.get('mons')[0] }}"
+      run_once: true
+
+    - name: create /etc/ceph
+      file:
+        path: /etc/ceph
+        state: directory
+        owner: 167
+        group: 167
+        mode: "0755"
+
+    - name: copy ceph key(s) if needed
+      copy:
+        dest: "/etc/ceph/ceph.client.admin.keyring"
+        content: "{{ _key.stdout + '\n' }}"
+        owner: 167
+        group: 167
+        mode: "0600"

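The playbook above pulls client.admin from the first monitor and writes it to /etc/ceph on each client with uid/gid 167, which is typically the ceph user/group inside the container images. Purely as an illustration of the result it produces, and not part of this change, a testinfra-style check could look like:

    # Illustration only (hypothetical test, not in this commit): verify the
    # keyring installed by external_clients_admin_key.yml on a client node.
    def test_admin_keyring_present(host):
        keyring = host.file("/etc/ceph/ceph.client.admin.keyring")
        assert keyring.exists
        assert keyring.uid == 167
        assert keyring.gid == 167
        assert keyring.mode == 0o600
        assert "[client.admin]" in keyring.content_string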
View File

@@ -22,11 +22,7 @@ setenv=
   # Set the vagrant box image to use
   centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/8
   centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/8
-  ubuntu: CEPH_ANSIBLE_VAGRANT_BOX = guits/ubuntu-bionic64
-  # Set the ansible inventory host file to be used according to which distrib we are running on
-  ubuntu: _INVENTORY = hosts-ubuntu
-  INVENTORY = {env:_INVENTORY:hosts}
   container: CONTAINER_DIR = /container
   container: PLAYBOOK = site-container.yml.sample
   non_container: PLAYBOOK = site.yml.sample
@@ -37,13 +33,13 @@ commands=
   bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
   bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
-  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
+  ansible-playbook -vv -i {changedir}/inventory {toxinidir}/tests/functional/setup.yml
   # configure lvm
-  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
+  ansible-playbook -vv -i {changedir}/inventory/hosts {toxinidir}/tests/functional/lvm_setup.yml
   non_container: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch=master ceph_dev_sha1=latest" --tags "vagrant_setup"
-  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit 'all:!clients' --extra-vars "\
+  ansible-playbook -vv -i {changedir}/inventory/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit 'all:!clients' --extra-vars "\
       delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
       fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
       ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
@@ -53,7 +49,9 @@ commands=
       ceph_dev_sha1=latest \
   "
-  ansible-playbook -vv -i {changedir}/external_clients-hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+  ansible-playbook -vv -i {changedir}/inventory {toxinidir}/tests/functional/external_clients_admin_key.yml
+  ansible-playbook -vv -i {changedir}/inventory/external_clients-hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
       ireallymeanit=yes \
       fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
       fsid=40358a87-ab6e-4bdc-83db-1d909147861c \
@@ -66,6 +64,6 @@ commands=
       ceph_dev_sha1=latest \
   "
-  bash -c "CEPH_STABLE_RELEASE={env:UPDATE_CEPH_STABLE_RELEASE:octopus} py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
+  py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/inventory/external_clients-hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests/test_install.py::TestCephConf
   vagrant destroy --force