tests: add more coverage in external_clients scenario

Run create_users_keys.yml in external_clients scenario

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 8c1c34b201)
pull/5231/head
Guillaume Abrioux 2020-03-27 17:56:26 +01:00 committed by Dimitri Savineau
parent 4c5e0f7f78
commit 5b89635a50
14 changed files with 84 additions and 11 deletions
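
For context: create_users_keys.yml in the ceph-client role drives pool and CephX user creation from the `pools`, `user_config`, and `keys` variables (the group_vars files added below set the first two), so running it against the external cluster covers client-side key handling in this scenario. A minimal sketch of a `keys` entry that would exercise user creation — the user name and caps below are hypothetical and not part of this change:

# Hypothetical client group_vars sketch; illustrative only, not added by this PR.
user_config: true
keys:
  - name: client.example
    caps:
      mon: "allow r"
      osd: "allow rwx pool=test"
    mode: "0600"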

@@ -1,6 +1,7 @@
 ---
 - name: include pre_requisite.yml
   include_tasks: pre_requisite.yml
+  when: groups.get(mon_group_name, []) | length > 0

 - name: include create_users_keys.yml
   include_tasks: create_users_keys.yml
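
The new `when` guard includes pre_requisite.yml only when the inventory actually defines monitors; `groups.get(mon_group_name, [])` falls back to an empty list otherwise, mirroring the conftest.py change below that does the same for the `mons` group in Python. A minimal sketch of an inventory without a mons group, in YAML form with hypothetical hostnames (not the scenario's actual hosts file), where the guard would skip the include:

# Hypothetical YAML inventory with no mons group; with it, the guard above
# evaluates groups.get(mon_group_name, []) | length > 0 to false.
all:
  children:
    clients:
      hosts:
        client0: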

@@ -39,7 +39,7 @@ def setup(host):
     cluster_interface = "ens7"
     subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
-    num_mons = len(ansible_vars["groups"]["mons"])
+    num_mons = len(ansible_vars["groups"].get('mons', []))
     if osd_auto_discovery:
         num_osds = 3
     else:

@@ -9,7 +9,7 @@
 - block:
     - name: set_fact group_vars_path
       set_fact:
-        group_vars_path: "{{ change_dir + '/hosts' if 'ooo-collocation' in change_dir.split('/') else change_dir + '/group_vars' }}"
+        group_vars_path: "{{ change_dir + '/hosts' if 'ooo-collocation' in change_dir.split('/') else change_dir + '/inventory/group_vars' if 'external_clients' in change_dir.split('/') else change_dir + '/group_vars' }}"

 - block:
     - name: change ceph_repository to 'dev'

@@ -0,0 +1,24 @@
+---
+copy_admin_key: True
+user_config: True
+test:
+  name: "test"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "replicated_rule"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  size: "{{ osd_pool_default_size }}"
+test2:
+  name: "test2"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "replicated_rule"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  size: "{{ osd_pool_default_size }}"
+pools:
+  - "{{ test }}"
+  - "{{ test2 }}"

@@ -0,0 +1,23 @@
+---
+copy_admin_key: True
+user_config: True
+test:
+  name: "test"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "replicated_rule"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  size: "{{ osd_pool_default_size }}"
+test2:
+  name: "test2"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  size: "{{ osd_pool_default_size }}"
+pools:
+  - "{{ test }}"
+  - "{{ test2 }}"

@@ -0,0 +1,27 @@
+---
+- hosts: clients
+  gather_facts: false
+  become: yes
+  tasks:
+    - name: get keys from monitors
+      command: "{{ 'podman exec ceph-mon-mon0' if containerized_deployment | bool else '' }} ceph --cluster ceph auth get client.admin"
+      register: _key
+      delegate_to: "{{ groups.get('mons')[0] }}"
+      run_once: true
+
+    - name: create /etc/ceph
+      file:
+        path: /etc/ceph
+        state: directory
+        owner: 167
+        group: 167
+        mode: "0755"
+
+    - name: copy ceph key(s) if needed
+      copy:
+        dest: "/etc/ceph/ceph.client.admin.keyring"
+        content: "{{ _key.stdout + '\n' }}"
+        owner: 167
+        group: 167
+        mode: "0600"

@@ -22,11 +22,7 @@ setenv=
 # Set the vagrant box image to use
 centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/8
 centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/8
-ubuntu: CEPH_ANSIBLE_VAGRANT_BOX = guits/ubuntu-bionic64
-# Set the ansible inventory host file to be used according to which distrib we are running on
-ubuntu: _INVENTORY = hosts-ubuntu
-INVENTORY = {env:_INVENTORY:hosts}
 container: CONTAINER_DIR = /container
 container: PLAYBOOK = site-container.yml.sample
 non_container: PLAYBOOK = site.yml.sample
@@ -38,12 +34,12 @@ commands=
 bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
 bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
-ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
+ansible-playbook -vv -i {changedir}/inventory {toxinidir}/tests/functional/setup.yml
 # configure lvm
-ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
-ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit 'all:!clients' --extra-vars "\
+ansible-playbook -vv -i {changedir}/inventory/hosts {toxinidir}/tests/functional/lvm_setup.yml
+ansible-playbook -vv -i {changedir}/inventory/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit 'all:!clients' --extra-vars "\
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
 ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
@@ -52,7 +48,9 @@ commands=
 ceph_docker_image_tag=latest-octopus \
 "
-ansible-playbook -vv -i {changedir}/external_clients-hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ansible-playbook -vv -i {changedir}/inventory {toxinidir}/tests/functional/external_clients_admin_key.yml
+ansible-playbook -vv -i {changedir}/inventory/external_clients-hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
 ireallymeanit=yes \
 fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
 fsid=40358a87-ab6e-4bdc-83db-1d909147861c \
@@ -64,6 +62,6 @@ commands=
 ceph_docker_image_tag=latest-octopus \
 "
-bash -c "CEPH_STABLE_RELEASE={env:CEPH_STABLE_RELEASE:octopus} py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
+bash -c "CEPH_STABLE_RELEASE={env:CEPH_STABLE_RELEASE:octopus} py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/inventory/external_clients-hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests/test_install.py::TestCephConf"
 vagrant destroy --force