Adds RGWs pool creation to containerized installation.

The ceph command has to be executed from one of the monitor containers
if no admin key copy is present on the RGWs. The task then has to be delegated to a monitor.

Adds test to check proper RGW pool creation for Docker container scenarios.

Signed-off-by: Jorge Tudela <jtudelag@redhat.com>
pull/2696/head
jtudelag 2018-03-04 23:06:48 +01:00 committed by Sébastien Han
parent aae37b44f5
commit 8704144e31
7 changed files with 84 additions and 4 deletions

View File

@ -6,3 +6,8 @@
- name: include start_docker_rgw.yml
  include: start_docker_rgw.yml

# Create the RGW pools once the containers are up; only runs when the
# operator has declared pools via the `create_pools` variable.
- name: include rgw_pool_pgs.yml
  include: rgw_pool_pgs.yml
  when:
    - create_pools is defined

View File

@ -0,0 +1,26 @@
---
# Create the RGW pools declared in `create_pools` on a containerized
# deployment. `create_pools` maps pool name -> { pg_num: N }.

# If the admin key has been copied to the RGWs, we can run the command from them.
- name: create rgw pools if create_pools is defined
  command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
  changed_when: false
  run_once: true
  with_dict: "{{ create_pools }}"
  when:
    - copy_admin_key

# If no admin key has been copied to the RGWs, we have to run the command
# from the first monitor's container instead.
- name: set_fact docker_exec_mon_cmd
  set_fact:
    docker_exec_mon_cmd: "docker exec ceph-mon-{{ hostvars[groups.get(mon_group_name)[0]]['ansible_hostname'] }}"
  when:
    - not copy_admin_key

# Same pool creation, delegated to the first monitor because the RGW
# nodes lack the admin keyring.
- name: create rgw pools if create_pools is defined, delegated to first monitor
  command: "{{ docker_exec_mon_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
  changed_when: false
  run_once: true
  delegate_to: "{{ groups[mon_group_name][0] }}"
  with_dict: "{{ create_pools }}"
  when:
    - not copy_admin_key

View File

@ -33,8 +33,9 @@
include: rgw_pool_pgs.yml
when:
- create_pools is defined
static: False
- not containerized_deployment
static: False
- name: include multisite/main.yml
include: multisite/main.yml
when:
@ -50,3 +51,9 @@
- containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
- name: include rgw_pool_pgs.yml
include: rgw_pool_pgs.yml
when:
- create_pools is defined
static: False

View File

@ -2,7 +2,23 @@
# Non-containerized deployment: the ceph CLI is available directly on the host.
# NOTE(review): `failed_when: false` silently ignores pool-creation errors
# (e.g. exceeding max pgs per OSD) — confirm this best-effort behavior is intended.
- name: create rgw pools if create_pools is defined
  command: ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}
  changed_when: false
  failed_when: false
  run_once: true
  with_dict: "{{ create_pools }}"
  when:
    - create_pools is defined
    - not containerized_deployment

# Containerized deployment: build the `docker exec` prefix targeting the
# first monitor's container (the mon holds the admin keyring).
- name: set_fact docker_exec_rgw_cmd
  set_fact:
    docker_exec_rgw_cmd: "docker exec ceph-mon-{{ hostvars[groups.get(mon_group_name)[0]]['ansible_hostname'] }}"
  when:
    - containerized_deployment

# Same pool creation as above, delegated to the first monitor.
- name: create rgw pools if create_pools is defined
  command: "{{ docker_exec_rgw_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
  changed_when: false
  run_once: true
  with_dict: "{{ create_pools }}"
  delegate_to: "{{ groups[mon_group_name][0] }}"
  when:
    - containerized_deployment

View File

@ -0,0 +1,6 @@
---
# Test fixture: RGW pools to create, mapping pool name -> placement-group count.
create_pools:
  foo:
    pg_num: 17
  bar:
    pg_num: 19

View File

@ -1,2 +1,7 @@
---
# Copy the admin keyring to the RGW nodes so pool creation can run there.
# (lowercase `true` per YAML 1.2 / yamllint truthy convention)
copy_admin_key: true
# Test fixture: RGW pools to create, mapping pool name -> placement-group count.
create_pools:
  foo:
    pg_num: 17
  bar:
    pg_num: 19

View File

@ -26,3 +26,18 @@ class TestRGWs(object):
assert pool_name in output
pg_num_str = "pg_num {pg_num}".format(pg_num=pg_num["pg_num"])
assert pg_num_str in output
@pytest.mark.docker
def test_docker_rgw_tuning_pools_are_set(self, node, host):
    """Check that every pool declared in `create_pools` exists with the
    requested pg_num on a containerized (Docker) deployment.

    Runs `ceph osd dump` inside the ceph-rgw container on this node and
    asserts each pool name and its `pg_num <N>` string appear in the output.
    """
    hostname = node["vars"]["inventory_hostname"]
    cluster = node['cluster_name']
    # NOTE(review): this execs inside the RGW container, which presumably
    # requires the admin keyring to be present there — confirm for the
    # no-copy_admin_key scenario.
    cmd = "sudo docker exec ceph-rgw-{hostname} ceph --cluster={cluster} --connect-timeout 5 osd dump".format(
        hostname=hostname,
        cluster=cluster
    )
    output = host.check_output(cmd)
    pools = node["vars"]["create_pools"]
    for pool_name, pg_num in pools.items():
        # Substring match against the `osd dump` text output.
        assert pool_name in output
        pg_num_str = "pg_num {pg_num}".format(pg_num=pg_num["pg_num"])
        assert pg_num_str in output