rgw: refactor rgw pools creation

Refactor of 8704144e31
There is no need to have duplicated tasks for this. The rgw pools
creation should be delegated to a monitor node so we don't have to care
whether the admin keyring is present on the rgw node.
Moreover, only one task is needed to create the pools; we just need to
use the `docker_exec_cmd` fact already defined in `ceph-defaults` to
achieve it.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1550281

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/2705/head
Guillaume Abrioux 2018-06-01 17:33:54 +02:00 committed by Sébastien Han
parent 1f3c9ce4f3
commit 2cf06b515f
6 changed files with 15 additions and 69 deletions

View File

@ -6,8 +6,3 @@
- name: include start_docker_rgw.yml
include: start_docker_rgw.yml
- name: include rgw_pool_pgs.yml
include: rgw_pool_pgs.yml
when:
- rgw_create_pools is defined

View File

@ -1,26 +0,0 @@
---
# If admin key has been copied to the RGWs, we can run the command from them.
- name: create rgw pools if rgw_create_pools is defined
command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
changed_when: false
run_once: true
with_dict: "{{ rgw_create_pools }}"
when:
- copy_admin_key
# If no admin key has been copied to the RGWs, we have to run the command from the first monitor.
- name: set_fact docker_exec_mon_cmd
set_fact:
docker_exec_mon_cmd: "docker exec ceph-mon-{{ hostvars[groups.get(mon_group_name)[0]]['ansible_hostname'] }}"
when:
- not copy_admin_key
# If no admin key has been copied to the RGWs, we have to run the command from the first monitor.
- name: create rgw pools if rgw_create_pools is defined, delegated to first monitor
command: "{{ docker_exec_mon_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
changed_when: false
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
with_dict: "{{ rgw_create_pools }}"
when:
- not copy_admin_key

View File

@ -1,10 +1,4 @@
---
- name: set_fact docker_exec_cmd
set_fact:
docker_exec_cmd: "docker exec ceph-rgw-{{ ansible_hostname }}"
when:
- containerized_deployment
- name: include common.yml
include: common.yml
@ -29,13 +23,6 @@
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
- name: include rgw_pool_pgs.yml
include: rgw_pool_pgs.yml
when:
- rgw_create_pools is defined
- not containerized_deployment
static: False
- name: include multisite/main.yml
include: multisite/main.yml
when:
@ -51,3 +38,12 @@
- containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
- name: create rgw pools if rgw_create_pools is defined
command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
changed_when: false
with_dict: "{{ rgw_create_pools }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when:
- rgw_create_pools is defined

View File

@ -1,24 +0,0 @@
---
- name: create rgw pools if rgw_create_pools is defined
command: ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}
changed_when: false
run_once: true
with_dict: "{{ rgw_create_pools }}"
when:
- not containerized_deployment
# On first monitor.
- name: set_fact docker_exec_rgw_cmd
set_fact:
docker_exec_rgw_cmd: "docker exec ceph-mon-{{ hostvars[groups.get(mon_group_name)[0]]['ansible_hostname'] }}"
when:
- containerized_deployment
- name: create rgw pools if rgw_create_pools is defined
command: "{{ docker_exec_rgw_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
changed_when: false
run_once: true
with_dict: "{{ rgw_create_pools }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- containerized_deployment

View File

@ -67,6 +67,11 @@ all:
radosgw_keystone_ssl: false
user_config: true
copy_admin_key: true
rgw_create_pools:
foo:
pg_num: 17
bar:
pg_num: 19
clients:
hosts:
client0: {}