diff --git a/roles/ceph-rgw/tasks/docker/main.yml b/roles/ceph-rgw/tasks/docker/main.yml
index 4850e4b7c..a46797a31 100644
--- a/roles/ceph-rgw/tasks/docker/main.yml
+++ b/roles/ceph-rgw/tasks/docker/main.yml
@@ -6,3 +6,8 @@
 
 - name: include start_docker_rgw.yml
   include: start_docker_rgw.yml
+
+- name: include rgw_pool_pgs.yml
+  include: rgw_pool_pgs.yml
+  when:
+    - create_pools is defined
diff --git a/roles/ceph-rgw/tasks/docker/rgw_pool_pgs.yml b/roles/ceph-rgw/tasks/docker/rgw_pool_pgs.yml
new file mode 100644
index 000000000..ba781bb76
--- /dev/null
+++ b/roles/ceph-rgw/tasks/docker/rgw_pool_pgs.yml
@@ -0,0 +1,26 @@
+---
+# If the admin key has been copied to the RGWs, we can run the command from them.
+- name: create rgw pools if create_pools is defined
+  command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
+  changed_when: false
+  run_once: true
+  with_dict: "{{ create_pools }}"
+  when:
+    - copy_admin_key
+
+# If no admin key has been copied to the RGWs, we have to run the command from the first monitor.
+- name: set_fact docker_exec_mon_cmd
+  set_fact:
+    docker_exec_mon_cmd: "docker exec ceph-mon-{{ hostvars[groups.get(mon_group_name)[0]]['ansible_hostname'] }}"
+  when:
+    - not copy_admin_key
+
+# Create the pools from the first monitor, since the RGWs have no admin key.
+- name: create rgw pools if create_pools is defined, delegated to first monitor
+  command: "{{ docker_exec_mon_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
+  changed_when: false
+  run_once: true
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  with_dict: "{{ create_pools }}"
+  when:
+    - not copy_admin_key
diff --git a/roles/ceph-rgw/tasks/main.yml b/roles/ceph-rgw/tasks/main.yml
index 6ae24e997..f5db97691 100644
--- a/roles/ceph-rgw/tasks/main.yml
+++ b/roles/ceph-rgw/tasks/main.yml
@@ -33,8 +33,9 @@
   include: rgw_pool_pgs.yml
   when:
     - create_pools is defined
-  static: False
-
+    - not containerized_deployment
+  static: False
+
 - name: include multisite/main.yml
   include: multisite/main.yml
   when:
@@ -50,3 +51,9 @@
     - containerized_deployment
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
+
+- name: include rgw_pool_pgs.yml
+  include: rgw_pool_pgs.yml
+  when:
+    - create_pools is defined
+  static: False
diff --git a/roles/ceph-rgw/tasks/rgw_pool_pgs.yml b/roles/ceph-rgw/tasks/rgw_pool_pgs.yml
index 4e6d38817..3675475b3 100644
--- a/roles/ceph-rgw/tasks/rgw_pool_pgs.yml
+++ b/roles/ceph-rgw/tasks/rgw_pool_pgs.yml
@@ -2,7 +2,23 @@
 - name: create rgw pools if create_pools is defined
   command: ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}
   changed_when: false
-  failed_when: false
+  run_once: true
   with_dict: "{{ create_pools }}"
   when:
-    - create_pools is defined
+    - not containerized_deployment
+
+# In a containerized deployment, run the command from the first monitor.
+- name: set_fact docker_exec_rgw_cmd
+  set_fact:
+    docker_exec_rgw_cmd: "docker exec ceph-mon-{{ hostvars[groups.get(mon_group_name)[0]]['ansible_hostname'] }}"
+  when:
+    - containerized_deployment
+
+- name: create rgw pools if create_pools is defined
+  command: "{{ docker_exec_rgw_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
+  changed_when: false
+  run_once: true
+  with_dict: "{{ create_pools }}"
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  when:
+    - containerized_deployment
diff --git a/tests/functional/centos/7/docker-collocation/group_vars/rgws b/tests/functional/centos/7/docker-collocation/group_vars/rgws
new file mode 100644
index 000000000..75c89230f
--- /dev/null
+++ b/tests/functional/centos/7/docker-collocation/group_vars/rgws
@@ -0,0 +1,6 @@
+---
+create_pools:
+  foo:
+    pg_num: 17
+  bar:
+    pg_num: 19
diff --git a/tests/functional/centos/7/docker/group_vars/rgws b/tests/functional/centos/7/docker/group_vars/rgws
index faec15233..65f9b2311 100644
--- a/tests/functional/centos/7/docker/group_vars/rgws
+++ b/tests/functional/centos/7/docker/group_vars/rgws
@@ -1,2 +1,7 @@
 ---
 copy_admin_key: True
+create_pools:
+  foo:
+    pg_num: 17
+  bar:
+    pg_num: 19
diff --git a/tests/functional/tests/rgw/test_rgw_tuning.py b/tests/functional/tests/rgw/test_rgw_tuning.py
index 9140d23e5..2b3d75c25 100644
--- a/tests/functional/tests/rgw/test_rgw_tuning.py
+++ b/tests/functional/tests/rgw/test_rgw_tuning.py
@@ -26,3 +26,18 @@ class TestRGWs(object):
             assert pool_name in output
             pg_num_str = "pg_num {pg_num}".format(pg_num=pg_num["pg_num"])
             assert pg_num_str in output
+
+    @pytest.mark.docker
+    def test_docker_rgw_tuning_pools_are_set(self, node, host):
+        hostname = node["vars"]["inventory_hostname"]
+        cluster = node['cluster_name']
+        cmd = "sudo docker exec ceph-rgw-{hostname} ceph --cluster={cluster} --connect-timeout 5 osd dump".format(
+            hostname=hostname,
+            cluster=cluster
+        )
+        output = host.check_output(cmd)
+        pools = node["vars"]["create_pools"]
+        for pool_name, pg_num in pools.items():
+            assert pool_name in output
+            pg_num_str = "pg_num {pg_num}".format(pg_num=pg_num["pg_num"])
+            assert pg_num_str in output
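
Usage note (not part of the patch): each entry in create_pools maps to one "ceph osd pool create <name> <pg_num>" call, run once per pool, either from an RGW node (bare metal, or containerized when the admin key has been copied there) or delegated to the first monitor otherwise. Below is a minimal group_vars sketch reusing the illustrative foo/bar pools and pg_num values from the functional tests; any pool names and pg_num values with this shape work the same way.

    # group_vars/rgws -- illustrative values only
    ---
    copy_admin_key: True   # containerized RGWs can then create the pools themselves
    create_pools:
      foo:
        pg_num: 17
      bar:
        pg_num: 19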