mirror of https://github.com/ceph/ceph-ansible.git
ceph-dashboard: update create/get rgw user tasks
Since [1], if an rgw user already exists, the radosgw-admin user create command returns an error instead of modifying the existing user. We already had separate create and get tasks, but only for multisite configurations, and that is not enough. Instead, run the get task first and execute the create task depending on its result. This commit also adds the missing run_once and delegate_to statements.

[1] https://github.com/ceph/ceph/commit/269e9b9

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
parent 2a2656a985
commit ac0f68ccf0
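The change described above boils down to "look the user up first, and only create it when the lookup says it does not exist". Here is a minimal sketch of that flow in plain Python, using subprocess instead of Ansible tasks; the function name and error handling are illustrative, and treating exit code 22 as "user does not exist" is taken from the get_rgw_user.rc == 22 condition in the diff below.

import json
import subprocess


def get_or_create_dashboard_user(cluster, uid, display_name="Ceph dashboard"):
    """Return the radosgw user document, creating the user only if needed."""
    base = ["radosgw-admin", "--cluster", cluster]
    # Ask for the existing user first.
    info = subprocess.run(base + ["user", "info", "--uid={}".format(uid)],
                          capture_output=True, text=True)
    if info.returncode == 0:
        return json.loads(info.stdout)
    if info.returncode != 22:
        # Anything other than "user does not exist" is a real failure.
        raise RuntimeError(info.stderr)
    # The user is missing, so create it as a system user, as the playbook does.
    create = subprocess.run(base + ["user", "create", "--uid={}".format(uid),
                                    "--display-name={}".format(display_name),
                                    "--system"],
                            capture_output=True, text=True, check=True)
    return json.loads(create.stdout)

The playbook below expresses the same idea declaratively: the info task runs with failed_when: false, and the create task is gated on its registered return code.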
@@ -126,6 +126,16 @@
 - name: dashboard object gateway management frontend
   when: groups.get(rgw_group_name, []) | length > 0
   block:
+    - name: get radosgw system user
+      command: "timeout --foreground -s KILL 20 {{ container_exec_cmd }} radosgw-admin --cluster {{ cluster }} user info --uid={{ dashboard_rgw_api_user_id }}"
+      register: get_rgw_user
+      until: get_rgw_user.rc == 0
+      retries: 3
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+      run_once: true
+      failed_when: false
+      changed_when: false
+
     - name: create radosgw system user
       command: "timeout --foreground -s KILL 20 {{ container_exec_cmd }} radosgw-admin --cluster {{ cluster }} user create --uid={{ dashboard_rgw_api_user_id }} --display-name='Ceph dashboard' --system"
       register: create_rgw_user
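The get radosgw system user task added above is deliberately allowed to fail (failed_when: false) so that the create task in the next hunk can inspect its registered return code. Its timeout --foreground -s KILL 20 wrapper plus until/retries amount to roughly the following, sketched in Python; the 20-second kill timeout and the 3 attempts are taken from the task, while the pause between attempts is illustrative.

import subprocess
import time


def run_until_ok(cmd, attempts=3, timeout=20, pause=5):
    """Run cmd up to `attempts` times, giving each run `timeout` seconds."""
    result = None
    for n in range(attempts):
        try:
            result = subprocess.run(cmd, capture_output=True, text=True,
                                    timeout=timeout)
        except subprocess.TimeoutExpired:
            result = None  # the run was killed after `timeout` seconds
        if result is not None and result.returncode == 0:
            break
        if n < attempts - 1:
            time.sleep(pause)
    # As with failed_when: false, a non-zero exit code is not an error here;
    # the caller decides what to do with the result.
    return result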
@@ -133,22 +143,15 @@
       retries: 3
       delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: true
-      when: not rgw_multisite | bool or rgw_zonemaster | bool
-
-    - name: get radosgw system user
-      command: "timeout --foreground -s KILL 20 {{ container_exec_cmd }} radosgw-admin --cluster {{ cluster }} user info --uid={{ dashboard_rgw_api_user_id }}"
-      register: get_rgw_user
-      until: get_rgw_user.rc == 0
-      retries: 3
       when:
-        - rgw_multisite | bool
-        - not rgw_zonemaster | bool
-        - rgw_zonesecondary | bool
+        - not rgw_multisite | bool or rgw_zonemaster | bool
+        - get_rgw_user.rc == 22

     - name: get the rgw access and secret keys
       set_fact:
         rgw_access_key: "{{ (create_rgw_user.stdout | default(get_rgw_user.stdout) | from_json)['keys'][0]['access_key'] }}"
         rgw_secret_key: "{{ (create_rgw_user.stdout | default(get_rgw_user.stdout) | from_json)['keys'][0]['secret_key'] }}"
+      run_once: true

     - name: set the rgw user
       command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-user-id {{ dashboard_rgw_api_user_id }}"
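The get the rgw access and secret keys task above is plain JSON plucking: whichever of the two radosgw-admin commands ran prints the user document on stdout, and the S3 credentials sit under keys[0]. Here is the same extraction in Python, with a sample document trimmed to the fields the set_fact expressions actually read; the field values are placeholders and the overall shape is assumed from those expressions.

import json

# Trimmed sample of the user document printed by radosgw-admin; only the
# fields read by the set_fact task above are shown, and values are placeholders.
sample_output = """
{
  "user_id": "ceph-dashboard",
  "keys": [
    {
      "user": "ceph-dashboard",
      "access_key": "ACCESSKEYEXAMPLE",
      "secret_key": "secretkeyexample"
    }
  ]
}
"""

user = json.loads(sample_output)
rgw_access_key = user["keys"][0]["access_key"]
rgw_secret_key = user["keys"][0]["secret_key"]
print(rgw_access_key, rgw_secret_key)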
@@ -13,12 +13,18 @@ class TestRGWs(object):

     @pytest.mark.no_docker
     def test_rgw_bucket_default_quota_is_applied(self, node, host, setup):
-        radosgw_admin_cmd = "sudo radosgw-admin --cluster={cluster} -n client.rgw.{hostname}.rgw0 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring user create --uid=test --display-name Test".format( # noqa E501
+        radosgw_admin_cmd = "sudo radosgw-admin --cluster={cluster} -n client.rgw.{hostname}.rgw0 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring user info --uid=test".format( # noqa E501
             hostname=node["vars"]["inventory_hostname"],
             cluster=setup['cluster_name']
         )
-        radosgw_admin_output = host.check_output(radosgw_admin_cmd)
-        radosgw_admin_output_json = json.loads(radosgw_admin_output)
+        radosgw_admin_output = host.run(radosgw_admin_cmd)
+        if radosgw_admin_output.rc == 22:
+            radosgw_admin_cmd = "sudo radosgw-admin --cluster={cluster} -n client.rgw.{hostname}.rgw0 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring user create --uid=test --display-name Test".format( # noqa E501
+                hostname=node["vars"]["inventory_hostname"],
+                cluster=setup['cluster_name']
+            )
+            radosgw_admin_output = host.run(radosgw_admin_cmd)
+        radosgw_admin_output_json = json.loads(radosgw_admin_output.stdout)
         assert radosgw_admin_output_json["bucket_quota"]["enabled"] == True # noqa E501
         assert radosgw_admin_output_json["bucket_quota"]["max_objects"] == 1638400 # noqa E501

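The updated test repeats the long radosgw-admin command string for the info and the create call. If more calls are added later, the same get-or-create pattern could be factored into a small helper along these lines; the helper names are hypothetical and it relies only on the host.run call and the node/setup fixtures the test already uses.

def radosgw_admin(host, node, setup, args):
    """Run radosgw-admin inside the test cluster and return the result object."""
    cmd = ("sudo radosgw-admin --cluster={cluster} "
           "-n client.rgw.{hostname}.rgw0 "
           "--keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring "
           "{args}").format(
        hostname=node["vars"]["inventory_hostname"],
        cluster=setup['cluster_name'],
        args=args,
    )
    return host.run(cmd)


def get_or_create_test_user(host, node, setup):
    # Look the user up first; fall back to creating it when rc 22 signals
    # that it does not exist, mirroring the test above.
    result = radosgw_admin(host, node, setup, "user info --uid=test")
    if result.rc == 22:
        result = radosgw_admin(host, node, setup,
                               "user create --uid=test --display-name Test")
    return result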