rgw: multisite refactor

Add the possibility to deploy an rgw multisite configuration with a mix of
secondary and primary zones on the same rgw node.
Previously, all instances on a given node had to be either primary
zones *or* secondary zones.

Now you can define an rgw instance like the following:

```
rgw_instances:
  - instance_name: 'rgw0'
    rgw_zonemaster: false
    rgw_zonesecondary: true
    rgw_zonegroupmaster: false
    rgw_realm: 'france'
    rgw_zonegroup: 'zonegroup-france'
    rgw_zone: paris-00
    radosgw_address: "{{ _radosgw_address }}"
    radosgw_frontend_port: 8080
    rgw_zone_user: jacques.chirac
    rgw_zone_user_display_name: "Jacques Chirac"
    system_access_key: P9Eb6S8XNyo4dtZZUUMy
    system_secret_key: qqHCUtfdNnpHq3PZRHW5un9l0bEBM812Uhow0XfB
    endpoint: http://192.168.101.12:8080
```

Basically, it is now possible to define `rgw_zonemaster`,
`rgw_zonesecondary` and `rgw_zonegroupmaster` at the instance
level instead of at the node level.
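
For instance, a single node can now host one primary and one secondary
instance side by side. A trimmed sketch, adapted from the host_vars used by
the functional tests in this change (user and key settings omitted for
brevity):

```
rgw_instances:
  - instance_name: 'rgw0'
    rgw_zonemaster: true
    rgw_zonesecondary: false
    rgw_zonegroupmaster: true
    rgw_realm: 'canada'
    rgw_zonegroup: 'zonegroup-canada'
    rgw_zone: montreal-00
    radosgw_address: "{{ _radosgw_address }}"
    radosgw_frontend_port: 8080
  - instance_name: 'rgw1'
    rgw_zonemaster: false
    rgw_zonesecondary: true
    rgw_zonegroupmaster: false
    rgw_realm: 'france'
    rgw_zonegroup: 'zonegroup-france'
    rgw_zone: montreal-01
    radosgw_address: "{{ _radosgw_address }}"
    radosgw_frontend_port: 8081
    endpoint: http://192.168.107.12:8081
```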

Also, this commit adds a `deploy_secondary_zones` option (default: `True`)
which can be set to `False` to explicitly ask the playbook not to
deploy secondary zones when the corresponding endpoints are not
deployed yet.
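
For example, when the primary cluster is deployed before its peer's endpoints
exist, the flag can be set in group_vars (or passed via `--extra-vars`, as the
functional tests in this change do). A minimal sketch, the exact file placement
being up to the operator:

```
# e.g. group_vars of the cluster that is deployed first
deploy_secondary_zones: False
```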

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1915478

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/6212/head
Guillaume Abrioux 2021-01-14 17:52:39 +01:00
parent fedb36688d
commit 71a5e666e3
34 changed files with 280 additions and 141 deletions

View File

@@ -100,6 +100,7 @@ def exit_module(module, out, rc, cmd, err, startd, changed=False):
     )
     module.exit_json(**result)
 
+
 def fatal(message, module):
     '''
     Report a fatal error and exit

View File

@@ -119,10 +119,12 @@ rgw frontends = {{ frontend_line(radosgw_frontend_type) }} {{ radosgw_frontend_o
 rgw thread pool size = {{ radosgw_thread_pool_size }}
 {% endif %}
 {% if rgw_multisite | bool %}
+{% if ((instance['rgw_zonemaster'] | default(rgw_zonemaster) | bool) or (deploy_secondary_zones | default(True) | bool)) %}
 rgw_realm = {{ instance['rgw_realm'] }}
 rgw_zonegroup = {{ instance['rgw_zonegroup'] }}
 rgw_zone = {{ instance['rgw_zone'] }}
 {% endif %}
+{% endif %}
 {% endfor %}
 {% endif %}
 {% endif %}

View File

@@ -1,39 +0,0 @@
----
-- name: set_fact realms
-  set_fact:
-    realms: '{{ realms | default([]) | union([item.rgw_realm]) }}'
-  loop: "{{ rgw_instances_all }}"
-  run_once: true
-
-- name: create list secondary_realms
-  set_fact:
-    secondary_realms: "{{ secondary_realms | default([]) | union([{ 'realm': item.rgw_realm, 'endpoint': item.endpoint, 'system_access_key': item.system_access_key, 'system_secret_key': item.system_secret_key, 'is_master': hostvars[item.host]['rgw_zonemaster'] }]) }}"
-  loop: "{{ rgw_instances_all }}"
-  run_once: true
-  when: not hostvars[item.host]['rgw_zonemaster'] | bool
-
-- name: create list zonegroups
-  set_fact:
-    zonegroups: "{{ zonegroups | default([]) | union([{ 'realm': item.rgw_realm, 'zonegroup': item.rgw_zonegroup, 'is_master': hostvars[item.host]['rgw_zonegroupmaster'] }]) }}"
-  loop: "{{ rgw_instances_all }}"
-  run_once: true
-  when:
-    - hostvars[item.host]['rgw_zonemaster'] | bool
-
-- name: create list zones
-  set_fact:
-    zones: "{{ zones | default([]) | union([{ 'realm': item.rgw_realm, 'zonegroup': item.rgw_zonegroup, 'zone': item.rgw_zone, 'is_master': hostvars[item.host]['rgw_zonemaster'], 'system_access_key': item.system_access_key, 'system_secret_key': item.system_secret_key }]) }}"
-  loop: "{{ rgw_instances_all }}"
-  run_once: true
-
-- name: create a list of dicts with each rgw endpoint and it's zone
-  set_fact:
-    zone_endpoint_pairs: "{{ zone_endpoint_pairs | default([]) | union([{ 'endpoint': hostvars[item.host]['rgw_multisite_proto'] + '://' + item.radosgw_address + ':' + item.radosgw_frontend_port | string, 'rgw_zone': item.rgw_zone, 'rgw_realm': item.rgw_realm, 'rgw_zonegroup': item.rgw_zonegroup, 'rgw_zonemaster': hostvars[item.host]['rgw_zonemaster']}]) }}"
-  loop: "{{ rgw_instances_all }}"
-  run_once: true
-
-- name: create a list of zones and all their endpoints
-  set_fact:
-    zone_endpoints_list: "{{ zone_endpoints_list | default([]) | union([{'zone': item.rgw_zone, 'zonegroup': item.rgw_zonegroup, 'realm': item.rgw_realm, 'is_master': item.rgw_zonemaster, 'endpoints': ','.join(zone_endpoint_pairs | selectattr('rgw_zone','match','^'+item.rgw_zone+'$') | selectattr('rgw_realm','match','^'+item.rgw_realm+'$') | selectattr('rgw_zonegroup', 'match','^'+item.rgw_zonegroup+'$') | map(attribute='endpoint'))}]) }}"
-  loop: "{{ zone_endpoint_pairs }}"
-  run_once: true

View File

@@ -5,8 +5,8 @@
   loop: "{{ rgw_instances_all }}"
   run_once: true
   when:
-    - hostvars[item.host]['rgw_zonemaster'] | bool
-    - hostvars[item.host]['rgw_zonegroupmaster'] | bool
+    - item.rgw_zonemaster | default(hostvars[item.host]['rgw_zonemaster']) | bool
+    - item.rgw_zonegroupmaster | default(hostvars[item.host]['rgw_zonegroupmaster']) | bool
 
 - name: create the zone user(s)
   radosgw_user:
@@ -22,6 +22,7 @@
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   loop: "{{ zone_users }}"
+  when: zone_users is defined
   environment:
     CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
     CEPH_CONTAINER_BINARY: "{{ container_binary }}"

View File

@@ -1,44 +1,50 @@
 ---
-- name: include_tasks create_realm_zonegroup_zone_lists.yml
-  include_tasks: create_realm_zonegroup_zone_lists.yml
+- name: set_fact realms
+  set_fact:
+    realms: '{{ realms | default([]) | union([item.rgw_realm]) }}'
+  run_once: true
+  loop: "{{ rgw_instances_all }}"
+  when: item.rgw_zonemaster | default(hostvars[item.host]['rgw_zonemaster']) | bool
+
+- name: create list zonegroups
+  set_fact:
+    zonegroups: "{{ zonegroups | default([]) | union([{ 'realm': item.rgw_realm, 'zonegroup': item.rgw_zonegroup, 'is_master': item.rgw_zonegroupmaster | default(hostvars[item.host]['rgw_zonegroupmaster']) }]) }}"
+  run_once: true
+  loop: "{{ rgw_instances_all }}"
+  when: item.rgw_zonegroupmaster | default(hostvars[item.host]['rgw_zonemaster']) | bool
+
+- name: create list zones
+  set_fact:
+    zones: "{{ zones | default([]) | union([{ 'realm': item.rgw_realm, 'zonegroup': item.rgw_zonegroup, 'zone': item.rgw_zone, 'is_master': item.rgw_zonemaster | default(hostvars[item.host]['rgw_zonemaster']), 'system_access_key': item.system_access_key, 'system_secret_key': item.system_secret_key }]) }}"
+  run_once: true
+  loop: "{{ rgw_instances_all }}"
+
+- name: create a list of dicts with each rgw endpoint and it's zone
+  set_fact:
+    zone_endpoint_pairs: "{{ zone_endpoint_pairs | default([]) | union([{ 'endpoint': hostvars[item.host]['rgw_multisite_proto'] + '://' + item.radosgw_address + ':' + item.radosgw_frontend_port | string, 'rgw_zone': item.rgw_zone, 'rgw_realm': item.rgw_realm, 'rgw_zonegroup': item.rgw_zonegroup, 'rgw_zonemaster': item.rgw_zonemaster | default(hostvars[item.host]['rgw_zonemaster']) }]) }}"
+  loop: "{{ rgw_instances_all }}"
+  run_once: true
+
+- name: create a list of zones and all their endpoints
+  set_fact:
+    zone_endpoints_list: "{{ zone_endpoints_list | default([]) | union([{'zone': item.rgw_zone, 'zonegroup': item.rgw_zonegroup, 'realm': item.rgw_realm, 'is_master': item.rgw_zonemaster, 'endpoints': ','.join(zone_endpoint_pairs | selectattr('rgw_zone','match','^'+item.rgw_zone+'$') | selectattr('rgw_realm','match','^'+item.rgw_realm+'$') | selectattr('rgw_zonegroup', 'match','^'+item.rgw_zonegroup+'$') | map(attribute='endpoint'))}]) }}"
+  loop: "{{ zone_endpoint_pairs }}"
+  run_once: true
 
 # Include the tasks depending on the zone type
 - name: include_tasks master.yml
   include_tasks: master.yml
-  when:
-    - rgw_zonemaster | bool
-    - not rgw_zonesecondary | bool
-
-- name: include_tasks start_radosgw.yml for zonemaster rgws
-  include_tasks: ../start_radosgw.yml
-  when:
-    - rgw_zonemaster | bool
-    - not rgw_zonesecondary | bool
-    - not containerized_deployment | bool
-
-- name: include_tasks start_docker_rgw.yml for zonemaster rgws
-  include_tasks: ../start_docker_rgw.yml
-  when:
-    - rgw_zonemaster | bool
-    - not rgw_zonesecondary | bool
-    - containerized_deployment | bool
 
 - name: include_tasks secondary.yml
   include_tasks: secondary.yml
-  when:
-    - not rgw_zonemaster | bool
-    - rgw_zonesecondary | bool
+  when: deploy_secondary_zones | default(True) | bool
 
-- name: include_tasks start_radosgw.yml for zonesecondary rgws
+- name: include_tasks start_radosgw.yml
   include_tasks: ../start_radosgw.yml
   when:
-    - not rgw_zonemaster | bool
-    - rgw_zonesecondary | bool
     - not containerized_deployment | bool
 
-- name: include_tasks start_docker_rgw.yml for zonesecondary rgws
+- name: include_tasks start_docker_rgw.yml
   include_tasks: ../start_docker_rgw.yml
   when:
-    - not rgw_zonemaster | bool
-    - rgw_zonesecondary | bool
     - containerized_deployment | bool

View File

@@ -1,4 +1,11 @@
 ---
+- name: create list secondary_realms
+  set_fact:
+    secondary_realms: "{{ secondary_realms | default([]) | union([{ 'realm': item.rgw_realm, 'zonegroup': item.rgw_zonegroup, 'zone': item.rgw_zone, 'endpoint': item.endpoint, 'system_access_key': item.system_access_key, 'system_secret_key': item.system_secret_key, 'is_master': item.rgw_zonemaster | default(hostvars[item.host]['rgw_zonemaster']) }]) }}"
+  loop: "{{ rgw_instances_all }}"
+  run_once: true
+  when: not item.rgw_zonemaster | default(hostvars[item.host]['rgw_zonemaster']) | bool
+
 - name: ensure connection to primary cluster from mon
   uri:
     url: "{{ item.endpoint }}"
@@ -11,6 +18,7 @@
   uri:
     url: "{{ item.endpoint }}"
   loop: "{{ rgw_instances }}"
+  when: not item.rgw_zonemaster | default(rgw_zonemaster) | bool
 
 - name: fetch the realm(s)
   command: "{{ container_exec_cmd }} radosgw-admin realm pull --cluster={{ cluster }} --rgw-realm={{ item.realm }} --url={{ item.endpoint }} --access-key={{ item.system_access_key }} --secret={{ item.system_secret_key }}"

View File

@@ -20,6 +20,10 @@
     enabled: yes
     masked: no
   with_items: "{{ rgw_instances }}"
+  when:
+    - not rgw_multisite | bool or
+      ((rgw_multisite | bool and item.rgw_zonesecondary | default(rgw_zonesecondary) | bool and deploy_secondary_zones | default(True)) or
+       (rgw_multisite | bool and item.rgw_zonemaster | default(rgw_zonemaster)))
 
 - name: enable the ceph-radosgw.target service
   systemd:

View File

@@ -8,14 +8,16 @@
 - name: fail if either rgw_zonemaster or rgw_zonesecondary is undefined
   fail:
     msg: "rgw_zonemaster and rgw_zonesecondary must be defined"
-  when: rgw_zonemaster is undefined or rgw_zonesecondary is undefined
+  loop: "{{ rgw_instances }}"
+  when: item.rgw_zonemaster | default(rgw_zonemaster) is undefined or item.rgw_zonesecondary | default(rgw_zonesecondary) is undefined
 
 - name: fail if rgw_zonemaster and rgw_zonesecondary are both true
   fail:
     msg: "rgw_zonemaster and rgw_zonesecondary cannot both be true"
+  loop: "{{ rgw_instances }}"
   when:
-    - rgw_zonemaster | bool
-    - rgw_zonesecondary | bool
+    - item.rgw_zonemaster | default(rgw_zonemaster) | bool
+    - item.rgw_zonesecondary | default(rgw_zonesecondary) | bool
 
 - name: fail if rgw_zonegroup is not set
   fail:
@@ -58,6 +60,7 @@
     msg: "endpoint has not been set by the user"
   loop: "{{ rgw_instances }}"
   when:
-    - rgw_zonesecondary | bool
-    - rgw_pull_port is undefined and rgw_pullhost is undefined and rgw_pull_proto is undefined
+    - item.rgw_zonesecondary | default(rgw_zonesecondary) | bool
+    - rgw_pull_port is undefined and rgw_pullhost is undefined and item.rgw_pull_proto | default(rgw_pull_proto) is undefined
     - item.endpoint is undefined

View File

@@ -29,4 +29,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
 ceph_docker_image_tag: latest-master

View File

@@ -2,6 +2,7 @@
 copy_admin_key: true
 # Enable Multisite support
 rgw_multisite: true
+rgw_multisite_proto: http
 rgw_create_pools:
   foo:
     pg_num: 16
@@ -10,7 +11,3 @@ rgw_create_pools:
     pg_num: 16
 rgw_override_bucket_index_max_shards: 16
 rgw_bucket_default_quota_max_objects: 1638400
-rgw_zonemaster: True
-rgw_zonesecondary: False
-rgw_zonegroupmaster: True
-rgw_multisite_proto: http

View File

@@ -1,8 +1,12 @@
+---
 rgw_instances:
   - instance_name: 'rgw0'
-    rgw_realm: 'france'
-    rgw_zonegroup: 'idf'
-    rgw_zone: 'paris'
+    rgw_zonemaster: True
+    rgw_zonesecondary: False
+    rgw_zonegroupmaster: True
+    rgw_realm: 'canada'
+    rgw_zonegroup: 'zonegroup-canada'
+    rgw_zone: montreal-00
     radosgw_address: "{{ _radosgw_address }}"
     radosgw_frontend_port: 8080
     rgw_zone_user: jacques.chirac
@@ -10,15 +14,19 @@ rgw_instances:
     system_access_key: P9Eb6S8XNyo4dtZZUUMy
     system_secret_key: qqHCUtfdNnpHq3PZRHW5un9l0bEBM812Uhow0XfB
   - instance_name: 'rgw1'
-    rgw_realm: 'usa'
-    rgw_zonegroup: 'alaska'
-    rgw_zone: 'juneau'
+    rgw_zonemaster: false
+    rgw_zonesecondary: true
+    rgw_zonegroupmaster: false
+    rgw_realm: 'france'
+    rgw_zonegroup: 'zonegroup-france'
+    rgw_zone: montreal-01
     radosgw_address: "{{ _radosgw_address }}"
     radosgw_frontend_port: 8081
     rgw_zone_user: edward.lewis
     rgw_zone_user_display_name: "Edward Lewis"
     system_access_key: yu17wkvAx3B8Wyn08XoF
     system_secret_key: 5YZfaSUPqxSNIkZQQA3lBZ495hnIV6k2HAz710BY
+    endpoint: http://192.168.107.12:8081
 # functional testing
-rgw_multisite_endpoint_addr: 192.168.105.11
+rgw_multisite_endpoint_addr: 192.168.105.12
 radosgw_num_instances: 2

View File

@@ -0,0 +1,29 @@
+---
+rgw_zonemaster: true
+rgw_zonesecondary: false
+rgw_zonegroupmaster: true
+rgw_multisite_proto: http
+rgw_instances:
+  - instance_name: 'rgw0'
+    rgw_realm: 'foo'
+    rgw_zonegroup: 'zonegroup123'
+    rgw_zone: 'gotham_city'
+    radosgw_address: "{{ _radosgw_address }}"
+    radosgw_frontend_port: 8080
+    rgw_zone_user: batman
+    rgw_zone_user_display_name: "Batman"
+    system_access_key: 9WA1GN33IUYC717S8KB2
+    system_secret_key: R2vWXyboYw9nluehMgtATBGDBZSuWLnR0M4xNa1W
+  - instance_name: 'rgw1'
+    rgw_realm: 'bar'
+    rgw_zonegroup: 'zonegroup456'
+    rgw_zone: 'metropolis'
+    radosgw_address: "{{ _radosgw_address }}"
+    radosgw_frontend_port: 8081
+    rgw_zone_user: superman
+    rgw_zone_user_display_name: "Superman"
+    system_access_key: S96CJL44E29AN91Y3ZC5
+    system_secret_key: ha7yWiIi7bSV2vAqMBfKjYIVKMfOBaGkWrUZifRt
+# functional testing
+rgw_multisite_endpoint_addr: 192.168.105.11
+radosgw_num_instances: 2

View File

@@ -6,3 +6,4 @@ osd0
 
 [rgws]
 osd0
+rgw0

View File

@@ -29,4 +29,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 ceph_docker_registry: quay.ceph.io
 ceph_docker_image: ceph-ci/daemon
 ceph_docker_image_tag: latest-master

View File

@@ -1,6 +1,7 @@
 ---
 # Enable Multisite support
 rgw_multisite: true
+rgw_multisite_proto: http
 rgw_create_pools:
   foo:
     pg_num: 16
@@ -9,7 +10,3 @@ rgw_create_pools:
     pg_num: 16
 rgw_override_bucket_index_max_shards: 16
 rgw_bucket_default_quota_max_objects: 1638400
-rgw_zonemaster: False
-rgw_zonesecondary: True
-rgw_zonegroupmaster: True
-rgw_multisite_proto: http
rgw_multisite_proto: http

View File

@@ -1,26 +1,32 @@
+---
 rgw_instances:
   - instance_name: 'rgw0'
-    rgw_realm: 'france'
-    rgw_zonegroup: 'idf'
-    rgw_zone: 'versailles'
+    rgw_zonemaster: false
+    rgw_zonesecondary: true
+    rgw_zonegroupmaster: false
+    rgw_realm: 'canada'
+    rgw_zonegroup: 'zonegroup-canada'
+    rgw_zone: paris-00
     radosgw_address: "{{ _radosgw_address }}"
     radosgw_frontend_port: 8080
     rgw_zone_user: jacques.chirac
     rgw_zone_user_display_name: "Jacques Chirac"
     system_access_key: P9Eb6S8XNyo4dtZZUUMy
     system_secret_key: qqHCUtfdNnpHq3PZRHW5un9l0bEBM812Uhow0XfB
-    endpoint: http://192.168.105.11:8080
+    endpoint: http://192.168.105.12:8080
   - instance_name: 'rgw1'
-    rgw_realm: 'usa'
-    rgw_zonegroup: 'alaska'
-    rgw_zone: 'anchorage'
+    rgw_zonemaster: True
+    rgw_zonesecondary: False
+    rgw_zonegroupmaster: True
+    rgw_realm: 'france'
+    rgw_zonegroup: 'zonegroup-france'
+    rgw_zone: paris-01
     radosgw_address: "{{ _radosgw_address }}"
     radosgw_frontend_port: 8081
     rgw_zone_user: edward.lewis
     rgw_zone_user_display_name: "Edward Lewis"
     system_access_key: yu17wkvAx3B8Wyn08XoF
     system_secret_key: 5YZfaSUPqxSNIkZQQA3lBZ495hnIV6k2HAz710BY
-    endpoint: http://192.168.105.11:8081
 # functional testing
-rgw_multisite_endpoint_addr: 192.168.107.11
+rgw_multisite_endpoint_addr: 192.168.107.12
 radosgw_num_instances: 2

View File

@@ -0,0 +1,31 @@
+---
+rgw_zonemaster: false
+rgw_zonesecondary: true
+rgw_zonegroupmaster: false
+rgw_multisite_proto: http
+rgw_instances:
+  - instance_name: 'rgw0'
+    rgw_realm: 'foo'
+    rgw_zonegroup: 'zonegroup123'
+    rgw_zone: 'gotham_city-secondary'
+    radosgw_address: "{{ _radosgw_address }}"
+    radosgw_frontend_port: 8080
+    rgw_zone_user: batman
+    rgw_zone_user_display_name: "Batman"
+    system_access_key: 9WA1GN33IUYC717S8KB2
+    system_secret_key: R2vWXyboYw9nluehMgtATBGDBZSuWLnR0M4xNa1W
+    endpoint: http://192.168.105.11:8080
+  - instance_name: 'rgw1'
+    rgw_realm: 'bar'
+    rgw_zonegroup: 'zonegroup456'
+    rgw_zone: 'metropolis-secondary'
+    radosgw_address: "{{ _radosgw_address }}"
+    radosgw_frontend_port: 8081
+    rgw_zone_user: superman
+    rgw_zone_user_display_name: "Superman"
+    system_access_key: S96CJL44E29AN91Y3ZC5
+    system_secret_key: ha7yWiIi7bSV2vAqMBfKjYIVKMfOBaGkWrUZifRt
+    endpoint: http://192.168.105.11:8081
+# functional testing
+rgw_multisite_endpoint_addr: 192.168.107.11
+radosgw_num_instances: 2

View File

@@ -6,3 +6,4 @@ osd0
 
 [rgws]
 osd0
+rgw0

View File

@@ -7,7 +7,7 @@ docker: true
 mon_vms: 1
 osd_vms: 1
 mds_vms: 0
-rgw_vms: 0
+rgw_vms: 1
 nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0

View File

@@ -7,7 +7,7 @@ docker: true
 mon_vms: 1
 osd_vms: 1
 mds_vms: 0
-rgw_vms: 0
+rgw_vms: 1
 nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0

View File

@@ -24,4 +24,4 @@ ceph_conf_overrides:
 mon_allow_pool_size_one: true
 mon_warn_on_pool_no_redundancy: false
 osd_pool_default_size: 1
 dashboard_enabled: False

View File

@@ -2,6 +2,7 @@
 copy_admin_key: true
 # Enable Multisite support
 rgw_multisite: true
+rgw_multisite_proto: http
 rgw_create_pools:
   foo:
     pg_num: 16
@@ -10,7 +11,3 @@ rgw_create_pools:
     pg_num: 16
 rgw_override_bucket_index_max_shards: 16
 rgw_bucket_default_quota_max_objects: 1638400
-rgw_zonemaster: True
-rgw_zonesecondary: False
-rgw_zonegroupmaster: True
-rgw_multisite_proto: http

View File

@@ -1,8 +1,12 @@
+---
 rgw_instances:
   - instance_name: 'rgw0'
-    rgw_realm: 'france'
-    rgw_zonegroup: 'idf'
-    rgw_zone: 'paris'
+    rgw_zonemaster: True
+    rgw_zonesecondary: False
+    rgw_zonegroupmaster: True
+    rgw_realm: 'canada'
+    rgw_zonegroup: 'zonegroup-canada'
+    rgw_zone: montreal-00
     radosgw_address: "{{ _radosgw_address }}"
     radosgw_frontend_port: 8080
     rgw_zone_user: jacques.chirac
@@ -10,15 +14,19 @@ rgw_instances:
     system_access_key: P9Eb6S8XNyo4dtZZUUMy
     system_secret_key: qqHCUtfdNnpHq3PZRHW5un9l0bEBM812Uhow0XfB
   - instance_name: 'rgw1'
-    rgw_realm: 'usa'
-    rgw_zonegroup: 'alaska'
-    rgw_zone: 'juneau'
+    rgw_zonemaster: false
+    rgw_zonesecondary: true
+    rgw_zonegroupmaster: false
+    rgw_realm: 'france'
+    rgw_zonegroup: 'zonegroup-france'
+    rgw_zone: montreal-01
    radosgw_address: "{{ _radosgw_address }}"
     radosgw_frontend_port: 8081
     rgw_zone_user: edward.lewis
     rgw_zone_user_display_name: "Edward Lewis"
     system_access_key: yu17wkvAx3B8Wyn08XoF
     system_secret_key: 5YZfaSUPqxSNIkZQQA3lBZ495hnIV6k2HAz710BY
+    endpoint: http://192.168.103.12:8081
 # functional testing
-rgw_multisite_endpoint_addr: 192.168.101.11
+rgw_multisite_endpoint_addr: 192.168.101.12
 radosgw_num_instances: 2

View File

@@ -0,0 +1,28 @@
+rgw_zonemaster: true
+rgw_zonesecondary: false
+rgw_zonegroupmaster: true
+rgw_multisite_proto: http
+rgw_instances:
+  - instance_name: 'rgw0'
+    rgw_realm: 'foo'
+    rgw_zonegroup: 'zonegroup123'
+    rgw_zone: 'gotham_city'
+    radosgw_address: "{{ _radosgw_address }}"
+    radosgw_frontend_port: 8080
+    rgw_zone_user: batman
+    rgw_zone_user_display_name: "Batman"
+    system_access_key: 9WA1GN33IUYC717S8KB2
+    system_secret_key: R2vWXyboYw9nluehMgtATBGDBZSuWLnR0M4xNa1W
+  - instance_name: 'rgw1'
+    rgw_realm: 'bar'
+    rgw_zonegroup: 'zonegroup456'
+    rgw_zone: 'metropolis'
+    radosgw_address: "{{ _radosgw_address }}"
+    radosgw_frontend_port: 8081
+    rgw_zone_user: superman
+    rgw_zone_user_display_name: "Superman"
+    system_access_key: S96CJL44E29AN91Y3ZC5
+    system_secret_key: ha7yWiIi7bSV2vAqMBfKjYIVKMfOBaGkWrUZifRt
+# functional testing
+rgw_multisite_endpoint_addr: 192.168.101.11
+radosgw_num_instances: 2

View File

@@ -6,3 +6,4 @@ osd0
 
 [rgws]
 osd0
+rgw0

View File

@@ -24,4 +24,4 @@ ceph_conf_overrides:
 mon_allow_pool_size_one: true
 mon_warn_on_pool_no_redundancy: false
 osd_pool_default_size: 1
 dashboard_enabled: False

View File

@@ -1,6 +1,7 @@
 ---
 # Enable Multisite support
 rgw_multisite: true
+rgw_multisite_proto: http
 rgw_create_pools:
   foo:
     pg_num: 16
@@ -9,7 +10,3 @@ rgw_create_pools:
     pg_num: 16
 rgw_override_bucket_index_max_shards: 16
 rgw_bucket_default_quota_max_objects: 1638400
-rgw_zonemaster: False
-rgw_zonesecondary: True
-rgw_zonegroupmaster: True
-rgw_multisite_proto: http

View File

@@ -1,26 +1,32 @@
+---
 rgw_instances:
   - instance_name: 'rgw0'
-    rgw_realm: 'france'
-    rgw_zonegroup: 'idf'
-    rgw_zone: 'versailles'
+    rgw_zonemaster: false
+    rgw_zonesecondary: true
+    rgw_zonegroupmaster: false
+    rgw_realm: 'canada'
+    rgw_zonegroup: 'zonegroup-canada'
+    rgw_zone: paris-00
     radosgw_address: "{{ _radosgw_address }}"
     radosgw_frontend_port: 8080
     rgw_zone_user: jacques.chirac
     rgw_zone_user_display_name: "Jacques Chirac"
     system_access_key: P9Eb6S8XNyo4dtZZUUMy
     system_secret_key: qqHCUtfdNnpHq3PZRHW5un9l0bEBM812Uhow0XfB
-    endpoint: http://192.168.101.11:8080
+    endpoint: http://192.168.101.12:8080
   - instance_name: 'rgw1'
-    rgw_realm: 'usa'
-    rgw_zonegroup: 'alaska'
-    rgw_zone: 'anchorage'
+    rgw_zonemaster: True
+    rgw_zonesecondary: False
+    rgw_zonegroupmaster: True
+    rgw_realm: 'france'
+    rgw_zonegroup: 'zonegroup-france'
+    rgw_zone: paris-01
     radosgw_address: "{{ _radosgw_address }}"
     radosgw_frontend_port: 8081
     rgw_zone_user: edward.lewis
     rgw_zone_user_display_name: "Edward Lewis"
     system_access_key: yu17wkvAx3B8Wyn08XoF
     system_secret_key: 5YZfaSUPqxSNIkZQQA3lBZ495hnIV6k2HAz710BY
-    endpoint: http://192.168.101.11:8081
 # functional testing
-rgw_multisite_endpoint_addr: 192.168.103.11
+rgw_multisite_endpoint_addr: 192.168.103.12
 radosgw_num_instances: 2

View File

@@ -0,0 +1,31 @@
+---
+rgw_zonemaster: false
+rgw_zonesecondary: true
+rgw_zonegroupmaster: false
+rgw_multisite_proto: http
+rgw_instances:
+  - instance_name: 'rgw0'
+    rgw_realm: 'foo'
+    rgw_zonegroup: 'zonegroup123'
+    rgw_zone: 'gotham_city-secondary'
+    radosgw_address: "{{ _radosgw_address }}"
+    radosgw_frontend_port: 8080
+    rgw_zone_user: batman
+    rgw_zone_user_display_name: "Batman"
+    system_access_key: 9WA1GN33IUYC717S8KB2
+    system_secret_key: R2vWXyboYw9nluehMgtATBGDBZSuWLnR0M4xNa1W
+    endpoint: http://192.168.101.11:8080
+  - instance_name: 'rgw1'
+    rgw_realm: 'bar'
+    rgw_zonegroup: 'zonegroup456'
+    rgw_zone: 'metropolis-secondary'
+    radosgw_address: "{{ _radosgw_address }}"
+    radosgw_frontend_port: 8081
+    rgw_zone_user: superman
+    rgw_zone_user_display_name: "Superman"
+    system_access_key: S96CJL44E29AN91Y3ZC5
+    system_secret_key: ha7yWiIi7bSV2vAqMBfKjYIVKMfOBaGkWrUZifRt
+    endpoint: http://192.168.101.11:8081
+# functional testing
+rgw_multisite_endpoint_addr: 192.168.103.11
+radosgw_num_instances: 2

View File

@@ -6,3 +6,4 @@ osd0
 
 [rgws]
 osd0
+rgw0

View File

@@ -7,7 +7,7 @@ docker: false
 mon_vms: 1
 osd_vms: 1
 mds_vms: 0
-rgw_vms: 0
+rgw_vms: 1
 nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0

View File

@@ -7,7 +7,7 @@ docker: false
 mon_vms: 1
 osd_vms: 1
 mds_vms: 0
-rgw_vms: 0
+rgw_vms: 1
 nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0

View File

@@ -29,8 +29,9 @@
       s3cmd --no-ssl --access_key={{ item.system_access_key }} --secret_key={{ item.system_secret_key }} --host={{ item.radosgw_address }}:{{ item.radosgw_frontend_port }} --host-bucket={{ item.radosgw_address }}:{{ item.radosgw_frontend_port }} mb s3://testinfra-{{ item.rgw_realm }};
       s3cmd --no-ssl --access_key={{ item.system_access_key }} --secret_key={{ item.system_secret_key }} --host={{ item.radosgw_address }}:{{ item.radosgw_frontend_port }} --host-bucket={{ item.radosgw_address }}:{{ item.radosgw_frontend_port }} put /tmp/testinfra-{{ item.rgw_realm }}.img s3://testinfra-{{ item.rgw_realm }}'
   with_items: "{{ rgw_instances_host }}"
+  tags: upload
   when:
-    - rgw_zonemaster | bool
+    - item.rgw_zonemaster | default(rgw_zonemaster) | bool
     - containerized_deployment | bool
 
 - name: generate and upload a random a 10Mb file - non containerized
@@ -39,8 +40,9 @@
       s3cmd --no-ssl --access_key={{ item.system_access_key }} --secret_key={{ item.system_secret_key }} --host={{ item.radosgw_address }}:{{ item.radosgw_frontend_port }} --host-bucket={{ item.radosgw_address }}:{{ item.radosgw_frontend_port }} mb s3://testinfra-{{ item.rgw_realm }};
      s3cmd --no-ssl --access_key={{ item.system_access_key }} --secret_key={{ item.system_secret_key }} --host={{ item.radosgw_address }}:{{ item.radosgw_frontend_port }} --host-bucket={{ item.radosgw_address }}:{{ item.radosgw_frontend_port }} put /tmp/testinfra-{{ item.rgw_realm }}.img s3://testinfra-{{ item.rgw_realm }};
   with_items: "{{ rgw_instances_host }}"
+  tags: upload
   when:
-    - rgw_zonemaster | bool
+    - item.rgw_zonemaster | default(rgw_zonemaster) | bool
     - not containerized_deployment | bool
 
 - name: get info from replicated file - containerized deployment
@@ -51,8 +53,9 @@
   retries: 60
   delay: 1
   until: result is succeeded
+  tags: download
   when:
-    - not rgw_zonemaster | bool
+    - not item.rgw_zonemaster | default(rgw_zonemaster) | bool
     - containerized_deployment | bool
 
 - name: get info from replicated file - non containerized
@@ -63,6 +66,7 @@
   retries: 60
   delay: 1
   until: result is succeeded
+  tags: download
   when:
-    - not rgw_zonemaster | bool
+    - not item.rgw_zonemaster | default(rgw_zonemaster) | bool
     - not containerized_deployment | bool

tox.ini
View File

@@ -229,8 +229,17 @@ commands=
         ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
         ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
   "
-  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rgw_multisite.yml
-  ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/rgw_multisite.yml
+  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit rgws --extra-vars "\
+        ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
+        ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
+        ceph_docker_registry_auth=True \
+        ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+        ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+  "
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rgw_multisite.yml --skip-tags download
+  ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/rgw_multisite.yml --skip-tags download
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rgw_multisite.yml --skip-tags upload
+  ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/rgw_multisite.yml --skip-tags upload
   bash -c "cd {changedir}/secondary && vagrant destroy --force"
   # clean rule after the scenario is complete
   ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1 state=absent'
@@ -331,6 +340,7 @@ commands=
   ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
   ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+        deploy_secondary_zones=False \
         ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
         ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
         ceph_docker_registry_auth=True \