Experimental RGW Multisite Support

pull/936/head
James Saint-Rossy 2016-08-16 14:57:01 -04:00
parent 052e4cc687
commit d8609ec687
11 changed files with 159 additions and 43 deletions


@@ -0,0 +1,81 @@
RGW Multisite (Experimental)
============================
Directions for configuring RGW multisite support in ceph-ansible.

## Requirements

* At least 2 Ceph clusters
* 1 RGW per cluster
* Jewel or newer

More details:

* Can configure a master and a secondary realm/zonegroup/zone on 2 separate clusters.
## Configuring the Master Zone in the Primary Cluster

This will set up the realm, zonegroup, and master zone and make them the defaults. It will also reconfigure the specified RGW to serve that zone.
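For orientation, the master-zone tasks in this diff boil down to roughly the following `radosgw-admin` calls (a sketch: the realm-creation step is not shown in this diff, so its flags are an assumption, and `<access-key>`/`<secret>` stand in for the keys generated below):
```
radosgw-admin realm create --rgw-realm=gold --default
radosgw-admin zonegroup create --rgw-zonegroup=us --endpoints=http://cluster0-rgw0.fqdn:8080 --master --default
radosgw-admin zone create --rgw-zonegroup=us --rgw-zone=us-east --endpoints=http://cluster0-rgw0.fqdn:8080 --access-key=<access-key> --secret=<secret> --default --master
radosgw-admin period update --commit
```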
1. Edit the Inventory File
```
[rgws]
cluster0-rgw0 rgw_zone=us-east rgw_zonemaster=true
```
1. Generate System Access and System Secret Keys
```
echo system_access_key: $(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1) > multi-site-keys.sh
echo system_secret_key: $(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 40 | head -n 1) >> multi-site-keys.sh
```
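The resulting file holds two YAML-style lines you can paste straight into your group vars, e.g. (your generated values will differ):
```
system_access_key: 6kWkikvapSnHyE22P7nO
system_secret_key: MGecsMrWtKZgngOHZdrd6d3JxGO5CPWgT2lcnpSt
```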
1. Edit the RGW Group Vars
```
copy_admin_key: true
# Enable Multisite support
rgw_multisite: true
rgw_realm: gold
rgw_zonegroup: us
system_access_key: 6kWkikvapSnHyE22P7nO
system_secret_key: MGecsMrWtKZgngOHZdrd6d3JxGO5CPWgT2lcnpSt
```
**Note:** Replace the `system_access_key` and `system_secret_key` values with the ones you generated.
1. Run the ceph-ansible playbook on your 1st cluster, for example as shown below.

**Note:** If you have already installed a cluster with ceph-ansible, you can use the `rgw-standalone.yml` playbook as a shortcut (it only runs the ceph-rgw role).
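A minimal run, assuming your inventory file is named `hosts` and you use the standard `site.yml` play (both names are assumptions; adjust to your setup):
```
ansible-playbook -i hosts site.yml
```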
## Configuring the Secondary Zone in a Separate Cluster

This will pull the realm and period from the master zone's RGW, create the secondary zone, and reconfigure the specified RGW for use with it.

1. Edit the Inventory File
```
[rgws]
cluster1-rgw0 rgw_zone=us-west rgw_zonesecondary=true
```
1. Edit the RGW Group Vars
```
copy_admin_key: true
# Enable Multisite support
rgw_multisite: true
rgw_realm: gold
rgw_zonegroup: us
rgw_pullhost: cluster0-rgw0.fqdn
system_access_key: 6kWkikvapSnHyE22P7nO
system_secret_key: MGecsMrWtKZgngOHZdrd6d3JxGO5CPWgT2lcnpSt
```
**Note:** `rgw_pullhost` should be the host of the RGW that is configured as the master zone (here, the RGW of the first cluster).

**Note:** `system_access_key` and `system_secret_key` should match the values you used in the 1st cluster.
1. Run the ceph-ansible playbook on your 2nd cluster, for example as shown below.

**Note:** If you have already installed a cluster with ceph-ansible, you can use the `rgw-standalone.yml` playbook as a shortcut (it only runs the ceph-rgw role).
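Again a minimal sketch, assuming a separate inventory file for the second cluster (`hosts-cluster1` is a hypothetical name):
```
ansible-playbook -i hosts-cluster1 site.yml
```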
## Conclusion

You should now have a master zone on cluster0 and a secondary zone on cluster1, replicating in an active-active configuration.
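To verify that the zones are syncing, query the sync status from either cluster (`sync status` is available from Jewel onward):
```
radosgw-admin sync status
```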


@@ -423,6 +423,7 @@
- name: request data removal
  local_action: shell echo requesting data removal
  become: false
  notify:
    - remove data


@@ -0,0 +1,11 @@
---
# Nukes a multisite config
- hosts: rgws
  become: True
  tasks:
    - include: ~/ceph-ansible-master/roles/ceph-rgw/tasks/multisite/destroy.yml
  handlers:
    - include: ~/ceph-ansible-master/roles/ceph-rgw/handlers/main.yml
      # Ansible 2.1.0 bug will ignore included handlers without this
      static: True
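Assuming the play above is saved as `rgw-multisite-destroy.yml` (a hypothetical name; the diff does not show the file name) and your inventory file is `hosts`, a teardown run looks like:
```
ansible-playbook -i hosts rgw-multisite-destroy.yml
```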

rgw-standalone.yml

@@ -0,0 +1,10 @@
---
# Run the ceph-rgw role standalone
# Load facts from the mons first, because ceph-common needs them to generate ceph.conf
- hosts: mons

- hosts: rgws
  become: True
  roles:
    - ceph-rgw
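It runs like the main playbook (the inventory name `hosts` is an assumption):
```
ansible-playbook -i hosts rgw-standalone.yml
```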


@@ -104,9 +104,6 @@ rgw keystone revocation interval = {{ radosgw_keystone_revocation_internal }}
rgw s3 auth use keystone = {{ radosgw_s3_auth_use_keystone }}
nss db path = {{ radosgw_nss_db_path }}
{% endif %}
{% endif %}
{% endfor %}
{% endif %}
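This hunk drops the static `rgw zone` line from the ceph-common template; the multisite tasks later in the diff manage that line with `lineinfile` instead, so a restart handler can be notified when it changes. The resulting RGW stanza looks roughly like this (host and zone names taken from the inventory example above):
```
[client.rgw.cluster0-rgw0]
rgw_zone = us-east
```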


@@ -1,3 +1,8 @@
---
- name: update period
  command: radosgw-admin period update --commit

- name: restart rgw
  service:
    name: ceph-radosgw@rgw.{{ ansible_host }}
    state: restarted
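For reference, outside of Ansible these handlers amount to running the following on the RGW node (a sketch; the systemd instance after `rgw.` must match the name the RGW was registered with, often the short hostname):
```
radosgw-admin period update --commit
systemctl restart ceph-radosgw@rgw.$(hostname -s)
```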


@@ -1,15 +1,26 @@
---
- include: pre_requisite.yml
  when: not rgw_containerized_deployment
  # Hard code this so we skip the entire file instead of individual tasks (the default isn't consistent)
  static: False

- include: openstack-keystone.yml
  when: radosgw_keystone

- include: start_radosgw.yml
  when: not rgw_containerized_deployment
  # Hard code this so we skip the entire file instead of individual tasks (the default isn't consistent)
  static: False

- name: Include rgw multisite playbooks
  include: multisite/main.yml
  when: rgw_zone is defined and rgw_multisite and ( is_jewel or is_after_jewel )
  # Hard code this so we skip the entire file instead of individual tasks (the default isn't consistent)
  static: False

- include: ./docker/main.yml
  when: rgw_containerized_deployment
  # Hard code this so we skip the entire file instead of individual tasks (the default isn't consistent)
  static: False


@@ -3,27 +3,27 @@
  command: radosgw-admin user rm --uid=zone.user
  run_once: true
  failed_when: false
  notify:
    - update period

- name: Delete the zone
  command: radosgw-admin zone delete --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }}
  run_once: true
  failed_when: false
  notify:
    - update period

- name: Delete the zonegroup
  command: radosgw-admin zonegroup delete --rgw-zonegroup={{ rgw_zonegroup }}
  run_once: true
  failed_when: false
  notify:
    - update period

- name: Delete the realm
  command: radosgw-admin realm delete --rgw-realm={{ rgw_realm }}
  run_once: true
  failed_when: false
  notify:
    - update period

- name: Delete zone from RGW stanza in ceph.conf
  lineinfile:
    dest: /etc/ceph/ceph.conf
    # Must match the line that multisite/main.yml inserts ("rgw_zone = <zone>")
    regexp: "rgw_zone = {{ rgw_zone }}"
    state: absent
  when: rgw_zone is defined
  notify:
    - restart rgw
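After a teardown, a quick way to confirm nothing was left behind is to list the remaining multisite objects:
```
radosgw-admin realm list
radosgw-admin zonegroup list
radosgw-admin zone list
```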


@@ -1,19 +1,27 @@
---
- name: Include multisite checks
  include: checks.yml

# Include the tasks depending on the zone type
- name: Include master multisite tasks
  include: master.yml
  when: rgw_zonemaster is defined and rgw_zonemaster
  # Hard code this so we skip the entire file instead of individual tasks (the default isn't consistent)
  static: False

- name: Include secondary multisite tasks
  include: secondary.yml
  when: rgw_zonesecondary is defined and rgw_zonesecondary
  # Hard code this so we skip the entire file instead of individual tasks (the default isn't consistent)
  static: False

# Continue with common tasks
- name: Add zone to RGW stanza in ceph.conf
  lineinfile:
    dest: /etc/ceph/ceph.conf
    # Escape the brackets so they are not parsed as a regex character class
    insertafter: '^\[client.rgw.{{ ansible_host }}\]'
    # Replace an existing rgw_zone line rather than matching the stanza header
    regexp: "^rgw_zone ="
    line: "rgw_zone = {{ rgw_zone }}"
    state: present
  notify:
    - restart rgw


@@ -7,14 +7,14 @@
    - update period

- name: Create the zonegroup
  command: radosgw-admin zonegroup create --rgw-zonegroup={{ rgw_zonegroup }} --endpoints=http://{{ ansible_fqdn }}:{{ radosgw_civetweb_port }} --master --default
  run_once: true
  when: ("No such file or directory" in zonegroupcheck.stderr) and rgw_zonemaster
  notify:
    - update period

- name: Create the zone
  command: radosgw-admin zone create --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }} --endpoints=http://{{ ansible_fqdn }}:{{ radosgw_civetweb_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }} --default --master
  run_once: true
  when: ("No such file or directory" in zonecheck.stderr) and rgw_zonemaster
  notify:
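The `zonegroupcheck` and `zonecheck` variables referenced in the `when:` clauses are registered in `checks.yml`, which this diff does not show; a plausible sketch of such a check (task wording and flags here are assumptions) is:
```
- name: Check if the zonegroup already exists
  command: radosgw-admin zonegroup get --rgw-zonegroup={{ rgw_zonegroup }}
  register: zonegroupcheck
  failed_when: false
  changed_when: false
```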


@@ -1,13 +1,13 @@
---
- name: Fetch the realm
  command: radosgw-admin realm pull --url=http://{{ rgw_pullhost }}:{{ radosgw_civetweb_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }}
  run_once: true
  when: ("No such file or directory" in realmcheck.stderr)
  notify:
    - update period

- name: Fetch the period
  command: radosgw-admin period pull --url=http://{{ rgw_pullhost }}:{{ radosgw_civetweb_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }}
  run_once: true
  when: ("No such file or directory" in realmcheck.stderr)
  notify:
@@ -26,16 +26,8 @@
    - update period # Might not need to update period here

- name: Create the zone
  command: radosgw-admin zone create --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }} --endpoints=http://{{ ansible_hostname }}:{{ radosgw_civetweb_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }} --default
  run_once: true
  when: ("No such file or directory" in zonecheck.stderr)
  notify:
    - update period