diff --git a/README-MULTISITE.md b/README-MULTISITE.md
new file mode 100644
index 000000000..2056a6b66
--- /dev/null
+++ b/README-MULTISITE.md
@@ -0,0 +1,81 @@
+RGW Multisite (Experimental)
+============================
+
+Directions for configuring RGW Multisite support in ceph-ansible.
+
+## Requirements
+
+* At least 2 Ceph clusters
+* 1 RGW per cluster
+* Jewel or newer
+
+More details:
+
+* Configures a master and a secondary realm/zonegroup/zone on 2 separate clusters.
+
+## Configuring the Master Zone in the Primary Cluster
+
+This will set up the realm, zonegroup and master zone and make them the defaults. It will also reconfigure the specified RGW for use with the zone.
+
+1. Edit the Inventory File
+
+```
+[rgws]
+cluster0-rgw0 rgw_zone=us-east rgw_zonemaster=true
+```
+
+2. Generate System Access and System Secret Keys
+
+```
+echo system_access_key: $(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1) > multi-site-keys.sh
+echo system_secret_key: $(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 40 | head -n 1) >> multi-site-keys.sh
+```
+
+3. Edit the RGW Group Vars
+
+```
+copy_admin_key: true
+# Enable Multisite support
+rgw_multisite: true
+rgw_realm: gold
+rgw_zonegroup: us
+system_access_key: 6kWkikvapSnHyE22P7nO
+system_secret_key: MGecsMrWtKZgngOHZdrd6d3JxGO5CPWgT2lcnpSt
+```
+
+**Note:** replace the system_access_key and system_secret_key values with the ones you generated.
+
+4. Run the ceph-ansible playbook on your 1st cluster
+
+Note: If you have already installed a cluster with ceph-ansible, you can use the `rgw-standalone.yml` playbook as a shortcut (it only runs the ceph-rgw role).
+
+## Configuring the Secondary Zone in a Separate Cluster
+
+1. Edit the Inventory File
+
+```
+[rgws]
+cluster1-rgw0 rgw_zone=us-west rgw_zonesecondary=true
+```
+
+2. Edit the RGW Group Vars
+
+```
+copy_admin_key: true
+# Enable Multisite support
+rgw_multisite: true
+rgw_realm: gold
+rgw_zonegroup: us
+rgw_pullhost: cluster0-rgw0.fqdn
+system_access_key: 6kWkikvapSnHyE22P7nO
+system_secret_key: MGecsMrWtKZgngOHZdrd6d3JxGO5CPWgT2lcnpSt
+```
+
+**Note:** rgw_pullhost should be the host of the RGW that is configured as the zone master, i.e. the RGW from the 1st cluster.
+**Note:** system_access_key and system_secret_key should match what you used in the 1st cluster.
+
+3. Run the ceph-ansible playbook on your 2nd cluster
+
+Note: If you have already installed a cluster with ceph-ansible, you can use the `rgw-standalone.yml` playbook as a shortcut (it only runs the ceph-rgw role).
+
+## Conclusion
+
+You should now have a master zone on cluster0 and a secondary zone on cluster1 in an active-active configuration.
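+
+To check that the two zones are replicating once both clusters are configured, a quick sanity check (run manually; the playbooks do not run this) is:
+
+```
+radosgw-admin sync status
+```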
diff --git a/infrastructure-playbooks/purge-multisite.yml b/infrastructure-playbooks/purge-multisite.yml
new file mode 100644
index 000000000..0677a1491
--- /dev/null
+++ b/infrastructure-playbooks/purge-multisite.yml
@@ -0,0 +1,11 @@
+---
+# Nukes a multisite config
+- hosts: rgws
+  become: True
+  tasks:
+    - include: ../roles/ceph-rgw/tasks/multisite/destroy.yml
+
+  handlers:
+    - include: ../roles/ceph-rgw/handlers/main.yml
+      # Ansible 2.1.0 has a bug that ignores included handlers without this
+      static: True
diff --git a/infrastructure-playbooks/rgw-standalone.yml b/infrastructure-playbooks/rgw-standalone.yml
new file mode 100644
index 000000000..10f1c3582
--- /dev/null
+++ b/infrastructure-playbooks/rgw-standalone.yml
@@ -0,0 +1,10 @@
+---
+# Run the ceph-rgw role standalone
+
+# We need to load the facts from the mons because ceph-common needs them to generate the ceph.conf
+- hosts: mons
+
+- hosts: rgws
+  become: True
+  roles:
+    - ceph-rgw
diff --git a/roles/ceph-rgw/handlers/main.yml b/roles/ceph-rgw/handlers/main.yml
new file mode 100644
index 000000000..3955c8243
--- /dev/null
+++ b/roles/ceph-rgw/handlers/main.yml
@@ -0,0 +1,8 @@
+---
+- name: update period
+  command: radosgw-admin period update --commit
+
+- name: restart rgw
+  service:
+    name: ceph-radosgw@rgw.{{ ansible_hostname }}
+    state: restarted
diff --git a/roles/ceph-rgw/tasks/main.yml b/roles/ceph-rgw/tasks/main.yml
index 91c8c9f79..ca2f9ea78 100644
--- a/roles/ceph-rgw/tasks/main.yml
+++ b/roles/ceph-rgw/tasks/main.yml
@@ -1,12 +1,26 @@
 ---
 - include: pre_requisite.yml
   when: not rgw_containerized_deployment
+  # Hard code this so we skip the entire file instead of individual tasks (the default isn't consistent)
+  static: False
 
 - include: openstack-keystone.yml
   when: radosgw_keystone
+  # Hard code this so we skip the entire file instead of individual tasks (the default isn't consistent)
+  static: False
 
 - include: start_radosgw.yml
   when: not rgw_containerized_deployment
+  # Hard code this so we skip the entire file instead of individual tasks (the default isn't consistent)
+  static: False
+
+- name: include rgw multisite tasks
+  include: multisite/main.yml
+  when: rgw_zone is defined and rgw_multisite and ( is_jewel or is_after_jewel )
+  # Hard code this so we skip the entire file instead of individual tasks (the default isn't consistent)
+  static: False
 
 - include: ./docker/main.yml
   when: rgw_containerized_deployment
+  # Hard code this so we skip the entire file instead of individual tasks (the default isn't consistent)
+  static: False
diff --git a/roles/ceph-rgw/tasks/multisite/checks.yml b/roles/ceph-rgw/tasks/multisite/checks.yml
new file mode 100644
index 000000000..7bc6bbb8d
--- /dev/null
+++ b/roles/ceph-rgw/tasks/multisite/checks.yml
@@ -0,0 +1,24 @@
+---
+- name: check if the realm already exists
+  command: radosgw-admin realm get --rgw-realm={{ rgw_realm }}
+  register: realmcheck
+  failed_when: False
+  changed_when: False
+
+- name: check if the zonegroup already exists
+  command: radosgw-admin zonegroup get --rgw-zonegroup={{ rgw_zonegroup }}
+  register: zonegroupcheck
+  failed_when: False
+  changed_when: False
+
+- name: check if the zone already exists
+  command: radosgw-admin zone get --rgw-zone={{ rgw_zone }}
+  register: zonecheck
+  failed_when: False
+  changed_when: False
+
+- name: check if the system user already exists
+  command: radosgw-admin user info --uid=zone.user
+  register: usercheck
+  failed_when: False
+  changed_when: False
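For reference, both new playbooks are meant to be run from the repository root against an already-deployed cluster; a usage sketch (the inventory file name `hosts` is an assumption, not part of this change):

```
# Configure RGW multisite on an existing cluster (runs only the ceph-rgw role)
ansible-playbook -i hosts infrastructure-playbooks/rgw-standalone.yml

# Tear the multisite configuration back down
ansible-playbook -i hosts infrastructure-playbooks/purge-multisite.yml
```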
diff --git a/roles/ceph-rgw/tasks/multisite/destroy.yml b/roles/ceph-rgw/tasks/multisite/destroy.yml
new file mode 100644
index 000000000..db1226130
--- /dev/null
+++ b/roles/ceph-rgw/tasks/multisite/destroy.yml
@@ -0,0 +1,29 @@
+---
+- name: delete the zone user
+  command: radosgw-admin user rm --uid=zone.user
+  run_once: true
+  failed_when: false
+
+- name: delete the zone
+  command: radosgw-admin zone delete --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }}
+  run_once: true
+  failed_when: false
+
+- name: delete the zonegroup
+  command: radosgw-admin zonegroup delete --rgw-zonegroup={{ rgw_zonegroup }}
+  run_once: true
+  failed_when: false
+
+- name: delete the realm
+  command: radosgw-admin realm delete --rgw-realm={{ rgw_realm }}
+  run_once: true
+  failed_when: false
+
+- name: delete zone from rgw stanza in ceph.conf
+  lineinfile:
+    dest: /etc/ceph/ceph.conf
+    regexp: "^rgw_zone = {{ rgw_zone }}"
+    state: absent
+  when: "rgw_zone is defined and rgw_zonegroup is defined"
+  notify:
+    - restart rgw
diff --git a/roles/ceph-rgw/tasks/multisite/main.yml b/roles/ceph-rgw/tasks/multisite/main.yml
new file mode 100644
index 000000000..cc874ba00
--- /dev/null
+++ b/roles/ceph-rgw/tasks/multisite/main.yml
@@ -0,0 +1,27 @@
+---
+- name: include multisite checks
+  include: checks.yml
+
+# Include the tasks depending on the zone type
+- name: include master multisite tasks
+  include: master.yml
+  when: "rgw_zonemaster is defined and rgw_zonemaster"
+  # Hard code this so we skip the entire file instead of individual tasks (the default isn't consistent)
+  static: False
+
+- name: include secondary multisite tasks
+  include: secondary.yml
+  when: "rgw_zonesecondary is defined and rgw_zonesecondary"
+  # Hard code this so we skip the entire file instead of individual tasks (the default isn't consistent)
+  static: False
+
+# Continue with the common tasks
+- name: add zone to rgw stanza in ceph.conf
+  lineinfile:
+    dest: /etc/ceph/ceph.conf
+    regexp: "^rgw_zone ="
+    insertafter: '^\[client.rgw.{{ ansible_hostname }}\]'
+    line: "rgw_zone = {{ rgw_zone }}"
+    state: present
+  notify:
+    - restart rgw
diff --git a/roles/ceph-rgw/tasks/multisite/master.yml b/roles/ceph-rgw/tasks/multisite/master.yml
new file mode 100644
index 000000000..ff818465b
--- /dev/null
+++ b/roles/ceph-rgw/tasks/multisite/master.yml
@@ -0,0 +1,28 @@
+---
+- name: create the realm
+  command: radosgw-admin realm create --rgw-realm={{ rgw_realm }} --default
+  run_once: true
+  when: ("No such file or directory" in realmcheck.stderr) and rgw_zonemaster
+  notify:
+    - update period
+
+- name: create the zonegroup
+  command: radosgw-admin zonegroup create --rgw-zonegroup={{ rgw_zonegroup }} --endpoints=http://{{ ansible_fqdn }}:{{ radosgw_civetweb_port }} --master --default
+  run_once: true
+  when: ("No such file or directory" in zonegroupcheck.stderr) and rgw_zonemaster
+  notify:
+    - update period
+
+- name: create the zone
+  command: radosgw-admin zone create --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }} --endpoints=http://{{ ansible_fqdn }}:{{ radosgw_civetweb_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }} --default --master
+  run_once: true
+  when: ("No such file or directory" in zonecheck.stderr) and rgw_zonemaster
+  notify:
+    - update period
+
+- name: create the zone user
+  command: radosgw-admin user create --uid=zone.user --display-name="Zone User" --access-key={{ system_access_key }} --secret={{ system_secret_key }} --system
+  run_once: true
+  when: "'could not fetch user info: no user info saved' in usercheck.stderr"
+  notify:
+    - update period
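With the README's example values, the `lineinfile` task in `multisite/main.yml` above should leave the master RGW's `/etc/ceph/ceph.conf` looking roughly like this (the stanza name and surrounding keys come from the ceph.conf template and are assumptions here):

```
[client.rgw.cluster0-rgw0]
# ... keys rendered by the ceph.conf template ...
rgw_zone = us-east
```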
diff --git a/roles/ceph-rgw/tasks/multisite/secondary.yml b/roles/ceph-rgw/tasks/multisite/secondary.yml
new file mode 100644
index 000000000..90dd03225
--- /dev/null
+++ b/roles/ceph-rgw/tasks/multisite/secondary.yml
@@ -0,0 +1,33 @@
+---
+- name: fetch the realm
+  command: radosgw-admin realm pull --url=http://{{ rgw_pullhost }}:{{ radosgw_civetweb_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }}
+  run_once: true
+  when: ("No such file or directory" in realmcheck.stderr)
+  notify:
+    - update period
+
+- name: fetch the period
+  command: radosgw-admin period pull --url=http://{{ rgw_pullhost }}:{{ radosgw_civetweb_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }}
+  run_once: true
+  when: ("No such file or directory" in realmcheck.stderr)
+  notify:
+    - update period
+
+- name: set default realm
+  command: radosgw-admin realm default --rgw-realm={{ rgw_realm }}
+  run_once: true
+  notify:
+    - update period # Might not need to update the period here
+
+- name: set default zonegroup
+  command: radosgw-admin zonegroup default --rgw-zonegroup={{ rgw_zonegroup }}
+  run_once: true
+  notify:
+    - update period # Might not need to update the period here
+
+- name: create the zone
+  command: radosgw-admin zone create --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }} --endpoints=http://{{ ansible_fqdn }}:{{ radosgw_civetweb_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }} --default
+  run_once: true
+  when: ("No such file or directory" in zonecheck.stderr)
+  notify:
+    - update period
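After these tasks run on the secondary, the pulled realm and the new zone should be visible locally. A hedged sanity check, using the names from the README example:

```
radosgw-admin realm list       # the "gold" realm should appear
radosgw-admin zonegroup list   # the "us" zonegroup should appear
radosgw-admin zone list        # the "us-west" zone should appear
```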