Add role definitions of ceph-rgw-loadbalancer

This adds support for an RGW load balancer based on HAProxy and
Keepalived. We define a single role, ceph-rgw-loadbalancer, which
carries both the HAProxy and the Keepalived configuration.

A single HAProxy backend balances across all RGW instances, and a
single frontend is exposed on one port, 80 by default.

Keepalived maintains high availability of the HAProxy instances. You
are free to use any number of VIPs. Each VIP is shared across all
keepalived instances; for each VIP one master is elected, assigned
sequentially across the instances, and the others serve as backups.
This assumes that each keepalived instance runs on the same node as
an HAProxy instance; a simple check script detects the state of the
local HAProxy instance and triggers VIP failover when it fails.

Signed-off-by: guihecheng <guihecheng@cmiot.chinamobile.com>
pull/4058/head
guihecheng 2019-04-04 10:54:41 +08:00 committed by Guillaume Abrioux
parent ab54fe20ec
commit 35d40c65f8
14 changed files with 227 additions and 0 deletions
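For orientation, here is a minimal sketch of how the role might be consumed; the hostnames are hypothetical, while the variables mirror the sample file added below:

    [rgwloadbalancers]
    lb0.example.com
    lb1.example.com

    # group_vars/rgwloadbalancers.yml
    haproxy_frontend_port: 80
    virtual_ips:
      - 192.168.238.250
      - 192.168.238.251
    virtual_ip_netmask: 24
    virtual_ip_interface: ens33

With two hosts and two VIPs, lb0 becomes master of the first VIP and lb1 of the second, so client traffic can be spread across both load balancers while either one can absorb the other's VIP on failure.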

View File

@@ -68,6 +68,8 @@ for role in "$basedir"/roles/ceph-*; do
    output="rbdmirrors.yml.sample"
  elif [[ $rolename == "ceph-iscsi-gw" ]]; then
    output="iscsigws.yml.sample"
  elif [[ $rolename == "ceph-rgw-loadbalancer" ]]; then
    output="rgwloadbalancers.yml.sample"
  else
    output="${rolename:5}s.yml.sample"
  fi
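The explicit branch is needed because the fallback builds the sample file name by stripping the "ceph-" prefix (${rolename:5}) and appending "s.yml.sample", which for this role would produce a name that does not match the group:

    rolename="ceph-rgw-loadbalancer"
    echo "${rolename:5}s.yml.sample"   # -> rgw-loadbalancers.yml.sample, not rgwloadbalancers.yml.sample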

View File

@@ -54,6 +54,7 @@ dummy:
#client_group_name: clients
#iscsi_gw_group_name: iscsigws
#mgr_group_name: mgrs
#rgwloadbalancer_group_name: rgwloadbalancers

# If configure_firewall is true, then ansible will try to configure the
# appropriate firewalling rules so that Ceph daemons can communicate
@@ -70,6 +71,7 @@ dummy:
#ceph_rbdmirror_firewall_zone: public
#ceph_iscsi_firewall_zone: public
#ceph_dashboard_firewall_zone: public
#ceph_rgwloadbalancer_firewall_zone: public

# Generate local ceph.conf in fetch directory
#ceph_conf_local: false

View File

@@ -0,0 +1,24 @@
---
# Variables here are applicable to all host groups NOT roles

# This sample file generated by generate_group_vars_sample.sh

# Dummy variable to avoid error because ansible does not recognize the
# file as a good configuration file when no variable in it.
dummy:

# You can override vars by using host or group vars

###########
# GENERAL #
###########

#haproxy_frontend_port: 80
#
#virtual_ips:
#  - 192.168.238.250
#  - 192.168.238.251
#
#virtual_ip_netmask: 24
#virtual_ip_interface: ens33

View File

@@ -54,6 +54,7 @@ fetch_directory: ~/ceph-ansible-keys
#client_group_name: clients
#iscsi_gw_group_name: iscsigws
#mgr_group_name: mgrs
#rgwloadbalancer_group_name: rgwloadbalancers

# If configure_firewall is true, then ansible will try to configure the
# appropriate firewalling rules so that Ceph daemons can communicate
@@ -70,6 +71,7 @@ fetch_directory: ~/ceph-ansible-keys
#ceph_rbdmirror_firewall_zone: public
#ceph_iscsi_firewall_zone: public
#ceph_dashboard_firewall_zone: public
#ceph_rgwloadbalancer_firewall_zone: public

# Generate local ceph.conf in fetch directory
#ceph_conf_local: false

View File

@@ -46,6 +46,7 @@ rbdmirror_group_name: rbdmirrors
client_group_name: clients
iscsi_gw_group_name: iscsigws
mgr_group_name: mgrs
rgwloadbalancer_group_name: rgwloadbalancers

# If configure_firewall is true, then ansible will try to configure the
# appropriate firewalling rules so that Ceph daemons can communicate
@@ -62,6 +63,7 @@ ceph_nfs_firewall_zone: public
ceph_rbdmirror_firewall_zone: public
ceph_iscsi_firewall_zone: public
ceph_dashboard_firewall_zone: public
ceph_rgwloadbalancer_firewall_zone: public

# Generate local ceph.conf in fetch directory
ceph_conf_local: false

View File

@@ -213,4 +213,30 @@
    - dashboard_enabled | bool
    - inventory_hostname in groups.get('grafana-server', [])

- name: open haproxy ports
  firewalld:
    port: "{{ haproxy_frontend_port | default(80) }}/tcp"
    zone: "{{ ceph_rgwloadbalancer_firewall_zone }}"
    source: "{{ public_network }}"
    permanent: true
    immediate: true
    state: enabled
  when:
    - rgwloadbalancer_group_name is defined
    - rgwloadbalancer_group_name in group_names
  tags:
    - firewall

- name: add rich rule for keepalived vrrp
  firewalld:
    rich_rule: 'rule protocol value="vrrp" accept'
    permanent: true
    immediate: true
    state: enabled
  when:
    - rgwloadbalancer_group_name is defined
    - rgwloadbalancer_group_name in group_names
  tags:
    - firewall

- meta: flush_handlers
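Once the play has run on a load balancer node, the rules can be spot-checked with firewalld's own CLI (a sketch assuming the default public zone):

    firewall-cmd --zone=public --list-ports        # should include 80/tcp
    firewall-cmd --zone=public --list-rich-rules   # should include: rule protocol value="vrrp" accept

The VRRP rule matters: keepalived peers elect a master by exchanging VRRP advertisements, and if those are filtered, every instance considers itself master and all nodes claim the VIP at once.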

View File

@@ -0,0 +1,15 @@
---
# You can override vars by using host or group vars

###########
# GENERAL #
###########

haproxy_frontend_port: 80
#
#virtual_ips:
#  - 192.168.238.250
#  - 192.168.238.251
#
#virtual_ip_netmask: 24
#virtual_ip_interface: ens33

View File

@@ -0,0 +1,10 @@
---
- name: restart haproxy
  service:
    name: haproxy
    state: restarted

- name: restart keepalived
  service:
    name: keepalived
    state: restarted

View File

@@ -0,0 +1,13 @@
---
galaxy_info:
  author: Gui Hecheng
  description: Config HAProxy & Keepalived
  license: Apache
  min_ansible_version: 2.8
  platforms:
    - name: EL
      versions:
        - 7
  categories:
    - system
dependencies: []

View File

@@ -0,0 +1,6 @@
---
- name: include_tasks pre_requisite.yml
  include_tasks: pre_requisite.yml

- name: include_tasks start_rgw_loadbalancer.yml
  include_tasks: start_rgw_loadbalancer.yml

View File

@@ -0,0 +1,35 @@
---
- name: install haproxy and keepalived
  package:
    name: ['haproxy', 'keepalived']
    state: present
  register: result
  until: result is succeeded

- name: "generate haproxy configuration file: haproxy.cfg"
  template:
    src: haproxy.cfg.j2
    dest: /etc/haproxy/haproxy.cfg
    owner: "root"
    group: "root"
    mode: "0644"
    validate: "haproxy -f %s -c"
  notify:
    - restart haproxy

- name: set_fact vip to vrrp_instance
  set_fact:
    vrrp_instances: "{{ vrrp_instances | default([]) | union([{ 'name': 'VI_' + index|string, 'vip': item, 'master': groups[rgwloadbalancer_group_name][index] }]) }}"
  loop: "{{ virtual_ips | flatten(levels=1) }}"
  loop_control:
    index_var: index

- name: "generate keepalived configuration file: keepalived.conf"
  template:
    src: keepalived.conf.j2
    dest: /etc/keepalived/keepalived.conf
    owner: "root"
    group: "root"
    mode: "0644"
  notify:
    - restart keepalived
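To make the set_fact concrete: with the two sample VIPs and, hypothetically, hosts lb0 and lb1 in the rgwloadbalancers group, the loop builds:

    vrrp_instances:
      - { name: 'VI_0', vip: '192.168.238.250', master: 'lb0' }
      - { name: 'VI_1', vip: '192.168.238.251', master: 'lb1' }

Note that indexing into groups[rgwloadbalancer_group_name] assumes there are never more VIPs than hosts in the group; an extra VIP would make the lookup fail with an out-of-range error.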

View File

@@ -0,0 +1,12 @@
---
- name: start haproxy
  service:
    name: haproxy
    state: started
    enabled: yes

- name: start keepalived
  service:
    name: keepalived
    state: started
    enabled: yes
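A quick way to verify the result on a node, assuming the sample interface ens33:

    systemctl is-active haproxy keepalived   # both should print "active"
    ip addr show ens33                       # VIPs mastered by this node should be listed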

View File

@@ -0,0 +1,43 @@
# {{ ansible_managed }}

global
    log 127.0.0.1 local2
    chroot /var/lib/haproxy
    pidfile /var/run/haproxy.pid
    maxconn 8000
    user haproxy
    group haproxy
    daemon
    stats socket /var/lib/haproxy/stats

defaults
    mode http
    log global
    option httplog
    option dontlognull
    option http-server-close
    option forwardfor except 127.0.0.0/8
    option redispatch
    retries 3
    timeout http-request 10s
    timeout queue 1m
    timeout connect 10s
    timeout client 1m
    timeout server 1m
    timeout http-keep-alive 10s
    timeout check 10s
    maxconn 8000

frontend rgw-frontend
    bind *:{{ haproxy_frontend_port }}
    default_backend rgw-backend

backend rgw-backend
    option forwardfor
    balance static-rr
    option httpchk GET /
{% for host in groups[rgw_group_name] %}
{% for instance in hostvars[host]['rgw_instances'] %}
    server {{ 'server-' + hostvars[host]['ansible_hostname'] + '-' + instance['instance_name'] }} {{ instance['radosgw_address'] }}:{{ instance['radosgw_frontend_port'] }} weight 100
{% endfor %}
{% endfor %}
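Rendered against a hypothetical host rgw0 carrying a single RGW instance also named rgw0 at 192.168.238.10:8080, the backend loop expands to:

    server server-rgw0-rgw0 192.168.238.10:8080 weight 100

With balance static-rr and equal weights, requests rotate evenly across every RGW instance on every RGW host, and the httpchk health check takes an instance out of rotation when GET / stops answering.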

View File

@@ -0,0 +1,35 @@
# {{ ansible_managed }}

! Configuration File for keepalived

global_defs {
    router_id CEPH_RGW
}

vrrp_script check_haproxy {
    script "killall -0 haproxy"
    weight -20
    interval 2
    rise 2
    fall 2
}

{% for instance in vrrp_instances %}
vrrp_instance {{ instance['name'] }} {
    state {{ 'MASTER' if ansible_hostname == instance['master'] else 'BACKUP' }}
    priority {{ '100' if ansible_hostname == instance['master'] else '90' }}
    interface {{ virtual_ip_interface }}
    virtual_router_id {{ 50 + loop.index }}
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1234
    }
    virtual_ipaddress {
        {{ instance['vip'] }}/{{ virtual_ip_netmask }} dev {{ virtual_ip_interface }}
    }
    track_script {
        check_haproxy
    }
}
{% endfor %}
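Taking lb0 as master of VI_0 from the earlier example, the first rendered instance would look like:

    vrrp_instance VI_0 {
        state MASTER
        priority 100
        interface ens33
        virtual_router_id 51
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 1234
        }
        virtual_ipaddress {
            192.168.238.250/24 dev ens33
        }
        track_script {
            check_haproxy
        }
    }

The check script is what drives failover: killall -0 haproxy sends no signal but fails when no haproxy process exists, and the -20 weight then drops the master's effective priority from 100 to 80, below a backup's 90, so the VIP moves. One caveat: the template compares ansible_hostname against the inventory-derived master name, which assumes the two coincide.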