增加可选附加负载均衡节点

pull/453/head
gjmzj 2019-02-07 15:10:49 +08:00
parent 06ef648653
commit 0e04736f2b
16 changed files with 278 additions and 4 deletions

View File

@ -14,6 +14,7 @@
192.168.1.1 192.168.1.1
# 参数 NEW_INSTALLyes表示新建no表示使用已有harbor服务器 # 参数 NEW_INSTALLyes表示新建no表示使用已有harbor服务器
# 如果不使用域名,可以设置 HARBOR_DOMAIN=""
[harbor] [harbor]
#192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no #192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no
@ -21,6 +22,11 @@
[new-node] [new-node]
#192.168.1.xx #192.168.1.xx
# 【可选】外部负载均衡,用于自有环境负载转发 NodePort 暴露的服务等
[ex-lb]
#192.168.1.6 LB_ROLE=backup EX_VIP=192.168.1.250
#192.168.1.7 LB_ROLE=master EX_VIP=192.168.1.250
[all:vars] [all:vars]
# ---------集群主要参数--------------- # ---------集群主要参数---------------
#集群部署模式allinone, single-master, multi-master #集群部署模式allinone, single-master, multi-master

View File

@ -14,7 +14,8 @@
[kube-node] [kube-node]
192.168.1.1 192.168.1.1
# variable NEW_INSTALL: 'yes' to setup a new harbor server; 'no' to integrate with existed one # set 'NEW_INSTALL': 'yes' to install a harbor server; 'no' to integrate with existed one
# if not using a domain, set 'HARBOR_DOMAIN=""'
[harbor] [harbor]
#192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no #192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no
@ -22,6 +23,11 @@
[new-node] [new-node]
#192.168.1.xx #192.168.1.xx
# [optional] loadbalance for services with type 'NodePort'
[ex-lb]
#192.168.1.6 LB_ROLE=backup EX_VIP=192.168.1.250
#192.168.1.7 LB_ROLE=master EX_VIP=192.168.1.250
[all:vars] [all:vars]
# --------- Main Variables --------------- # --------- Main Variables ---------------
# Cluster Deployment Mode: allinone, single-master, multi-master # Cluster Deployment Mode: allinone, single-master, multi-master

View File

@ -21,6 +21,7 @@
192.168.1.4 192.168.1.4
# 参数 NEW_INSTALLyes表示新建no表示使用已有harbor服务器 # 参数 NEW_INSTALLyes表示新建no表示使用已有harbor服务器
# 如果不使用域名,可以设置 HARBOR_DOMAIN=""
[harbor] [harbor]
#192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no #192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no

View File

@ -21,7 +21,8 @@
192.168.1.1 192.168.1.1
192.168.1.4 192.168.1.4
# variable NEW_INSTALL: 'yes' to setup a new harbor server; 'no' to integrate with existed one # set 'NEW_INSTALL': 'yes' to install a harbor server; 'no' to integrate with existed one
# if not using a domain, set 'HARBOR_DOMAIN=""'
[harbor] [harbor]
#192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no #192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no

View File

@ -23,6 +23,7 @@
192.168.1.4 192.168.1.4
# 参数 NEW_INSTALLyes表示新建no表示使用已有harbor服务器 # 参数 NEW_INSTALLyes表示新建no表示使用已有harbor服务器
# 如果不使用域名,可以设置 HARBOR_DOMAIN=""
[harbor] [harbor]
#192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no #192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no
@ -34,6 +35,11 @@
[new-node] [new-node]
#192.168.1.xx #192.168.1.xx
#【可选】外部负载均衡,用于自有环境负载转发 NodePort 暴露的服务等
[ex-lb]
#192.168.1.6 LB_ROLE=backup EX_VIP=192.168.1.250
#192.168.1.7 LB_ROLE=master EX_VIP=192.168.1.250
[all:vars] [all:vars]
# ---------集群主要参数--------------- # ---------集群主要参数---------------
#集群部署模式allinone, single-master, multi-master #集群部署模式allinone, single-master, multi-master

View File

@ -23,7 +23,8 @@
192.168.1.3 192.168.1.3
192.168.1.4 192.168.1.4
# variable NEW_INSTALL: 'yes' to setup a new harbor server; 'no' to integrate with existed one # set 'NEW_INSTALL': 'yes' to install a harbor server; 'no' to integrate with existed one
# if not using a domain, set 'HARBOR_DOMAIN=""'
[harbor] [harbor]
#192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no #192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no
@ -35,6 +36,11 @@
[new-node] [new-node]
#192.168.1.xx #192.168.1.xx
# [optional] loadbalance for services with type 'NodePort'
[ex-lb]
#192.168.1.6 LB_ROLE=backup EX_VIP=192.168.1.250
#192.168.1.7 LB_ROLE=master EX_VIP=192.168.1.250
[all:vars] [all:vars]
# --------- Main Variables --------------- # --------- Main Variables ---------------
# Cluster Deployment Mode: allinone, single-master, multi-master # Cluster Deployment Mode: allinone, single-master, multi-master

View File

@ -15,6 +15,7 @@
192.168.1.3 192.168.1.3
# 参数 NEW_INSTALLyes表示新建no表示使用已有harbor服务器 # 参数 NEW_INSTALLyes表示新建no表示使用已有harbor服务器
# 如果不使用域名,可以设置 HARBOR_DOMAIN=""
[harbor] [harbor]
#192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no #192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no
@ -22,6 +23,11 @@
[new-node] [new-node]
#192.168.1.xx #192.168.1.xx
#【可选】外部负载均衡,用于自有环境负载转发 NodePort 暴露的服务等
[ex-lb]
#192.168.1.6 LB_ROLE=backup EX_VIP=192.168.1.250
#192.168.1.7 LB_ROLE=master EX_VIP=192.168.1.250
[all:vars] [all:vars]
# ---------集群主要参数--------------- # ---------集群主要参数---------------
#集群部署模式allinone, single-master, multi-master #集群部署模式allinone, single-master, multi-master

View File

@ -15,7 +15,8 @@
192.168.1.2 192.168.1.2
192.168.1.3 192.168.1.3
# variable NEW_INSTALL: 'yes' to setup a new harbor server; 'no' to integrate with existed one # set 'NEW_INSTALL': 'yes' to install a harbor server; 'no' to integrate with existed one
# if not using a domain, set 'HARBOR_DOMAIN=""'
[harbor] [harbor]
#192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no #192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no
@ -23,6 +24,11 @@
[new-node] [new-node]
#192.168.1.xx #192.168.1.xx
# [optional] loadbalance for services with type 'NodePort'
[ex-lb]
#192.168.1.6 LB_ROLE=backup EX_VIP=192.168.1.250
#192.168.1.7 LB_ROLE=master EX_VIP=192.168.1.250
[all:vars] [all:vars]
# --------- Main Variables --------------- # --------- Main Variables ---------------
# Cluster Deployment Mode: allinone, single-master, multi-master # Cluster Deployment Mode: allinone, single-master, multi-master

View File

@ -0,0 +1,36 @@
# Playbook: tear down external load-balancer nodes (group 'ex-lb').
# Stops chrony, keepalived and haproxy, removes their config dirs,
# and cleans the kubeasz PATH entry from ~/.bashrc.
- hosts:
  - ex-lb
  tasks:
  - name: stop and disable chrony in Ubuntu
    service: name=chrony state=stopped enabled=no
    ignore_errors: true
    tags: rm_ntp
    when: ansible_distribution == "Ubuntu" or ansible_distribution == "Debian"

  - name: stop and disable chronyd in CentOS/RedHat
    service: name=chronyd state=stopped enabled=no
    ignore_errors: true
    tags: rm_ntp
    when: ansible_distribution == "CentOS" or ansible_distribution == "RedHat"

  # errors ignored: the services may never have been installed on this host
  - name: stop keepalived service
    service: name=keepalived state=stopped enabled=no
    ignore_errors: true

  - name: stop haproxy service
    service: name=haproxy state=stopped enabled=no
    ignore_errors: true

  - name: remove files and dirs
    file: name={{ item }} state=absent
    with_items:
    - "/etc/haproxy"
    - "/etc/keepalived"

  # drop any line matching 'kubeasz' from the root shell profile
  - name: clean 'ENV PATH'
    lineinfile:
      dest: ~/.bashrc
      state: absent
      regexp: '{{ item }}'
    with_items:
    - 'kubeasz'

View File

@ -0,0 +1,15 @@
# VRRP virtual_router_id (valid range 0-255); normally it must be unique
# among VRRP instances sharing a broadcast domain.
# This project configures VRRP advertisements in unicast mode, so a
# duplicate ROUTER_ID on the same subnet is harmless here.
ROUTER_ID: 222
# haproxy load-balancing algorithm; common choices:
# "roundrobin": weighted round-robin across servers
# "leastconn": server with the fewest active connections
# "source": hash of the client source IP address
# "uri": hash of the request URI
BALANCE_ALG: "roundrobin"
# enable load balancing of the ingress NodePort service (yes/no)
INGRESS_NODEPORT_LB: "yes"
# enable load balancing of the ingress TLS NodePort service (yes/no)
INGRESS_TLS_NODEPORT_LB: "yes"

View File

@ -0,0 +1,5 @@
# Playbook: provision external load-balancer nodes (group 'ex-lb').
# chrony is applied only when NTP is enabled on the deploy node.
- hosts: ex-lb
  roles:
  - { role: chrony, when: "hostvars[groups.deploy[0]]['NTP_ENABLED'] == 'yes'" }
  - prepare
  - ex-lb

View File

@ -0,0 +1,62 @@
# Auto-detect the network interface that carries this node's inventory IP
# and expose it as 'LB_IF' for the keepalived templates.
- name: 注册变量 LB_IF_TMP
  shell: "ip a|grep '{{ inventory_hostname }}/'|awk '{print $NF}'"
  register: LB_IF_TMP

- name: 设置变量 LB_IF
  set_fact: LB_IF="{{ LB_IF_TMP.stdout }}"

# refresh the apt cache before package installs (Ubuntu 16+ only)
- name: apt更新缓存刷新
  apt: update_cache=yes cache_valid_time=72000
  when: ansible_distribution == "Ubuntu" and ansible_distribution_major_version|int >= 16

- name: 安装 haproxy
  package: name=haproxy state=latest

- name: 创建haproxy配置目录
  file: name=/etc/haproxy state=directory

# replace the distro haproxy.service with the project template on EL7
- name: 修改centos的haproxy.service
  template: src=haproxy.service.j2 dest=/usr/lib/systemd/system/haproxy.service
  when: (ansible_distribution == "CentOS" or ansible_distribution == "RedHat") and ansible_distribution_major_version == "7"
  tags: restart_lb

- name: 配置 haproxy
  template: src=haproxy.cfg.j2 dest=/etc/haproxy/haproxy.cfg
  tags: restart_lb

- name: 安装 keepalived
  package: name=keepalived state=latest

- name: 创建keepalived配置目录
  file: name=/etc/keepalived state=directory

# render master or backup keepalived config, selected by host var LB_ROLE
- name: 配置 keepalived 主节点
  template: src=keepalived-master.conf.j2 dest=/etc/keepalived/keepalived.conf
  when: LB_ROLE == "master"
  tags: restart_lb

- name: 配置 keepalived 备节点
  template: src=keepalived-backup.conf.j2 dest=/etc/keepalived/keepalived.conf
  when: LB_ROLE == "backup"
  tags: restart_lb

# pick up the (possibly replaced) haproxy.service unit file
- name: daemon-reload for haproxy.service
  shell: systemctl daemon-reload
  tags: restart_lb

- name: 开机启用haproxy服务
  shell: systemctl enable haproxy
  ignore_errors: true

- name: 重启haproxy服务
  shell: systemctl restart haproxy
  tags: restart_lb

- name: 开机启用keepalived服务
  shell: systemctl enable keepalived
  ignore_errors: true

- name: 重启keepalived服务
  shell: systemctl restart keepalived
  tags: restart_lb

View File

@ -0,0 +1,45 @@
# haproxy.cfg.j2 — TCP load balancing of ingress NodePort services
# across all kube-node / new-node hosts.
global
    log /dev/log local0
    log /dev/log local1 notice
    chroot /var/lib/haproxy
    stats socket /run/haproxy/admin.sock mode 660 level admin
    stats timeout 30s
    user haproxy
    group haproxy
    daemon
    nbproc 1

defaults
    log global
    # connect timeout in ms; client/server timeouts use explicit units
    timeout connect 5000
    timeout client 10m
    timeout server 10m

{% if INGRESS_NODEPORT_LB == "yes" %}
# plain HTTP ingress NodePort (fixed NodePort 23456 on every node)
listen ingress-node
    bind 0.0.0.0:80
    mode tcp
    option tcplog
    balance {{ BALANCE_ALG }}
{% for host in groups['kube-node'] %}
    server {{ host }} {{ host }}:23456 check inter 2000 fall 2 rise 2 weight 1
{% endfor %}
{% for host in groups['new-node'] %}
    server {{ host }} {{ host }}:23456 check inter 2000 fall 2 rise 2 weight 1
{% endfor %}
{% endif %}

{% if INGRESS_TLS_NODEPORT_LB == "yes" %}
# TLS ingress NodePort (fixed NodePort 23457 on every node)
listen ingress-node-tls
    bind 0.0.0.0:443
    mode tcp
    option tcplog
    balance {{ BALANCE_ALG }}
{% for host in groups['kube-node'] %}
    server {{ host }} {{ host }}:23457 check inter 2000 fall 2 rise 2 weight 1
{% endfor %}
{% for host in groups['new-node'] %}
    server {{ host }} {{ host }}:23457 check inter 2000 fall 2 rise 2 weight 1
{% endfor %}
{% endif %}

View File

@ -0,0 +1,13 @@
# systemd unit installed on EL7 hosts to replace the distro haproxy.service
[Unit]
Description=HAProxy Load Balancer
After=syslog.target network.target

[Service]
# '-' prefix: do not fail the unit if the sysconfig file is absent
# (the ex-lb role never creates it; only the distro RPM does)
EnvironmentFile=-/etc/sysconfig/haproxy
ExecStartPre=/usr/bin/mkdir -p /run/haproxy
ExecStart=/usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid $OPTIONS
ExecReload=/bin/kill -USR2 $MAINPID
KillMode=mixed

[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,30 @@
# keepalived config for a BACKUP external-LB node (LB_ROLE=backup)
global_defs {
    router_id lb-backup-{{ inventory_hostname }}
}

# demote this node when the local haproxy process disappears
vrrp_script check-haproxy {
    script "killall -0 haproxy"
    interval 5
    weight -60
}

vrrp_instance VI-kube-master {
    state BACKUP
    # random value in [61, 119); always below the master's fixed 120
    priority {{ 119 | random(61, 1) }}
    # unicast VRRP: list every other ex-lb host as a peer
    unicast_src_ip {{ inventory_hostname }}
    unicast_peer {
{% for h in groups['ex-lb'] %}{% if h != inventory_hostname %}
        {{ h }}
{% endif %}{% endfor %}
    }
    dont_track_primary
    interface {{ LB_IF }}
    virtual_router_id {{ ROUTER_ID }}
    advert_int 3
    track_script {
        check-haproxy
    }
    virtual_ipaddress {
        {{ EX_VIP }}
    }
}

View File

@ -0,0 +1,30 @@
# keepalived config for the MASTER external-LB node (LB_ROLE=master)
global_defs {
    router_id lb-master-{{ inventory_hostname }}
}

# demote this node when the local haproxy process disappears
vrrp_script check-haproxy {
    script "killall -0 haproxy"
    interval 5
    weight -60
}

vrrp_instance VI-kube-master {
    state MASTER
    # fixed 120, above the backups' randomized [61, 119) priorities
    priority 120
    # unicast VRRP: list every other ex-lb host as a peer
    unicast_src_ip {{ inventory_hostname }}
    unicast_peer {
{% for h in groups['ex-lb'] %}{% if h != inventory_hostname %}
        {{ h }}
{% endif %}{% endfor %}
    }
    dont_track_primary
    interface {{ LB_IF }}
    virtual_router_id {{ ROUTER_ID }}
    advert_int 3
    track_script {
        check-haproxy
    }
    virtual_ipaddress {
        {{ EX_VIP }}
    }
}