调整ex-lb的流程

pull/641/head
gjmzj 2019-05-30 21:37:01 +08:00
parent 8cf349b271
commit 70c1f8b172
20 changed files with 93 additions and 255 deletions

View File

@ -205,21 +205,20 @@
- hosts:
- kube-master
- kube-node
- deploy
- etcd
- lb
- ex-lb
tasks:
- name: stop and disable chrony in Ubuntu
service: name=chrony state=stopped enabled=no
ignore_errors: true
tags: rm_ntp
when: ansible_distribution == "Ubuntu" or ansible_distribution == "Debian"
- block:
- name: stop and disable chrony in Ubuntu
service: name=chrony state=stopped enabled=no
ignore_errors: true
when: 'ansible_distribution in ["Ubuntu","Debian"]'
- name: stop and disable chronyd in CentOS/RedHat
service: name=chronyd state=stopped enabled=no
ignore_errors: true
tags: rm_ntp
when: ansible_distribution == "CentOS" or ansible_distribution == "RedHat"
- name: stop and disable chronyd in CentOS/RedHat
service: name=chronyd state=stopped enabled=no
ignore_errors: true
when: 'ansible_distribution in ["CentOS","RedHat","Amazon"]'
when: "groups['chrony']|length > 0"
- name: clean certs and keys
file: name={{ item }} state=absent

View File

@ -18,10 +18,10 @@
# [optional] loadbalance for accessing k8s from outside
[ex-lb]
#192.168.1.6 LB_ROLE=backup EX_APISERVER_VIP=192.168.1.250
#192.168.1.7 LB_ROLE=master EX_APISERVER_VIP=192.168.1.250
#192.168.1.6 LB_ROLE=backup EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443
#192.168.1.7 LB_ROLE=master EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443
# [optional] ntp server for cluster
# [optional] ntp server for the cluster
[chrony]
#192.168.1.1

View File

@ -22,10 +22,10 @@
# [optional] loadbalance for accessing k8s from outside
[ex-lb]
#192.168.1.6 LB_ROLE=backup EX_APISERVER_VIP=192.168.1.250
#192.168.1.7 LB_ROLE=master EX_APISERVER_VIP=192.168.1.250
#192.168.1.6 LB_ROLE=backup EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443
#192.168.1.7 LB_ROLE=master EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443
# [optional] ntp server for cluster
# [optional] ntp server for the cluster
[chrony]
#192.168.1.1

View File

@ -1,17 +1,17 @@
- hosts:
- ex-lb
tasks:
- name: stop and disable chrony in Ubuntu
service: name=chrony state=stopped enabled=no
ignore_errors: true
tags: rm_ntp
when: ansible_distribution == "Ubuntu" or ansible_distribution == "Debian"
- name: stop and disable chronyd in CentOS/RedHat
service: name=chronyd state=stopped enabled=no
ignore_errors: true
tags: rm_ntp
when: ansible_distribution == "CentOS" or ansible_distribution == "RedHat"
- block:
- name: stop and disable chrony in Ubuntu
service: name=chrony state=stopped enabled=no
ignore_errors: true
when: 'ansible_distribution in ["Ubuntu","Debian"]'
- name: stop and disable chronyd in CentOS/RedHat
service: name=chronyd state=stopped enabled=no
ignore_errors: true
when: 'ansible_distribution in ["CentOS","RedHat","Amazon"]'
when: "groups['chrony']|length > 0"
- name: stop keepalived service
service: name=keepalived state=stopped enabled=no

View File

@ -1,5 +1,5 @@
- hosts: ex-lb
roles:
- { role: chrony, when: "hostvars[groups.deploy[0]]['NTP_ENABLED'] == 'yes'" }
- { role: chrony, when: "groups['chrony']|length > 0" }
- prepare
- ex-lb

View File

@ -1,14 +1,21 @@
# ex-lb 节点成员不能同时是 kube-node 节点,因为它们都需要安装 haproxy
- name: fail info1
fail: msg="an 'ex-lb' node CAN NOT be a 'kube-node' node at the same time"
when: "inventory_hostname in groups['kube-node']"
# 自动设置LB节点变量'LB_IF'
- name: 注册变量 LB_IF_TMP
shell: "ip a|grep '{{ inventory_hostname }}/'|awk '{print $NF}'"
register: LB_IF_TMP
tags: restart_lb
- name: 设置变量 LB_IF
set_fact: LB_IF="{{ LB_IF_TMP.stdout }}"
set_fact: LB_IF="{{ LB_IF_TMP.stdout }}"
tags: restart_lb
- name: apt更新缓存刷新
apt: update_cache=yes cache_valid_time=72000
when: ansible_distribution == "Ubuntu" and ansible_distribution_major_version|int >= 16
when: 'ansible_distribution in ["Ubuntu","Debian"]'
- name: 安装 haproxy
package: name=haproxy state=present
@ -18,7 +25,7 @@
- name: 修改centos的haproxy.service
template: src=haproxy.service.j2 dest=/usr/lib/systemd/system/haproxy.service
when: (ansible_distribution == "CentOS" or ansible_distribution == "RedHat") and ansible_distribution_major_version == "7"
when: 'ansible_distribution in ["CentOS","RedHat","Amazon"]'
tags: restart_lb
- name: 配置 haproxy

View File

@ -1,6 +1,5 @@
global
log /dev/log local0
log /dev/log local1 notice
log /dev/log local1 warning
chroot /var/lib/haproxy
stats socket /run/haproxy/admin.sock mode 660 level admin
stats timeout 30s
@ -11,29 +10,55 @@ global
defaults
log global
timeout connect 5000
timeout connect 5s
timeout client 10m
timeout server 10m
{% if INGRESS_NODEPORT_LB == "yes" %}
listen kube-master
bind 0.0.0.0:{{ EX_APISERVER_PORT }}
mode tcp
option tcplog
option dontlognull
option dontlog-normal
balance {{ BALANCE_ALG }}
{% for host in groups['kube-master'] %}
server {{ host }} {{ host }}:6443 check inter 5s fall 2 rise 2 weight 1
{% endfor %}
{% if INGRESS_NODEPORT_LB == "yes" %}
listen ingress-node
bind 0.0.0.0:80
mode tcp
option tcplog
option dontlognull
option dontlog-normal
balance {{ BALANCE_ALG }}
{% if groups['kube-node']|length > 3 %}
server {{ groups['kube-node'][0] }} {{ groups['kube-node'][0] }}:23456 check inter 5s fall 2 rise 2 weight 1
server {{ groups['kube-node'][1] }} {{ groups['kube-node'][1] }}:23456 check inter 5s fall 2 rise 2 weight 1
server {{ groups['kube-node'][2] }} {{ groups['kube-node'][2] }}:23456 check inter 5s fall 2 rise 2 weight 1
{% else %}
{% for host in groups['kube-node'] %}
server {{ host }} {{ host }}:23456 check inter 2000 fall 2 rise 2 weight 1
server {{ host }} {{ host }}:23456 check inter 5s fall 2 rise 2 weight 1
{% endfor %}
{% endif %}
{% if INGRESS_TLS_NODEPORT_LB == "yes" %}
{% endif %}
{% if INGRESS_TLS_NODEPORT_LB == "yes" %}
listen ingress-node-tls
bind 0.0.0.0:443
mode tcp
option tcplog
option dontlognull
option dontlog-normal
balance {{ BALANCE_ALG }}
{% if groups['kube-node']|length > 3 %}
server {{ groups['kube-node'][0] }} {{ groups['kube-node'][0] }}:23457 check inter 5s fall 2 rise 2 weight 1
server {{ groups['kube-node'][1] }} {{ groups['kube-node'][1] }}:23457 check inter 5s fall 2 rise 2 weight 1
server {{ groups['kube-node'][2] }} {{ groups['kube-node'][2] }}:23457 check inter 5s fall 2 rise 2 weight 1
{% else %}
{% for host in groups['kube-node'] %}
server {{ host }} {{ host }}:23457 check inter 2000 fall 2 rise 2 weight 1
server {{ host }} {{ host }}:23457 check inter 5s fall 2 rise 2 weight 1
{% endfor %}
{% endif %}
{% endif %}

View File

@ -25,6 +25,6 @@ vrrp_instance VI-kube-master {
check-haproxy
}
virtual_ipaddress {
{{ EX_VIP }}
{{ EX_APISERVER_VIP }}
}
}

View File

@ -25,6 +25,6 @@ vrrp_instance VI-kube-master {
check-haproxy
}
virtual_ipaddress {
{{ EX_VIP }}
{{ EX_APISERVER_VIP }}
}
}

View File

@ -10,9 +10,11 @@
- name: 注册变量 KUBERNETES_SVC_IP
shell: echo {{ SERVICE_CIDR }}|cut -d/ -f1|awk -F. '{print $1"."$2"."$3"."$4+1}'
register: KUBERNETES_SVC_IP
tags: change_cert
- name: 设置变量 CLUSTER_KUBERNETES_SVC_IP
set_fact: CLUSTER_KUBERNETES_SVC_IP={{ KUBERNETES_SVC_IP.stdout }}
tags: change_cert
- name: 创建 kubernetes 证书签名请求
template: src=kubernetes-csr.json.j2 dest={{ ca_dir }}/kubernetes-csr.json
@ -47,12 +49,11 @@
connection: local
register: TMP_PASS
run_once: true
tags: restart_master
- name: 设置 basic-auth 随机密码
set_fact: BASIC_AUTH_PASS="{{ TMP_PASS.stdout }}"
tags: restart_master
set_fact: BASIC_AUTH_PASS="{{ TMP_PASS.stdout }}"
when: 'BASIC_AUTH_ENABLE == "yes" and BASIC_AUTH_PASS == "_pwd_"'
tags: restart_master
- name: 创建 basic-auth.csv
template: src=basic-auth.csv.j2 dest={{ ca_dir }}/basic-auth.csv

View File

@ -2,11 +2,13 @@
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"{{ MASTER_IP }}",
{% if groups['ex-lb']|length > 0 %}
"{{ hostvars[groups['ex-lb'][0]]['EX_APISERVER_VIP'] }}",
{% endif %}
"{{ inventory_hostname }}",
"{{ CLUSTER_KUBERNETES_SVC_IP }}",
{% for HOST in MASTER_CERT_HOSTS %}
"{{ HOST }}",
{% for host in MASTER_CERT_HOSTS %}
"{{ host }}",
{% endfor %}
"kubernetes",
"kubernetes.default",

View File

@ -1,3 +1,8 @@
# kube-node 节点成员不能同时是 ex-lb 节点,因为它们都需要安装 haproxy
- name: fail info1
fail: msg="an 'kube-node' node CAN NOT be a 'ex-lb' node at the same time"
when: "inventory_hostname in groups['ex-lb']"
- name: 安装 haproxy
package: name=haproxy state=present

View File

@ -1,15 +0,0 @@
# 区分多个instance的VRRP组播同网段不能重复取值在0-255之间
# 因项目已设置vrrp报文单播模式所以这个ROUTER_ID 即便同网段里面有重复也没关系
ROUTER_ID: 111
# haproxy负载均衡算法常见如下
# "roundrobin": 基于服务器权重的轮询
# "leastconn": 基于服务器最小连接数
# "source": 基于请求源IP地址
# "uri": 基于请求的URI
BALANCE_ALG: "roundrobin"
# 启用 ingress NodePort服务的负载均衡 (yes/no)
INGRESS_NODEPORT_LB: "no"
# 启用 ingress tls NodePort服务的负载均衡 (yes/no)
INGRESS_TLS_NODEPORT_LB: "no"

View File

@ -1,4 +0,0 @@
- hosts: lb
roles:
- lb

View File

@ -1,64 +0,0 @@
# 自动设置LB节点变量'LB_IF'
- name: 注册变量 LB_IF_TMP
shell: "ip a|grep '{{ inventory_hostname }}/'|awk '{print $NF}'"
register: LB_IF_TMP
tags: restart_lb
- name: 设置变量 LB_IF
set_fact: LB_IF="{{ LB_IF_TMP.stdout }}"
tags: restart_lb
- name: apt更新缓存刷新
apt: update_cache=yes cache_valid_time=72000
when: ansible_distribution == "Ubuntu" and ansible_distribution_major_version|int >= 16
- name: 安装 haproxy
package: name=haproxy state=present
- name: 创建haproxy配置目录
file: name=/etc/haproxy state=directory
- name: 修改centos的haproxy.service
template: src=haproxy.service.j2 dest=/usr/lib/systemd/system/haproxy.service
when: (ansible_distribution == "CentOS" or ansible_distribution == "RedHat") and ansible_distribution_major_version == "7"
tags: restart_lb
- name: 配置 haproxy
template: src=haproxy.cfg.j2 dest=/etc/haproxy/haproxy.cfg
tags: restart_lb
- name: 安装 keepalived
package: name=keepalived state=present
- name: 创建keepalived配置目录
file: name=/etc/keepalived state=directory
- name: 配置 keepalived 主节点
template: src=keepalived-master.conf.j2 dest=/etc/keepalived/keepalived.conf
when: LB_ROLE == "master"
tags: restart_lb
- name: 配置 keepalived 备节点
template: src=keepalived-backup.conf.j2 dest=/etc/keepalived/keepalived.conf
when: LB_ROLE == "backup"
tags: restart_lb
- name: daemon-reload for haproxy.service
shell: systemctl daemon-reload
tags: restart_lb
- name: 开机启用haproxy服务
shell: systemctl enable haproxy
ignore_errors: true
- name: 重启haproxy服务
shell: systemctl restart haproxy
tags: restart_lb
- name: 开机启用keepalived服务
shell: systemctl enable keepalived
ignore_errors: true
- name: 重启keepalived服务
shell: systemctl restart keepalived
tags: restart_lb

View File

@ -1,47 +0,0 @@
global
log /dev/log local0
log /dev/log local1 notice
chroot /var/lib/haproxy
stats socket /run/haproxy/admin.sock mode 660 level admin
stats timeout 30s
user haproxy
group haproxy
daemon
nbproc 1
defaults
log global
timeout connect 5000
timeout client 10m
timeout server 10m
listen kube-master
bind 0.0.0.0:{{ KUBE_APISERVER.split(':')[2] }}
mode tcp
option tcplog
balance {{ BALANCE_ALG }}
{% for host in groups['kube-master'] %}
server {{ host }} {{ host }}:6443 check inter 2000 fall 2 rise 2 weight 1
{% endfor %}
{% if INGRESS_NODEPORT_LB == "yes" %}
listen ingress-node
bind 0.0.0.0:80
mode tcp
option tcplog
balance {{ BALANCE_ALG }}
{% for host in groups['kube-node'] %}
server {{ host }} {{ host }}:23456 check inter 2000 fall 2 rise 2 weight 1
{% endfor %}
{% endif %}
{% if INGRESS_TLS_NODEPORT_LB == "yes" %}
listen ingress-node-tls
bind 0.0.0.0:443
mode tcp
option tcplog
balance {{ BALANCE_ALG }}
{% for host in groups['kube-node'] %}
server {{ host }} {{ host }}:23457 check inter 2000 fall 2 rise 2 weight 1
{% endfor %}
{% endif %}

View File

@ -1,13 +0,0 @@
[Unit]
Description=HAProxy Load Balancer
After=syslog.target network.target
[Service]
EnvironmentFile=/etc/sysconfig/haproxy
ExecStartPre=/usr/bin/mkdir -p /run/haproxy
ExecStart=/usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid $OPTIONS
ExecReload=/bin/kill -USR2 $MAINPID
KillMode=mixed
[Install]
WantedBy=multi-user.target

View File

@ -1,30 +0,0 @@
global_defs {
router_id lb-backup-{{ inventory_hostname }}
}
vrrp_script check-haproxy {
script "killall -0 haproxy"
interval 5
weight -60
}
vrrp_instance VI-kube-master {
state BACKUP
priority {{ 119 | random(61, 1) }}
unicast_src_ip {{ inventory_hostname }}
unicast_peer {
{% for h in groups['lb'] %}{% if h != inventory_hostname %}
{{ h }}
{% endif %}{% endfor %}
}
dont_track_primary
interface {{ LB_IF }}
virtual_router_id {{ ROUTER_ID }}
advert_int 3
track_script {
check-haproxy
}
virtual_ipaddress {
{{ MASTER_IP }}
}
}

View File

@ -1,30 +0,0 @@
global_defs {
router_id lb-master-{{ inventory_hostname }}
}
vrrp_script check-haproxy {
script "killall -0 haproxy"
interval 5
weight -60
}
vrrp_instance VI-kube-master {
state MASTER
priority 120
unicast_src_ip {{ inventory_hostname }}
unicast_peer {
{% for h in groups['lb'] %}{% if h != inventory_hostname %}
{{ h }}
{% endif %}{% endfor %}
}
dont_track_primary
interface {{ LB_IF }}
virtual_router_id {{ ROUTER_ID }}
advert_int 3
track_script {
check-haproxy
}
virtual_ipaddress {
{{ MASTER_IP }}
}
}

View File

@ -210,14 +210,16 @@
- name: stop and disable chrony in Ubuntu
service: name=chrony state=stopped enabled=no
ignore_errors: true
tags: rm_ntp
when: ansible_distribution == "Ubuntu" or ansible_distribution == "Debian"
when:
- 'ansible_distribution in ["Ubuntu","Debian"]'
- "groups['chrony']|length > 0"
- name: stop and disable chronyd in CentOS/RedHat
service: name=chronyd state=stopped enabled=no
ignore_errors: true
tags: rm_ntp
when: ansible_distribution == "CentOS" or ansible_distribution == "RedHat"
when:
- 'ansible_distribution in ["CentOS","RedHat","Amazon"]'
- "groups['chrony']|length > 0"
- name: 清理证书目录和文件
file: name={{ item }} state=absent