update kube-router v0.3.1

pull/626/head
gjmzj 2019-05-27 00:04:00 +08:00
parent f8e533e203
commit 5859315c3e
8 changed files with 45 additions and 289 deletions

View File

@@ -54,31 +54,27 @@
- kube-node
tasks:
- block:
- name: clean 'kube-router' stuff
shell: "{{ bin_dir }}/docker run --privileged --net=host cloudnativelabs/kube-router --cleanup-config"
ignore_errors: true
when: "CLUSTER_NETWORK == 'kube-router'"
- name: 获取是否运行名为'kubeasz'的容器
- name: to check if container 'kubeasz' is running
shell: 'docker ps|grep kubeasz || echo "NOT FOUND"'
register: install_info
- name: fail info
fail: msg="you CAN NOT delete dockerd, because container 'kubeasz' is running!"
when: "'kubeasz' in install_info.stdout"
- name: stop and disable docker service
service:
name: docker
state: stopped
enabled: no
ignore_errors: true
when: "'kubeasz' not in install_info.stdout"
# as the k8s network plugins use the host network, '/var/run/docker/netns/default' must be unmounted
- name: unmount docker filesystem-1
mount: path=/var/run/docker/netns/default state=unmounted
when: "'kubeasz' not in install_info.stdout"
- name: unmount docker filesystem-2
mount: path=/var/lib/docker/overlay state=unmounted
when: "'kubeasz' not in install_info.stdout"
- name: remove files and dirs
file: name={{ item }} state=absent
@@ -90,7 +86,6 @@
- "/etc/systemd/system/docker.service.d/"
- "/etc/bash_completion.d/docker"
- "/usr/bin/docker"
when: "'kubeasz' not in install_info.stdout"
when: CONTAINER_RUNTIME == 'docker'
- block:

View File

@@ -5,8 +5,8 @@
- **Cluster features** mutual `TLS` authentication, `RBAC` authorization, multi-`Master` high availability, `Network Policy` support, backup and restore
- **Cluster versions** kubernetes v1.8, v1.9, v1.10, v1.11, v1.12, v1.13, v1.14
- **Operating systems** Ubuntu 16.04+, CentOS/RedHat 7
- **Runtime** docker 17.03.x-ce, 18.06.x-ce, 18.09.x, containerd 1.2.6
- **Network** calico, cilium, flannel, kube-router
- **Runtime** docker 17.03.x-ce, 18.06.x-ce, 18.09.x, [containerd](docs/guide/containerd.md) 1.2.6
- **Network** [calico](docs/setup/network-plugin/calico.md), [cilium](docs/setup/network-plugin/cilium.md), [flannel](docs/setup/network-plugin/flannel.md), [kube-ovn](docs/setup/network-plugin/kube-ovn.md), [kube-router](docs/setup/network-plugin/kube-router.md)
## Quick Start Guide

View File

@@ -3,7 +3,7 @@
kube-router is a simple and efficient network plugin that provides an all-in-one solution:
- Pod network interconnection (Routing), based on GoBGP
- Network policy support (Firewall/NetworkPolicy), using ipset-optimized iptables
- High-performance service proxy (Service Proxy), based on IPVS/LVS
- High-performance service proxy (Service Proxy), based on IPVS/LVS (note: since ipvs is already usable in newer k8s releases, kube-router's ipvs-based service proxy is not enabled here)
For more details, see `https://github.com/cloudnativelabs/kube-router`
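As a quick sanity check of the Routing function, the pod subnets of the other nodes should show up in each node's routing table, pointed at those nodes' IPs. A minimal sketch (the CIDR and node IPs are illustrative, matching the sample outputs later in this document):

``` bash
# on any node, routes to the other nodes' pod subnets should be present,
# learned through kube-router's BGP peering (addresses are illustrative)
ip route | grep 172.20
# expected: entries like "172.20.1.0/24 via 192.168.1.2 dev eth0"
```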
@@ -56,7 +56,7 @@ tcp 0 0 192.168.1.3:179 192.168.1.2:43928 ESTABLISHED 18
```
- 4. To verify that NetworkPolicy works, see [here](guide/networkpolicy.md)
- 4. To verify that NetworkPolicy works, see [here](../../guide/networkpolicy.md)
- 5. Viewing the ipset lists
@@ -86,22 +86,3 @@ Members:
192.168.1.3 timeout 0
...
```
- 6. Viewing the IPVS virtual servers (requires `SERVICE_PROXY: "true"` in roles/kube-router/defaults/main.yml)
``` bash
# first create a test application
$ kubectl run nginx --image=nginx --replicas=3 --port=80 --expose
# check the ipvsadm output
$ ipvsadm
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.68.0.1:https rr persistent 10800 # this is the kubernetes virtual service address
-> 192.168.1.1:6443 Masq 1 0 0
TCP 10.68.199.39:http rr # this is the virtual service address of the test nginx application
-> 172.20.1.5:http Masq 1 0 0
-> 172.20.2.6:http Masq 1 0 0
-> 172.20.2.8:http Masq 1 0 0
```

View File

@@ -1,22 +1,17 @@
# for more settings see https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md
# set to true if the nodes have multiple network interfaces
# additionally, setting it to true has been found to work around an occasional bug on v1.10 with ipvs, where pods hit 'dial tcp 10.68.0.1:443: i/o timeout'
NODE_WITH_MULTIPLE_NETWORKS: "true"
# since kube-proxy already provides an ipvs mode, kube-router's service_proxy is not used here
#SERVICE_PROXY: "false"
# toggle for the Router feature
ROUTER_ENABLE: "true"
# public clouds have restrictions and generally require ipinip to stay enabled; self-hosted environments can set this to "subnet"
OVERLAY_TYPE: "full"
# toggle for NetworkPolicy support
FIREWALL_ENABLE: "true"
# toggle for the service-proxy; choosing 'false' means the cluster's default kube-proxy is used
SERVICE_PROXY: "false"
# kube-router image version
kube_router_ver: "v0.2.0"
kube_router_ver: "v0.3.1"
busybox_ver: "1.28.4"
PullPolicy: "IfNotPresent"
# kube-router offline image tarball
kuberouter_offline: "kube-router_{{ kube_router_ver }}.tar"
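After bumping `kube_router_ver`, a quick way to confirm the DaemonSet really runs the new image (a sketch, assuming kubectl access to the cluster):

``` bash
# print the image referenced by the kube-router DaemonSet
kubectl -n kube-system get ds kube-router \
  -o jsonpath='{.spec.template.spec.containers[0].image}'
# expected: cloudnativelabs/kube-router:v0.3.1
```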

View File

@@ -4,11 +4,6 @@
- name: prepare the kube-router DaemonSet config (without IPVS)
template: src=kuberouter.yaml.j2 dest=/opt/kube/kube-system/kube-router/kuberouter.yaml
when: 'SERVICE_PROXY != "true"'
- name: prepare the kube-router DaemonSet config (with IPVS)
template: src=kuberouter-all.yaml.j2 dest=/opt/kube/kube-system/kube-router/kuberouter.yaml
when: 'SERVICE_PROXY == "true"'
delegate_to: "{{ groups.deploy[0] }}"
run_once: true
@@ -26,17 +21,6 @@
- loopback
- portmap
# kube-router with service proxy (IPVS/LVS)
- name: stop the kube-proxy service
service: name=kube-proxy state=stopped enabled=no
when: 'SERVICE_PROXY == "true"'
ignore_errors: true
- name: clean up iptables/ipvs rules created by kube-proxy
shell: "{{ bin_dir }}/kube-proxy --cleanup; ipvsadm -C"
when: 'SERVICE_PROXY == "true"'
ignore_errors: true
# [optional] push the offline docker image; execution errors can be ignored
- block:
- name: check whether the offline kube-router image has been downloaded
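The removed tasks above are what previously stopped kube-proxy when `SERVICE_PROXY: "true"` was set; a cluster deployed that way needs kube-proxy brought back by hand before switching to this proxy-less setup. A rough recovery sketch, assuming kube-proxy is managed as a systemd unit as the old task implies:

``` bash
# clear IPVS entries left behind by kube-router's service proxy,
# then re-enable the cluster's own kube-proxy
ipvsadm -C
systemctl enable kube-proxy
systemctl restart kube-proxy
```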

View File

@@ -1,202 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-router-cfg
namespace: kube-system
labels:
tier: node
k8s-app: kube-router
data:
cni-conf.json: |
{
"name":"kubernetes",
"type":"bridge",
"bridge":"kube-bridge",
"isDefaultGateway":true,
"ipam": {
"type":"host-local"
}
}
kubeconfig: |
apiVersion: v1
kind: Config
clusterCIDR: "{{ CLUSTER_CIDR }}"
clusters:
- name: cluster
cluster:
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
server: {{ KUBE_APISERVER }}
users:
- name: kube-router
user:
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
contexts:
- context:
cluster: cluster
user: kube-router
name: kube-router-context
current-context: kube-router-context
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
labels:
k8s-app: kube-router
tier: node
name: kube-router
namespace: kube-system
spec:
template:
metadata:
labels:
k8s-app: kube-router
tier: node
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
serviceAccountName: kube-router
containers:
- name: kube-router
image: cloudnativelabs/kube-router:{{ kube_router_ver }}
imagePullPolicy: {{ PullPolicy }}
args:
- "--run-router=true"
- "--run-firewall={{ FIREWALL_ENABLE }}"
- "--run-service-proxy=true"
- "--kubeconfig=/var/lib/kube-router/kubeconfig"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %}
# if hosts have multiple net interfaces, set following two ENVs
- name: KUBERNETES_SERVICE_HOST
value: "{{ MASTER_IP }}"
#value: "{{ KUBE_APISERVER.split(':')[1].lstrip('/') }}"
- name: KUBERNETES_SERVICE_PORT
value: "{{ KUBE_APISERVER.split(':')[2] }}"
{% endif %}
livenessProbe:
httpGet:
path: /healthz
port: 20244
initialDelaySeconds: 10
periodSeconds: 3
resources:
requests:
cpu: 250m
memory: 250Mi
securityContext:
privileged: true
volumeMounts:
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: cni-conf-dir
mountPath: /etc/cni/net.d
- name: kubeconfig
mountPath: /var/lib/kube-router
readOnly: true
initContainers:
- name: install-cni
image: busybox:{{ busybox_ver }}
imagePullPolicy: {{ PullPolicy }}
command:
- /bin/sh
- -c
- set -e -x;
if [ ! -f /etc/cni/net.d/10-kuberouter.conf ]; then
TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
cp /etc/kube-router/cni-conf.json ${TMP};
mv ${TMP} /etc/cni/net.d/10-kuberouter.conf;
fi;
if [ ! -f /var/lib/kube-router/kubeconfig ]; then
TMP=/var/lib/kube-router/.tmp-kubeconfig;
cp /etc/kube-router/kubeconfig ${TMP};
mv ${TMP} /var/lib/kube-router/kubeconfig;
fi
volumeMounts:
- mountPath: /etc/cni/net.d
name: cni-conf-dir
- mountPath: /etc/kube-router
name: kube-router-cfg
- name: kubeconfig
mountPath: /var/lib/kube-router
hostNetwork: true
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
volumes:
- name: lib-modules
hostPath:
path: /lib/modules
- name: cni-conf-dir
hostPath:
path: /etc/cni/net.d
- name: kube-router-cfg
configMap:
name: kube-router-cfg
- name: kubeconfig
hostPath:
path: /var/lib/kube-router
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-router
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: kube-router
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- namespaces
- pods
- services
- nodes
- endpoints
verbs:
- list
- get
- watch
- apiGroups:
- "networking.k8s.io"
resources:
- networkpolicies
verbs:
- list
- get
- watch
- apiGroups:
- extensions
resources:
- networkpolicies
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: kube-router
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kube-router
subjects:
- kind: ServiceAccount
name: kube-router
namespace: kube-system

View File

@@ -9,13 +9,19 @@ metadata:
data:
cni-conf.json: |
{
"name":"kubernetes",
"type":"bridge",
"bridge":"kube-bridge",
"isDefaultGateway":true,
"ipam": {
"type":"host-local"
}
"cniVersion":"0.3.0",
"name":"mynet",
"plugins":[
{
"name":"kubernetes",
"type":"bridge",
"bridge":"kube-bridge",
"isDefaultGateway":true,
"ipam":{
"type":"host-local"
}
}
]
}
---
@@ -40,8 +46,10 @@ spec:
containers:
- name: kube-router
image: cloudnativelabs/kube-router:{{ kube_router_ver }}
imagePullPolicy: {{ PullPolicy }}
imagePullPolicy: IfNotPresent
args:
- "--hostname-override=RT-{{ inventory_hostname }}"
- "--overlay-type={{ OVERLAY_TYPE }}"
- "--run-router=true"
- "--run-firewall={{ FIREWALL_ENABLE }}"
- "--run-service-proxy=false"
@@ -50,14 +58,8 @@ spec:
valueFrom:
fieldRef:
fieldPath: spec.nodeName
{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %}
# if hosts have multiple net interfaces, set following two ENVs
- name: KUBERNETES_SERVICE_HOST
value: "{{ MASTER_IP }}"
#value: "{{ KUBE_APISERVER.split(':')[1].lstrip('/') }}"
- name: KUBERNETES_SERVICE_PORT
value: "{{ KUBE_APISERVER.split(':')[2] }}"
{% endif %}
- name: KUBE_ROUTER_CNI_CONF_FILE
value: /etc/cni/net.d/10-kuberouter.conflist
livenessProbe:
httpGet:
path: /healthz
@@ -79,15 +81,18 @@ spec:
initContainers:
- name: install-cni
image: busybox:{{ busybox_ver }}
imagePullPolicy: {{ PullPolicy }}
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- set -e -x;
if [ ! -f /etc/cni/net.d/10-kuberouter.conf ]; then
if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
if [ -f /etc/cni/net.d/*.conf ]; then
rm -f /etc/cni/net.d/*.conf;
fi;
TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
cp /etc/kube-router/cni-conf.json ${TMP};
mv ${TMP} /etc/cni/net.d/10-kuberouter.conf;
mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
fi
volumeMounts:
- mountPath: /etc/cni/net.d
@@ -101,6 +106,9 @@ spec:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/not-ready
operator: Exists
volumes:
- name: lib-modules
hostPath:

View File

@@ -66,30 +66,26 @@
# clean up the cluster's docker service and network
- block:
- name: clean up kube-router stuff
shell: "{{ bin_dir }}/docker run --privileged --net=host cloudnativelabs/kube-router --cleanup-config"
ignore_errors: true
when: "CLUSTER_NETWORK == 'kube-router'"
- name: check whether a container named 'kubeasz' is running
shell: 'docker ps|grep kubeasz || echo "NOT FOUND"'
register: install_info
- name: fail info3
fail: msg="you CAN NOT delete dockerd, because container 'kubeasz' is running!"
when: "'kubeasz' in install_info.stdout"
- name: stop and disable docker service
service:
name: docker
state: stopped
enabled: no
ignore_errors: true
when: "'kubeasz' not in install_info.stdout"
- name: unmount docker filesystem-1
mount: path=/var/run/docker/netns/default state=unmounted
when: "'kubeasz' not in install_info.stdout"
- name: unmount docker filesystem-2
mount: path=/var/lib/docker/overlay state=unmounted
when: "'kubeasz' not in install_info.stdout"
- name: remove files and dirs
file: name={{ item }} state=absent
@@ -101,7 +97,6 @@
- "/etc/systemd/system/docker.service.d/"
- "/etc/bash_completion.d/docker"
- "/usr/bin/docker"
when: "'kubeasz' not in install_info.stdout"
when: CONTAINER_RUNTIME == 'docker'
- block: