Add support for the kube-ovn network plugin

pull/626/head
gjmzj 2019-05-22 11:17:42 +08:00
parent b4e373b01e
commit 828de9d3b5
7 changed files with 543 additions and 1 deletion

@@ -7,3 +7,4 @@
- { role: cilium, when: "CLUSTER_NETWORK == 'cilium'" }
- { role: flannel, when: "CLUSTER_NETWORK == 'flannel'" }
- { role: kube-router, when: "CLUSTER_NETWORK == 'kube-router'" }
- { role: kube-ovn, when: "CLUSTER_NETWORK == 'kube-ovn'" }
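With the role entry in place, kube-ovn is selected the same way as the other plugins listed above: by setting CLUSTER_NETWORK before running the playbook. A minimal sketch, assuming the same hosts-file convention used for the existing plugins (only the value changes):

# in the cluster's ansible hosts file
CLUSTER_NETWORK="kube-ovn"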

@@ -68,6 +68,7 @@
- { role: cilium, when: "CLUSTER_NETWORK == 'cilium'" }
- { role: flannel, when: "CLUSTER_NETWORK == 'flannel'" }
- { role: kube-router, when: "CLUSTER_NETWORK == 'kube-router'" }
- { role: kube-ovn, when: "CLUSTER_NETWORK == 'kube-ovn'" }
# to install cluster-addons
- hosts:

@@ -133,6 +133,10 @@
- "/var/lib/cni/"
- "/var/lib/kube-router/"
- "/opt/kube/kube-system/"
- "/var/run/openvswitch/"
- "/etc/origin/openvswitch/"
- "/etc/openvswitch/"
- "/var/log/openvswitch/"
- name: cleanup iptables
shell: "iptables -F && iptables -X \
@@ -149,7 +153,9 @@
ip link del dummy0; \
ip link del kube-ipvs0; \
ip link del cilium_net; \
ip link del cilium_vxlan"
ip link del cilium_vxlan; \
ip link del ovn0; \
ip link del ovs-system"
ignore_errors: true
- name: cleanup networks2
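After the cleanup tasks run, the OVS/OVN leftovers added above can be spot-checked by hand on a node; a quick sketch using the link names and directories from the lists in this file:

$ ip link show ovn0 2>/dev/null || echo "ovn0 removed"
$ ip link show ovs-system 2>/dev/null || echo "ovs-system removed"
$ ls /etc/openvswitch /var/run/openvswitch 2>/dev/null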

@@ -0,0 +1,5 @@
# Node chosen to run the OVN DB and OVN Control Plane; defaults to the first master node
OVN_DB_NODE: "{{ groups['kube-master'][0] }}"
# Offline image tarball
kube_ovn_offline: "kube_ovn_0.4.0.tar"
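Both defaults can be overridden per cluster. For example, pinning the OVN DB to a node other than the first master could be done with a normal Ansible extra-var; the playbook name and host below are placeholders, not part of this commit:

$ ansible-playbook <playbook>.yml -e "OVN_DB_NODE=192.168.1.11"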

@@ -0,0 +1,78 @@
- block:
- name: Create the required directory on the deploy node
file: name=/opt/kube/kube-ovn state=directory
- name: Configure the kube-ovn.yaml file
template: src=kube-ovn.yaml.j2 dest=/opt/kube/kube-ovn/kube-ovn.yaml
- name: Configure the ovn.yaml file
template: src=ovn.yaml.j2 dest=/opt/kube/kube-ovn/ovn.yaml
delegate_to: "{{ groups.deploy[0] }}"
run_once: true
- name: Create the required directories
file: name={{ item }} state=directory
with_items:
- /etc/cni/net.d
- /opt/kube/images
# [Optional] push the offline images; execution errors can be ignored
- block:
- name: Check whether the offline kube_ovn images have been downloaded
command: "ls {{ base_dir }}/down"
register: download_info
connection: local
run_once: true
- name: Try to push the offline docker images (failures can be ignored)
copy: src={{ base_dir }}/down/{{ item }} dest=/opt/kube/images/{{ item }}
when: 'item in download_info.stdout'
with_items:
- "pause_3.1.tar"
- "{{ kube_ovn_offline }}"
ignore_errors: true
- name: Check which kube_ovn offline images were pushed
command: "ls /opt/kube/images"
register: image_info
# If offline images exist in the directory, load them onto the node
- name: Load the offline kube_ovn images (failures can be ignored)
shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ item }}"
with_items:
- "pause_3.1.tar"
- "{{ kube_ovn_offline }}"
ignore_errors: true
when: "item in image_info.stdout and CONTAINER_RUNTIME == 'docker'"
- name: Load the offline kube_ovn images (failures can be ignored)
shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ item }}"
with_items:
- "pause_3.1.tar"
- "{{ kube_ovn_offline }}"
ignore_errors: true
when: "item in image_info.stdout and CONTAINER_RUNTIME == 'containerd'"
# Only needs to run once, on a single node
- name: Deploy the kube-ovn network
shell: "{{ bin_dir }}/kubectl label node {{ OVN_DB_NODE }} kube-ovn/role=master --overwrite && \
{{ bin_dir }}/kubectl apply -f /opt/kube/kube-ovn/ovn.yaml && sleep 5 && \
{{ bin_dir }}/kubectl apply -f /opt/kube/kube-ovn/kube-ovn.yaml"
delegate_to: "{{ groups.deploy[0] }}"
run_once: true
# Remove the pre-existing cni config
- name: Remove the default cni config
file: path=/etc/cni/net.d/10-default.conf state=absent
# Wait for the network plugin to finish deploying; time depends on image download speed
- name: Poll until kube-ovn is running (time depends on image download speed)
shell: "{{ bin_dir }}/kubectl get pod -n kube-ovn -o wide|grep 'kube-ovn-cni'|grep ' {{ inventory_hostname }} '|awk '{print $3}'"
register: pod_status
until: pod_status.stdout == "Running"
delegate_to: "{{ groups.deploy[0] }}"
retries: 15
delay: 8
ignore_errors: true
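The polling task above is equivalent to running the status check by hand from the deploy node while the images download:

$ kubectl get pod -n kube-ovn -o wide    # kube-ovn-cni on each node should reach Running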

@@ -0,0 +1,128 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: kube-ovn-controller
namespace: kube-ovn
annotations:
kubernetes.io/description: |
kube-ovn controller
spec:
replicas: 2
selector:
matchLabels:
app: kube-ovn-controller
strategy:
rollingUpdate:
maxSurge: 0%
maxUnavailable: 100%
type: RollingUpdate
template:
metadata:
labels:
app: kube-ovn-controller
component: network
type: infra
spec:
tolerations:
- operator: Exists
effect: NoSchedule
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: kube-ovn-controller
topologyKey: kubernetes.io/hostname
serviceAccountName: ovn
hostNetwork: true
containers:
- name: kube-ovn-controller
image: "index.alauda.cn/alaudak8s/kube-ovn-controller:v0.4.0"
imagePullPolicy: IfNotPresent
command:
- /kube-ovn/start-controller.sh
args:
- --default-cidr=10.16.0.0/16
- --node-switch-cidr=100.64.0.0/16
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: KUBE_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
nodeSelector:
beta.kubernetes.io/os: "linux"
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: kube-ovn-cni
namespace: kube-ovn
annotations:
kubernetes.io/description: |
This daemon set launches the kube-ovn cni daemon.
spec:
selector:
matchLabels:
app: kube-ovn-cni
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
app: kube-ovn-cni
component: network
type: infra
spec:
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: ovn
hostNetwork: true
hostPID: true
initContainers:
- name: install-cni
image: "index.alauda.cn/alaudak8s/kube-ovn-cni:v0.4.0"
imagePullPolicy: IfNotPresent
command: ["/kube-ovn/install-cni.sh"]
volumeMounts:
- mountPath: /etc/cni/net.d
name: cni-conf
- mountPath: /opt/cni/bin
name: cni-bin
containers:
- name: cni-server
image: "index.alauda.cn/alaudak8s/kube-ovn-cni:v0.4.0"
imagePullPolicy: IfNotPresent
securityContext:
runAsUser: 0
privileged: true
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /run/openvswitch
name: host-run-ovs
nodeSelector:
beta.kubernetes.io/os: "linux"
volumes:
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: cni-conf
hostPath:
path: /etc/cni/net.d
- name: cni-bin
hostPath:
path: {{ bin_dir }}
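Note that --default-cidr is the pod subnet kube-ovn allocates addresses from; it should normally not overlap with the node-switch or service networks of the cluster. Once the manifest is applied, the rollout can be checked with standard kubectl commands, using the resource names defined above:

$ kubectl -n kube-ovn rollout status deployment/kube-ovn-controller
$ kubectl -n kube-ovn get daemonset kube-ovn-cni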

@@ -0,0 +1,323 @@
apiVersion: v1
kind: Namespace
metadata:
name: kube-ovn
---
apiVersion: v1
kind: ConfigMap
metadata:
name: ovn-config
namespace: kube-ovn
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: ovn
namespace: kube-ovn
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.k8s.io/system-only: "true"
name: system:ovn-reader
rules:
- apiGroups:
- ""
- extensions
resources:
- pods
- namespaces
- networkpolicies
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ovn-cluster-reader
roleRef:
name: cluster-reader
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: ovn
namespace: kube-ovn
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ovn-reader
roleRef:
name: system:ovn-reader
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: ovn
namespace: kube-ovn
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cluster-admin-0
roleRef:
name: cluster-admin
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: ovn
namespace: kube-ovn
---
kind: Service
apiVersion: v1
metadata:
name: ovn-nb
namespace: kube-ovn
spec:
ports:
- name: ovn-nb
protocol: TCP
port: 6641
targetPort: 6641
type: ClusterIP
selector:
app: ovn-central
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: ovn-sb
namespace: kube-ovn
spec:
ports:
- name: ovn-sb
protocol: TCP
port: 6642
targetPort: 6642
type: ClusterIP
selector:
app: ovn-central
sessionAffinity: None
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: ovn-central
namespace: kube-ovn
annotations:
kubernetes.io/description: |
OVN components: northd, nb and sb.
spec:
replicas: 1
strategy:
rollingUpdate:
maxSurge: 0%
maxUnavailable: 100%
type: RollingUpdate
selector:
matchLabels:
app: ovn-central
template:
metadata:
labels:
app: ovn-central
component: network
type: infra
spec:
tolerations:
- operator: Exists
effect: NoSchedule
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: ovn-central
topologyKey: kubernetes.io/hostname
serviceAccountName: ovn
hostNetwork: true
containers:
- name: ovn-central
image: "index.alauda.cn/alaudak8s/kube-ovn-db:v0.4.0"
imagePullPolicy: IfNotPresent
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
resources:
requests:
cpu: 200m
memory: 300Mi
limits:
cpu: 400m
memory: 800Mi
volumeMounts:
- mountPath: /run/openvswitch
name: host-run-ovs
- mountPath: /var/run/openvswitch
name: host-run-ovs
- mountPath: /sys
name: host-sys
readOnly: true
- mountPath: /etc/openvswitch
name: host-config-openvswitch
- mountPath: /var/log/openvswitch
name: host-log
readinessProbe:
exec:
command:
- sh
- /root/ovn-is-leader.sh
periodSeconds: 3
livenessProbe:
exec:
command:
- sh
- /root/ovn-healthcheck.sh
initialDelaySeconds: 30
periodSeconds: 7
failureThreshold: 5
nodeSelector:
beta.kubernetes.io/os: "linux"
kube-ovn/role: "master"
volumes:
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-sys
hostPath:
path: /sys
- name: host-config-openvswitch
hostPath:
path: /etc/origin/openvswitch
- name: host-log
hostPath:
path: /var/log/openvswitch
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: ovs-ovn
namespace: kube-ovn
annotations:
kubernetes.io/description: |
This daemon set launches the openvswitch daemon.
spec:
selector:
matchLabels:
app: ovs
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
app: ovs
component: network
type: infra
spec:
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: ovn
hostNetwork: true
hostPID: true
containers:
- name: openvswitch
image: "index.alauda.cn/alaudak8s/kube-ovn-node:v0.4.0"
imagePullPolicy: IfNotPresent
securityContext:
runAsUser: 0
privileged: true
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- mountPath: /lib/modules
name: host-modules
readOnly: true
- mountPath: /run/openvswitch
name: host-run-ovs
- mountPath: /var/run/openvswitch
name: host-run-ovs
- mountPath: /sys
name: host-sys
readOnly: true
- mountPath: /etc/openvswitch
name: host-config-openvswitch
- mountPath: /var/log/openvswitch
name: host-log
readinessProbe:
exec:
command:
- sh
- /root/ovs-healthcheck.sh
periodSeconds: 5
livenessProbe:
exec:
command:
- sh
- /root/ovs-healthcheck.sh
initialDelaySeconds: 10
periodSeconds: 5
failureThreshold: 5
resources:
requests:
cpu: 100m
memory: 300Mi
limits:
cpu: 200m
memory: 400Mi
nodeSelector:
beta.kubernetes.io/os: "linux"
volumes:
- name: host-modules
hostPath:
path: /lib/modules
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-sys
hostPath:
path: /sys
- name: host-config-openvswitch
hostPath:
path: /etc/origin/openvswitch
- name: host-log
hostPath:
path: /var/log/openvswitch
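Because ovn-central carries the nodeSelector kube-ovn/role: "master", it only schedules after the playbook has labelled the OVN_DB_NODE. A quick verification sketch using standard kubectl and the labels/names defined above:

$ kubectl get node -l kube-ovn/role=master
$ kubectl -n kube-ovn get pod -l app=ovn-central -o wide
$ kubectl -n kube-ovn get daemonset ovs-ovn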