update kube-ovn v1.11.5

pull/1282/head
gjmzj 2023-05-14 20:53:18 +08:00
parent ef3d68edc4
commit 908c5a57bd
9 changed files with 3690 additions and 1337 deletions

ezdown

@@ -33,14 +33,11 @@ pauseVer=3.9
 # images not downloaded by default(only download with '-X')
 ciliumVer=1.13.2
 flannelVer=v0.21.4
+kubeRouterVer=v0.3.1
+kubeOvnVer=v1.11.5
 nfsProvisionerVer=v4.0.2
 promChartVer=45.23.0
-# images not downloaded
-kubeRouterVer=v0.3.1
-kubeOvnVer=v1.5.3
 function usage() {
   echo -e "\033[33mUsage:\033[0m ezdown [options] [args]"
   cat <<EOF
@@ -316,7 +313,7 @@ function get_ext_bin() {
 }
 function get_sys_pkg() {
-  [[ -f "$BASE/down/packages/chrony_xenial.tar.gz" ]] && { logger warn "system packages existed"; return 0; }
+  [[ -f "$BASE/down/packages/$1.tgz" ]] && { logger warn "system packages for $1 existed"; return 0; }
   docker ps -a |grep -q temp_sys_pkg && { logger debug "remove existing container"; docker rm -f temp_sys_pkg; }
   logger info "downloading system packages kubeasz-sys-pkg:$SYS_PKG_VER"
@@ -470,6 +467,18 @@ function get_extra_images() {
       docker push "easzlab.io.local:5000/flannel/flannel-cni-plugin:v1.1.2"
       ;;
+    # kube-ovn images
+    kube-ovn)
+      if [[ ! -f "$imageDir/kube-ovn_$kubeOvnVer.tar" ]];then
+        docker pull "kubeovn/kube-ovn:$kubeOvnVer" && \
+        docker save -o "$imageDir/kube-ovn_$kubeOvnVer.tar" "kubeovn/kube-ovn:$kubeOvnVer"
+      else
+        docker load -i "$imageDir/kube-ovn_$kubeOvnVer.tar"
+      fi
+      docker tag "kubeovn/kube-ovn:$kubeOvnVer" "easzlab.io.local:5000/kubeovn/kube-ovn:$kubeOvnVer"
+      docker push "easzlab.io.local:5000/kubeovn/kube-ovn:$kubeOvnVer"
+      ;;
     # network-check images
     network-check)
       if [[ ! -f "$imageDir/network-check.tar" ]];then
@@ -655,7 +664,7 @@ function main() {
         [[ $OPTARG =~ (ubuntu_[0-9]+|centos_[0-9]+|debian_[0-9]+|fedora_[0-9]+|almalinux_[0-9]+|opensuse_leap_[0-9]+|rocky_[0-9]+) ]] || \
           { usage-down-sys-pkg; exit 1; }
         SYS_PKG_VER="${SYS_PKG_VER}_$OPTARG"
-        ACTION="get_sys_pkg"
+        ACTION="get_sys_pkg $OPTARG"
         ;;
       R)
         ACTION="get_harbor_offline_pkg"


@@ -110,15 +110,13 @@
       - "/sys/fs/bpf/tc/"
       - "/var/lib/cni/"
       - "/var/lib/kube-router/"
-      - "/opt/kube/kube-ovn/"
       - "/var/run/openvswitch/"
-      - "/etc/origin/openvswitch/"
-      - "/etc/openvswitch/"
-      - "/var/log/openvswitch/"
       - "/var/run/ovn/"
+      - "/etc/origin/openvswitch/"
       - "/etc/origin/ovn/"
       - "/etc/ovn/"
+      - "/var/log/openvswitch/"
       - "/var/log/ovn/"
       - "/var/log/kube-ovn/"
     ignore_errors: true
     when: "inventory_hostname in groups['kube_master'] or inventory_hostname in groups['kube_node']"


@@ -1,42 +1,19 @@
-- block:
-    - name: Register variable ovn_default_gateway
-      shell: echo {{ CLUSTER_CIDR }}|cut -d/ -f1|awk -F. '{print $1"."$2"."$3"."$4+1}'
-      register: ovn_default_gateway
-    - name: Set fact kube_ovn_default_gateway
-      set_fact: kube_ovn_default_gateway={{ ovn_default_gateway.stdout }}
-    - name: Create the config files
-      template: src={{ item }}.j2 dest={{ cluster_dir }}/yml/{{ item }}
-      with_items:
-        - crd.yaml
-        - kube-ovn.yaml
-        - ovn.yaml
-    - name: Remove the kube-ovn network
-      shell: "{{ base_dir }}/bin/kubectl delete -f {{ cluster_dir }}/yml/crd.yaml; \
-              {{ base_dir }}/bin/kubectl delete -f {{ cluster_dir }}/yml/ovn.yaml; \
-              {{ base_dir }}/bin/kubectl delete -f {{ cluster_dir }}/yml/kube-ovn.yaml; sleep 3"
-      tags: force_change_certs
-      when: 'CHANGE_CA|bool'
-    - name: Deploy the kube-ovn network
-      shell: "{{ base_dir }}/bin/kubectl label node {{ OVN_DB_NODE }} kube-ovn/role=master --overwrite && \
-              {{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/crd.yaml && sleep 5 && \
-              {{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/ovn.yaml && sleep 5 && \
-              {{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/kube-ovn.yaml"
-      tags: force_change_certs
-  run_once: true
-  connection: local
 # remove any pre-existing CNI config
 - name: Remove the default CNI config
   file: path=/etc/cni/net.d/10-default.conf state=absent
 - name: Configure the kubectl plugin
   template: src=kubectl-ko.j2 dest=/usr/local/bin/kubectl-ko mode=0755
+- block:
+    - name: Prepare the install script
+      template: src=install.sh.j2 dest={{ cluster_dir }}/yml/install.sh
+    # waiting for the network plugin to deploy can take a while, depending on image download speed
+    - name: Install the kube-ovn network
+      shell: 'export PATH="{{ base_dir }}/bin/:$PATH"; cd {{ cluster_dir }}/yml/ && \
+              bash install.sh >> /tmp/install-kube-ovn-`date +"%Y%m%d%H%M%S"`.log 2>&1'
+  run_once: true
+  ignore_errors: true
+  connection: local
+# wait for the network plugin to be deployed
+- name: Poll until kube-ovn is running (depends on image download speed)
+  shell: "{{ base_dir }}/bin/kubectl get pod -n kube-ovn -o wide|grep 'kube-ovn-cni'|grep ' {{ K8S_NODENAME }} '|awk '{print $3}'"
+  register: pod_status
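
Expanded on the deploy host, the new flow boils down to the following (a sketch; the base_dir default /etc/kubeasz and a cluster named k8s-01 are assumptions):

  export PATH="/etc/kubeasz/bin/:$PATH"
  cd /etc/kubeasz/clusters/k8s-01/yml/
  bash install.sh >> /tmp/install-kube-ovn-$(date +%Y%m%d%H%M%S).log 2>&1
  # then, per node, poll the kube-ovn-cni pod until its STATUS column reports Running:
  kubectl get pod -n kube-ovn -o wide | grep 'kube-ovn-cni' | grep " $K8S_NODENAME " | awk '{print $3}'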


@@ -1,224 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: ips.kubeovn.io
spec:
  group: kubeovn.io
  versions:
    - name: v1
      served: true
      storage: true
      additionalPrinterColumns:
        - name: IP
          type: string
          jsonPath: .spec.ipAddress
        - name: Mac
          type: string
          jsonPath: .spec.macAddress
        - name: Node
          type: string
          jsonPath: .spec.nodeName
        - name: Subnet
          type: string
          jsonPath: .spec.subnet
      schema:
        openAPIV3Schema:
          type: object
          properties:
            spec:
              type: object
              properties:
                podName:
                  type: string
                namespace:
                  type: string
                subnet:
                  type: string
                attachSubnets:
                  type: array
                  items:
                    type: string
                nodeName:
                  type: string
                ipAddress:
                  type: string
                attachIps:
                  type: array
                  items:
                    type: string
                macAddress:
                  type: string
                attachMacs:
                  type: array
                  items:
                    type: string
                containerID:
                  type: string
  scope: Cluster
  names:
    plural: ips
    singular: ip
    kind: IP
    shortNames:
      - ip
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: subnets.kubeovn.io
spec:
  group: kubeovn.io
  versions:
    - name: v1
      served: true
      storage: true
      subresources:
        status: {}
      additionalPrinterColumns:
        - name: Provider
          type: string
          jsonPath: .spec.provider
        - name: Protocol
          type: string
          jsonPath: .spec.protocol
        - name: CIDR
          type: string
          jsonPath: .spec.cidrBlock
        - name: Private
          type: boolean
          jsonPath: .spec.private
        - name: NAT
          type: boolean
          jsonPath: .spec.natOutgoing
        - name: Default
          type: boolean
          jsonPath: .spec.default
        - name: GatewayType
          type: string
          jsonPath: .spec.gatewayType
        - name: Used
          type: number
          jsonPath: .status.usingIPs
        - name: Available
          type: number
          jsonPath: .status.availableIPs
      schema:
        openAPIV3Schema:
          type: object
          properties:
            status:
              type: object
              properties:
                availableIPs:
                  type: number
                usingIPs:
                  type: number
                activateGateway:
                  type: string
                conditions:
                  type: array
                  items:
                    type: object
                    properties:
                      type:
                        type: string
                      status:
                        type: string
                      reason:
                        type: string
                      message:
                        type: string
                      lastUpdateTime:
                        type: string
                      lastTransitionTime:
                        type: string
            spec:
              type: object
              properties:
                default:
                  type: boolean
                protocol:
                  type: string
                cidrBlock:
                  type: string
                namespaces:
                  type: array
                  items:
                    type: string
                gateway:
                  type: string
                provider:
                  type: string
                excludeIps:
                  type: array
                  items:
                    type: string
                gatewayType:
                  type: string
                allowSubnets:
                  type: array
                  items:
                    type: string
                gatewayNode:
                  type: string
                natOutgoing:
                  type: boolean
                private:
                  type: boolean
                vlan:
                  type: string
                underlayGateway:
                  type: boolean
                disableInterConnection:
                  type: boolean
  scope: Cluster
  names:
    plural: subnets
    singular: subnet
    kind: Subnet
    shortNames:
      - subnet
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: vlans.kubeovn.io
spec:
  group: kubeovn.io
  versions:
    - name: v1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          properties:
            spec:
              type: object
              properties:
                vlanId:
                  type: integer
                providerInterfaceName:
                  type: string
                logicalInterfaceName:
                  type: string
                subnet:
                  type: string
      additionalPrinterColumns:
        - name: VlanID
          type: string
          jsonPath: .spec.vlanId
        - name: ProviderInterfaceName
          type: string
          jsonPath: .spec.providerInterfaceName
        - name: Subnet
          type: string
          jsonPath: .spec.subnet
  scope: Cluster
  names:
    plural: vlans
    singular: vlan
    kind: Vlan
    shortNames:
      - vlan
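
With v1.11.5 these CRDs are created by the generated install.sh instead of a standalone crd.yaml, alongside newer resources such as vpcs and vips. A hedged post-install check:

  kubectl get crd ips.kubeovn.io subnets.kubeovn.io vlans.kubeovn.io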

File diff suppressed because it is too large


@@ -1,372 +0,0 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: kube-ovn-controller
  namespace: kube-system
  annotations:
    kubernetes.io/description: |
      kube-ovn controller
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kube-ovn-controller
  strategy:
    rollingUpdate:
      maxSurge: 0%
      maxUnavailable: 100%
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: kube-ovn-controller
        component: network
        type: infra
    spec:
      tolerations:
        - operator: Exists
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchLabels:
                  app: kube-ovn-controller
              topologyKey: kubernetes.io/hostname
      priorityClassName: system-cluster-critical
      serviceAccountName: ovn
      hostNetwork: true
      containers:
        - name: kube-ovn-controller
          image: "kubeovn/kube-ovn:v1.5.3"
          imagePullPolicy: IfNotPresent
          command:
            - /kube-ovn/start-controller.sh
          args:
            - --default-cidr={{ kube_ovn_default_cidr }}
            - --default-gateway={{ kube_ovn_default_gateway }}
            - --node-switch-cidr={{ kube_ovn_node_switch_cidr }}
            - --network-type=geneve
            - --default-interface-name=
            - --default-vlan-id=100
          env:
            - name: ENABLE_SSL
              value: "false"
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: KUBE_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - mountPath: /var/run/tls
              name: kube-ovn-tls
          readinessProbe:
            exec:
              command:
                - sh
                - /kube-ovn/kube-ovn-controller-healthcheck.sh
            periodSeconds: 3
            timeoutSeconds: 45
          livenessProbe:
            exec:
              command:
                - sh
                - /kube-ovn/kube-ovn-controller-healthcheck.sh
            initialDelaySeconds: 300
            periodSeconds: 7
            failureThreshold: 5
            timeoutSeconds: 45
          resources:
            requests:
              cpu: 200m
              memory: 200Mi
            limits:
              cpu: 1000m
              memory: 1Gi
      nodeSelector:
        kubernetes.io/os: "linux"
      volumes:
        - name: kube-ovn-tls
          secret:
            optional: true
            secretName: kube-ovn-tls
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: kube-ovn-cni
  namespace: kube-system
  annotations:
    kubernetes.io/description: |
      This daemon set launches the kube-ovn cni daemon.
spec:
  selector:
    matchLabels:
      app: kube-ovn-cni
  template:
    metadata:
      labels:
        app: kube-ovn-cni
        component: network
        type: infra
    spec:
      tolerations:
        - operator: Exists
      priorityClassName: system-cluster-critical
      serviceAccountName: ovn
      hostNetwork: true
      hostPID: true
      initContainers:
        - name: install-cni
          image: "kubeovn/kube-ovn:v1.5.3"
          imagePullPolicy: IfNotPresent
          command: ["/kube-ovn/install-cni.sh"]
          securityContext:
            runAsUser: 0
            privileged: true
          volumeMounts:
            - mountPath: /opt/cni/bin
              name: cni-bin
      containers:
        - name: cni-server
          image: "kubeovn/kube-ovn:v1.5.3"
          imagePullPolicy: IfNotPresent
          command:
            - sh
            - /kube-ovn/start-cniserver.sh
          args:
            - --enable-mirror={{ kube_ovn_enable_mirror|string|lower }}
            - --service-cluster-ip-range={{ SERVICE_CIDR }}
            - --encap-checksum=true
            - --iface=
            - --network-type=geneve
            - --default-interface-name=
          securityContext:
            runAsUser: 0
            privileged: true
          env:
            - name: ENABLE_SSL
              value: "false"
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - mountPath: /etc/cni/net.d
              name: cni-conf
            - mountPath: /run/openvswitch
              name: host-run-ovs
            - mountPath: /run/ovn
              name: host-run-ovn
            - mountPath: /var/run/netns
              name: host-ns
              mountPropagation: HostToContainer
          readinessProbe:
            exec:
              command:
                - nc
                - -z
                - -w3
                - 127.0.0.1
                - "10665"
            periodSeconds: 3
          livenessProbe:
            exec:
              command:
                - nc
                - -z
                - -w3
                - 127.0.0.1
                - "10665"
            initialDelaySeconds: 30
            periodSeconds: 7
            failureThreshold: 5
          resources:
            requests:
              cpu: 200m
              memory: 200Mi
            limits:
              cpu: 1000m
              memory: 1Gi
      nodeSelector:
        kubernetes.io/os: "linux"
      volumes:
        - name: host-run-ovs
          hostPath:
            path: /run/openvswitch
        - name: host-run-ovn
          hostPath:
            path: /run/ovn
        - name: cni-conf
          hostPath:
            path: /etc/cni/net.d
        - name: cni-bin
          hostPath:
            path: /opt/cni/bin
        - name: host-ns
          hostPath:
            path: /var/run/netns
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: kube-ovn-pinger
  namespace: kube-system
  annotations:
    kubernetes.io/description: |
      This daemon set launches the openvswitch daemon.
spec:
  selector:
    matchLabels:
      app: kube-ovn-pinger
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: kube-ovn-pinger
        component: network
        type: infra
    spec:
      tolerations:
        - operator: Exists
      serviceAccountName: ovn
      hostPID: true
      containers:
        - name: pinger
          image: "kubeovn/kube-ovn:v1.5.3"
          command: ["/kube-ovn/kube-ovn-pinger", "--external-address=114.114.114.114", "--external-dns=alauda.cn"]
          imagePullPolicy: IfNotPresent
          securityContext:
            runAsUser: 0
            privileged: false
          env:
            - name: ENABLE_SSL
              value: "false"
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - mountPath: /lib/modules
              name: host-modules
              readOnly: true
            - mountPath: /run/openvswitch
              name: host-run-ovs
            - mountPath: /var/run/openvswitch
              name: host-run-ovs
            - mountPath: /var/run/ovn
              name: host-run-ovn
            - mountPath: /sys
              name: host-sys
              readOnly: true
            - mountPath: /etc/openvswitch
              name: host-config-openvswitch
            - mountPath: /var/log/openvswitch
              name: host-log-ovs
            - mountPath: /var/log/ovn
              name: host-log-ovn
            - mountPath: /var/run/tls
              name: kube-ovn-tls
          resources:
            requests:
              cpu: 100m
              memory: 200Mi
            limits:
              cpu: 200m
              memory: 400Mi
      nodeSelector:
        kubernetes.io/os: "linux"
      volumes:
        - name: host-modules
          hostPath:
            path: /lib/modules
        - name: host-run-ovs
          hostPath:
            path: /run/openvswitch
        - name: host-run-ovn
          hostPath:
            path: /run/ovn
        - name: host-sys
          hostPath:
            path: /sys
        - name: host-config-openvswitch
          hostPath:
            path: /etc/origin/openvswitch
        - name: host-log-ovs
          hostPath:
            path: /var/log/openvswitch
        - name: host-log-ovn
          hostPath:
            path: /var/log/ovn
        - name: kube-ovn-tls
          secret:
            optional: true
            secretName: kube-ovn-tls
---
kind: Service
apiVersion: v1
metadata:
  name: kube-ovn-pinger
  namespace: kube-system
  labels:
    app: kube-ovn-pinger
spec:
  selector:
    app: kube-ovn-pinger
  ports:
    - port: 8080
      name: metrics
---
kind: Service
apiVersion: v1
metadata:
  name: kube-ovn-controller
  namespace: kube-system
  labels:
    app: kube-ovn-controller
spec:
  selector:
    app: kube-ovn-controller
  ports:
    - port: 10660
      name: metrics
---
kind: Service
apiVersion: v1
metadata:
  name: kube-ovn-cni
  namespace: kube-system
  labels:
    app: kube-ovn-cni
spec:
  selector:
    app: kube-ovn-cni
  ports:
    - port: 10665
      name: metrics
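
The Services above pin the metrics ports: pinger on 8080, controller on 10660, cni-server on 10665 (the same port the probes check). A hedged spot check against a cluster still running these manifests:

  kubectl -n kube-system get svc kube-ovn-pinger kube-ovn-controller kube-ovn-cni
  nc -z -w3 127.0.0.1 10665 && echo "cni-server is listening"   # run on a node, mirrors the readiness probe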


@@ -1,288 +0,0 @@
#!/bin/bash
set -euo pipefail

KUBE_OVN_NS=kube-system
OVN_NB_POD=
OVN_SB_POD=

showHelp(){
  echo "kubectl ko {subcommand} [option...]"
  echo "Available Subcommands:"
  echo "  nbctl [ovn-nbctl options ...]    invoke ovn-nbctl"
  echo "  sbctl [ovn-sbctl options ...]    invoke ovn-sbctl"
  echo "  vsctl {nodeName} [ovs-vsctl options ...]    invoke ovs-vsctl on selected node"
  echo "  tcpdump {namespace/podname} [tcpdump options ...]    capture pod traffic"
  echo "  trace {namespace/podname} {target ip address} {icmp|tcp|udp} [target tcp or udp port]    trace ovn microflow of specific packet"
  echo "  diagnose {all|node} [nodename]    diagnose connectivity of all nodes or a specific node"
}

tcpdump(){
  namespacedPod="$1"; shift
  namespace=$(echo "$namespacedPod" | cut -d "/" -f1)
  podName=$(echo "$namespacedPod" | cut -d "/" -f2)
  if [ "$podName" = "$namespacedPod" ]; then
    nodeName=$(kubectl get pod "$podName" -o jsonpath={.spec.nodeName})
    mac=$(kubectl get pod "$podName" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/mac_address})
    hostNetwork=$(kubectl get pod "$podName" -o jsonpath={.spec.hostNetwork})
  else
    nodeName=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.spec.nodeName})
    hostNetwork=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.spec.hostNetwork})
  fi

  if [ -z "$nodeName" ]; then
    echo "Pod $namespacedPod does not exist on any node"
    exit 1
  fi

  ovnCni=$(kubectl get pod -n $KUBE_OVN_NS -o wide| grep kube-ovn-cni| grep " $nodeName " | awk '{print $1}')
  if [ -z "$ovnCni" ]; then
    echo "kube-ovn-cni does not exist on node $nodeName"
    exit 1
  fi

  if [ "$hostNetwork" = "true" ]; then
    set -x
    kubectl exec -it "$ovnCni" -n $KUBE_OVN_NS -- tcpdump -nn "$@"
  else
    nicName=$(kubectl exec -it "$ovnCni" -n $KUBE_OVN_NS -- ovs-vsctl --data=bare --no-heading --columns=name find interface external-ids:iface-id="$podName"."$namespace" | tr -d '\r')
    if [ -z "$nicName" ]; then
      echo "nic doesn't exist on node $nodeName"
      exit 1
    fi
    set -x
    kubectl exec -it "$ovnCni" -n $KUBE_OVN_NS -- tcpdump -nn -i "$nicName" "$@"
  fi
}

trace(){
  namespacedPod="$1"
  namespace=$(echo "$1" | cut -d "/" -f1)
  podName=$(echo "$1" | cut -d "/" -f2)
  if [ "$podName" = "$1" ]; then
    echo "namespace is required"
    exit 1
  fi

  podIP=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/ip_address})
  mac=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/mac_address})
  ls=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/logical_switch})
  hostNetwork=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.spec.hostNetwork})
  nodeName=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.spec.nodeName})

  if [ "$hostNetwork" = "true" ]; then
    echo "Can not trace host network pod"
    exit 1
  fi

  if [ -z "$ls" ]; then
    echo "pod address not ready"
    exit 1
  fi

  gwMac=$(kubectl exec -it $OVN_NB_POD -n $KUBE_OVN_NS -- ovn-nbctl --data=bare --no-heading --columns=mac find logical_router_port name=ovn-cluster-"$ls" | tr -d '\r')
  if [ -z "$gwMac" ]; then
    echo "get gw mac failed"
    exit 1
  fi

  dst="$2"
  if [ -z "$dst" ]; then
    echo "need a target ip address"
    exit 1
  fi

  type="$3"
  case $type in
    icmp)
      set -x
      kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -- ovn-trace --ct=new "$ls" "inport == \"$podName.$namespace\" && ip.ttl == 64 && icmp && eth.src == $mac && ip4.src == $podIP && eth.dst == $gwMac && ip4.dst == $dst"
      ;;
    tcp|udp)
      set -x
      kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -- ovn-trace --ct=new "$ls" "inport == \"$podName.$namespace\" && ip.ttl == 64 && eth.src == $mac && ip4.src == $podIP && eth.dst == $gwMac && ip4.dst == $dst && $type.src == 10000 && $type.dst == $4"
      ;;
    *)
      echo "type $type not supported"
      echo "kubectl ko trace {namespace/podname} {target ip address} {icmp|tcp|udp} [target tcp or udp port]"
      exit 1
      ;;
  esac

  set +x
  echo "--------"
  echo "Start OVS Tracing"
  echo ""
  echo ""

  ovsPod=$(kubectl get pod -n $KUBE_OVN_NS -o wide | grep " $nodeName " | grep ovs-ovn | awk '{print $1}')
  if [ -z "$ovsPod" ]; then
    echo "ovs pod doesn't exist on node $nodeName"
    exit 1
  fi

  inPort=$(kubectl exec "$ovsPod" -n $KUBE_OVN_NS -- ovs-vsctl --format=csv --data=bare --no-heading --columns=ofport find interface external_id:iface-id="$podName"."$namespace")
  case $type in
    icmp)
      set -x
      kubectl exec "$ovsPod" -n $KUBE_OVN_NS -- ovs-appctl ofproto/trace br-int in_port="$inPort",icmp,nw_src="$podIP",nw_dst="$dst",dl_src="$mac",dl_dst="$gwMac"
      ;;
    tcp|udp)
      set -x
      kubectl exec "$ovsPod" -n $KUBE_OVN_NS -- ovs-appctl ofproto/trace br-int in_port="$inPort","$type",nw_src="$podIP",nw_dst="$dst",dl_src="$mac",dl_dst="$gwMac","$type"_src=1000,"$type"_dst="$4"
      ;;
    *)
      echo "type $type not supported"
      echo "kubectl ko trace {namespace/podname} {target ip address} {icmp|tcp|udp} [target tcp or udp port]"
      exit 1
      ;;
  esac
}

vsctl(){
  nodeName="$1"; shift
  kubectl get no "$nodeName" > /dev/null
  ovsPod=$(kubectl get pod -n $KUBE_OVN_NS -o wide | grep " $nodeName " | grep ovs-ovn | awk '{print $1}')
  if [ -z "$ovsPod" ]; then
    echo "ovs pod doesn't exist on node $nodeName"
    exit 1
  fi
  kubectl exec "$ovsPod" -n $KUBE_OVN_NS -- ovs-vsctl "$@"
}

diagnose(){
  kubectl get crd subnets.kubeovn.io
  kubectl get crd ips.kubeovn.io
  kubectl get svc kube-dns -n kube-system
  kubectl get svc kubernetes -n default
  kubectl get no -o wide
  kubectl ko nbctl show
  kubectl ko sbctl show

  checkDaemonSet kube-proxy
  checkDeployment ovn-central
  checkDeployment kube-ovn-controller
  checkDaemonSet kube-ovn-cni
  checkDaemonSet ovs-ovn
  checkDeployment coredns
  type="$1"
  case $type in
    all)
      echo "### kube-ovn-controller recent log"
      set +e
      kubectl logs -n $KUBE_OVN_NS -l app=kube-ovn-controller --tail=100 | grep E$(date +%m%d)
      set -e
      echo ""
      pingers=$(kubectl get pod -n $KUBE_OVN_NS | grep kube-ovn-pinger | awk '{print $1}')
      for pinger in $pingers
      do
        nodeName=$(kubectl get pod "$pinger" -n "$KUBE_OVN_NS" -o jsonpath={.spec.nodeName})
        echo "### start to diagnose node $nodeName"
        echo "#### ovn-controller log:"
        kubectl exec -n $KUBE_OVN_NS -it "$pinger" -- tail /var/log/ovn/ovn-controller.log
        echo ""
        echo "#### ovs-vsctl show results:"
        kubectl exec -n $KUBE_OVN_NS -it "$pinger" -- ovs-vsctl show
        echo ""
        echo "#### pinger diagnose results:"
        kubectl exec -n $KUBE_OVN_NS -it "$pinger" -- /kube-ovn/kube-ovn-pinger --mode=job
        echo "### finish diagnose node $nodeName"
        echo ""
      done
      ;;
    node)
      nodeName="$2"
      kubectl get no "$nodeName" > /dev/null
      pinger=$(kubectl get pod -n $KUBE_OVN_NS -o wide | grep kube-ovn-pinger | grep " $nodeName " | awk '{print $1}')
      echo "### start to diagnose node $nodeName"
      echo "#### ovn-controller log:"
      kubectl exec -n $KUBE_OVN_NS -it "$pinger" -- tail /var/log/ovn/ovn-controller.log
      echo ""
      kubectl exec -n $KUBE_OVN_NS -it "$pinger" -- /kube-ovn/kube-ovn-pinger --mode=job
      echo "### finish diagnose node $nodeName"
      echo ""
      ;;
    *)
      echo "type $type not supported"
      echo "kubectl ko diagnose {all|node} [nodename]"
      ;;
  esac
}

getOvnCentralPod(){
  NB_POD=$(kubectl get pod -n $KUBE_OVN_NS -l ovn-nb-leader=true | grep ovn-central | head -n 1 | awk '{print $1}')
  if [ -z "$NB_POD" ]; then
    echo "nb leader does not exist"
    exit 1
  fi
  OVN_NB_POD=$NB_POD
  SB_POD=$(kubectl get pod -n $KUBE_OVN_NS -l ovn-sb-leader=true | grep ovn-central | head -n 1 | awk '{print $1}')
  if [ -z "$SB_POD" ]; then
    echo "sb leader does not exist"
    exit 1
  fi
  OVN_SB_POD=$SB_POD
}

checkDaemonSet(){
  name="$1"
  currentScheduled=$(kubectl get ds -n $KUBE_OVN_NS "$name" -o jsonpath={.status.currentNumberScheduled})
  desiredScheduled=$(kubectl get ds -n $KUBE_OVN_NS "$name" -o jsonpath={.status.desiredNumberScheduled})
  available=$(kubectl get ds -n $KUBE_OVN_NS "$name" -o jsonpath={.status.numberAvailable})
  ready=$(kubectl get ds -n $KUBE_OVN_NS "$name" -o jsonpath={.status.numberReady})
  if [ "$currentScheduled" = "$desiredScheduled" ] && [ "$desiredScheduled" = "$available" ] && [ "$available" = "$ready" ]; then
    echo "ds $name ready"
  else
    echo "Error ds $name not ready"
    exit 1
  fi
}

checkDeployment(){
  name="$1"
  ready=$(kubectl get deployment -n $KUBE_OVN_NS "$name" -o jsonpath={.status.readyReplicas})
  updated=$(kubectl get deployment -n $KUBE_OVN_NS "$name" -o jsonpath={.status.updatedReplicas})
  desire=$(kubectl get deployment -n $KUBE_OVN_NS "$name" -o jsonpath={.status.replicas})
  available=$(kubectl get deployment -n $KUBE_OVN_NS "$name" -o jsonpath={.status.availableReplicas})
  if [ "$ready" = "$updated" ] && [ "$updated" = "$desire" ] && [ "$desire" = "$available" ]; then
    echo "deployment $name ready"
  else
    echo "Error deployment $name not ready"
    exit 1
  fi
}

if [ $# -lt 1 ]; then
  showHelp
  exit 0
else
  subcommand="$1"; shift
fi

getOvnCentralPod

case $subcommand in
  nbctl)
    kubectl exec "$OVN_NB_POD" -n $KUBE_OVN_NS -- ovn-nbctl "$@"
    ;;
  sbctl)
    kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -- ovn-sbctl "$@"
    ;;
  vsctl)
    vsctl "$@"
    ;;
  tcpdump)
    tcpdump "$@"
    ;;
  trace)
    trace "$@"
    ;;
  diagnose)
    diagnose "$@"
    ;;
  *)
    showHelp
    ;;
esac
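
Typical invocations, taken from the plugin's own help text (node1, pod1 and the addresses are placeholders):

  kubectl ko nbctl show                            # inspect the OVN northbound DB
  kubectl ko vsctl node1 show                      # run ovs-vsctl on node1
  kubectl ko tcpdump default/pod1 -c 10            # capture 10 packets from pod1's interface
  kubectl ko trace default/pod1 10.68.0.5 tcp 80   # trace a TCP packet through OVN and OVS
  kubectl ko diagnose all                          # check CRDs, core services and every node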


@@ -1,404 +0,0 @@
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: kube-ovn
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
spec:
  privileged: true
  allowPrivilegeEscalation: true
  allowedCapabilities:
    - '*'
  volumes:
    - '*'
  hostNetwork: true
  hostPorts:
    - min: 0
      max: 65535
  hostIPC: true
  hostPID: true
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: ovn-config
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: ovn
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.k8s.io/system-only: "true"
  name: system:ovn
rules:
  - apiGroups: ['policy']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames:
      - kube-ovn
  - apiGroups:
      - "kubeovn.io"
    resources:
      - subnets
      - subnets/status
      - ips
      - vlans
      - networks
    verbs:
      - "*"
  - apiGroups:
      - ""
    resources:
      - pods
      - namespaces
      - nodes
      - configmaps
    verbs:
      - create
      - get
      - list
      - watch
      - patch
      - update
  - apiGroups:
      - ""
      - networking.k8s.io
      - apps
      - extensions
    resources:
      - networkpolicies
      - services
      - endpoints
      - statefulsets
      - daemonsets
      - deployments
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
      - update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: ovn
roleRef:
  name: system:ovn
  kind: ClusterRole
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: ovn
    namespace: kube-system
---
kind: Service
apiVersion: v1
metadata:
  name: ovn-nb
  namespace: kube-system
spec:
  ports:
    - name: ovn-nb
      protocol: TCP
      port: 6641
      targetPort: 6641
  type: ClusterIP
  selector:
    app: ovn-central
    ovn-nb-leader: "true"
  sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
  name: ovn-sb
  namespace: kube-system
spec:
  ports:
    - name: ovn-sb
      protocol: TCP
      port: 6642
      targetPort: 6642
  type: ClusterIP
  selector:
    app: ovn-central
    ovn-sb-leader: "true"
  sessionAffinity: None
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: ovn-central
  namespace: kube-system
  annotations:
    kubernetes.io/description: |
      OVN components: northd, nb and sb.
spec:
  replicas: 1
  strategy:
    rollingUpdate:
      maxSurge: 0%
      maxUnavailable: 100%
    type: RollingUpdate
  selector:
    matchLabels:
      app: ovn-central
  template:
    metadata:
      labels:
        app: ovn-central
        component: network
        type: infra
    spec:
      tolerations:
        - operator: Exists
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchLabels:
                  app: ovn-central
              topologyKey: kubernetes.io/hostname
      priorityClassName: system-cluster-critical
      serviceAccountName: ovn
      hostNetwork: true
      containers:
        - name: ovn-central
          image: "kubeovn/kube-ovn:v1.5.3"
          imagePullPolicy: IfNotPresent
          command: ["/kube-ovn/start-db.sh"]
          securityContext:
            capabilities:
              add: ["SYS_NICE"]
          env:
            - name: ENABLE_SSL
              value: "false"
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          resources:
            requests:
              cpu: 500m
              memory: 200Mi
            limits:
              cpu: 3
              memory: 3Gi
          volumeMounts:
            - mountPath: /var/run/openvswitch
              name: host-run-ovs
            - mountPath: /var/run/ovn
              name: host-run-ovn
            - mountPath: /sys
              name: host-sys
              readOnly: true
            - mountPath: /etc/openvswitch
              name: host-config-openvswitch
            - mountPath: /etc/ovn
              name: host-config-ovn
            - mountPath: /var/log/openvswitch
              name: host-log-ovs
            - mountPath: /var/log/ovn
              name: host-log-ovn
            - mountPath: /var/run/tls
              name: kube-ovn-tls
          readinessProbe:
            exec:
              command:
                - sh
                - /kube-ovn/ovn-is-leader.sh
            periodSeconds: 3
            timeoutSeconds: 45
          livenessProbe:
            exec:
              command:
                - sh
                - /kube-ovn/ovn-healthcheck.sh
            initialDelaySeconds: 30
            periodSeconds: 7
            failureThreshold: 5
            timeoutSeconds: 45
      nodeSelector:
        kubernetes.io/os: "linux"
        kube-ovn/role: "master"
      volumes:
        - name: host-run-ovs
          hostPath:
            path: /run/openvswitch
        - name: host-run-ovn
          hostPath:
            path: /run/ovn
        - name: host-sys
          hostPath:
            path: /sys
        - name: host-config-openvswitch
          hostPath:
            path: /etc/origin/openvswitch
        - name: host-config-ovn
          hostPath:
            path: /etc/origin/ovn
        - name: host-log-ovs
          hostPath:
            path: /var/log/openvswitch
        - name: host-log-ovn
          hostPath:
            path: /var/log/ovn
        - name: kube-ovn-tls
          secret:
            optional: true
            secretName: kube-ovn-tls
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: ovs-ovn
  namespace: kube-system
  annotations:
    kubernetes.io/description: |
      This daemon set launches the openvswitch daemon.
spec:
  selector:
    matchLabels:
      app: ovs
  updateStrategy:
    type: OnDelete
  template:
    metadata:
      labels:
        app: ovs
        component: network
        type: infra
    spec:
      tolerations:
        - operator: Exists
      priorityClassName: system-cluster-critical
      serviceAccountName: ovn
      hostNetwork: true
      hostPID: true
      containers:
        - name: openvswitch
          image: "kubeovn/kube-ovn:v1.5.3"
          imagePullPolicy: IfNotPresent
          command: ["/kube-ovn/start-ovs.sh"]
          securityContext:
            runAsUser: 0
            privileged: true
          env:
            - name: ENABLE_SSL
              value: "false"
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: HW_OFFLOAD
              value: "false"
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - mountPath: /lib/modules
              name: host-modules
              readOnly: true
            - mountPath: /var/run/openvswitch
              name: host-run-ovs
            - mountPath: /var/run/ovn
              name: host-run-ovn
            - mountPath: /sys
              name: host-sys
              readOnly: true
            - mountPath: /etc/openvswitch
              name: host-config-openvswitch
            - mountPath: /etc/ovn
              name: host-config-ovn
            - mountPath: /var/log/openvswitch
              name: host-log-ovs
            - mountPath: /var/log/ovn
              name: host-log-ovn
            - mountPath: /var/run/tls
              name: kube-ovn-tls
          readinessProbe:
            exec:
              command:
                - sh
                - /kube-ovn/ovs-healthcheck.sh
            periodSeconds: 5
            timeoutSeconds: 45
          livenessProbe:
            exec:
              command:
                - sh
                - /kube-ovn/ovs-healthcheck.sh
            initialDelaySeconds: 10
            periodSeconds: 5
            failureThreshold: 5
            timeoutSeconds: 45
          resources:
            requests:
              cpu: 200m
              memory: 200Mi
            limits:
              cpu: 1000m
              memory: 800Mi
      nodeSelector:
        kubernetes.io/os: "linux"
      volumes:
        - name: host-modules
          hostPath:
            path: /lib/modules
        - name: host-run-ovs
          hostPath:
            path: /run/openvswitch
        - name: host-run-ovn
          hostPath:
            path: /run/ovn
        - name: host-sys
          hostPath:
            path: /sys
        - name: host-config-openvswitch
          hostPath:
            path: /etc/origin/openvswitch
        - name: host-config-ovn
          hostPath:
            path: /etc/origin/ovn
        - name: host-log-ovs
          hostPath:
            path: /var/log/openvswitch
        - name: host-log-ovn
          hostPath:
            path: /var/log/ovn
        - name: kube-ovn-tls
          secret:
            optional: true
            secretName: kube-ovn-tls
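
Note that ovn-central only schedules onto nodes carrying the kube-ovn/role=master label (see its nodeSelector above); the playbook applied that label before the manifests, equivalent to:

  kubectl label node <node> kube-ovn/role=master --overwrite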


@@ -1,3 +1,3 @@
-kube_ovn_default_cidr: "{{ CLUSTER_CIDR }}"
 kube_ovn_node_switch_cidr: 100.64.0.0/16
-kube_ovn_enable_mirror: true
+# CLUSTER_CIDR_GW serves as POD_GATEWAY: the first address in the CLUSTER_CIDR range
+CLUSTER_CIDR_GW: "{{ CLUSTER_CIDR.split('.')[0] }}.{{ CLUSTER_CIDR.split('.')[1] }}.{{ CLUSTER_CIDR.split('.')[2] }}.{{ CLUSTER_CIDR.split('.')[3]|int + 1 }}"
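
The Jinja expression picks the first host address of CLUSTER_CIDR, the same value the old role computed in shell. A hedged check with an example CIDR (kubeasz's sample 172.20.0.0/16):

  echo 172.20.0.0/16 | cut -d/ -f1 | awk -F. '{print $1"."$2"."$3"."$4+1}'
  # -> 172.20.0.1; the Jinja variant works because '0/16'|int falls back to 0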