add cilium connectivity-check

pull/1166/head
jin.gjm 2022-06-16 12:37:24 +08:00
parent 5f86b82546
commit 3258022773
9 changed files with 1341 additions and 38 deletions

View File

@@ -133,6 +133,7 @@ calico_ver_main: "{{ calico_ver.split('.')[0] }}.{{ calico_ver.split('.')[1] }}"
# ------------------------------------------- cilium
# [cilium] image version
cilium_ver: "__cilium__"
cilium_connectivity_check: "true"
# ------------------------------------------- kube-ovn
# [kube-ovn] select the OVN DB and OVN Control Plane node; defaults to the first master node
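Note: the flag is stored as the quoted string "true" and is evaluated later through Ansible's |bool filter (see the cluster-addon change below), so it can be switched off per cluster. A minimal sketch, assuming the standard kubeasz per-cluster config layout:

    # clusters/<cluster-name>/config.yml
    cilium_connectivity_check: "false"    # skip the cilium connectivity test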

View File

@@ -2,6 +2,12 @@ image:
  repository: quay.io/cilium/cilium
  useDigest: false
# -- Additional agent container arguments.
{% if ENABLE_LOCAL_DNS_CACHE %}
extraArgs:
- --exclude-local-address="{{ LOCAL_DNS_CACHE }}/32"
{% endif %}
resources:
  limits:
    cpu: 4000m
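When ENABLE_LOCAL_DNS_CACHE is set, the Jinja block above adds an agent argument that keeps cilium from claiming the node-local DNS address. Assuming the kubeasz default LOCAL_DNS_CACHE of 169.254.20.10 (verify against your cluster config), the rendered values.yaml fragment would read:

    extraArgs:
    - --exclude-local-address="169.254.20.10/32"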

View File

@@ -117,7 +117,6 @@
- "/var/lib/calico/"
- "/var/log/calico/"
- "/etc/cilium/"
- "/var/run/cilium/"
- "/sys/fs/bpf/tc/"
- "/var/lib/cni/"
- "/var/lib/kube-router/"
@@ -130,35 +129,6 @@
- "/etc/origin/ovn/"
- "/etc/ovn/"
- "/var/log/ovn/"
# - name: cleanup networks1
# shell: "ip link del tunl0; \
# ip link del flannel.1; \
# ip link del cni0; \
# ip link del mynet0; \
# ip link del kube-bridge; \
# ip link del dummy0; \
# ip link del kube-ipvs0; \
# ip link del cilium_net; \
# ip link del cilium_vxlan; \
# ip link del ovn0; \
# ip link del ovs-system"
# ignore_errors: true
#
# - name: cleanup networks2
# shell: "systemctl restart networking; \
# systemctl restart network"
# ignore_errors: true
#
# - name: cleanup 'calico' routes
# shell: "for rt in `ip route|grep bird|sed 's/blackhole //'|awk '{print $1}'`;do ip route del $rt;done;"
# when: "CLUSTER_NETWORK == 'calico'"
# ignore_errors: true
#
# - name: cleanup iptables
# shell: "iptables -F && iptables -X \
# && iptables -F -t nat && iptables -X -t nat \
# && iptables -F -t raw && iptables -X -t raw \
# && iptables -F -t mangle && iptables -X -t mangle"
ignore_errors: true
when: "inventory_hostname in groups['kube_master'] or inventory_hostname in groups['kube_node']"

View File

@@ -0,0 +1,33 @@
- block:
    - name: Prepare the cilium-check config directory
      file: name={{ cluster_dir }}/yml/cilium-check state=directory

    - name: Prepare the deployment files
      template: src=cilium-check/{{ item }}.j2 dest={{ cluster_dir }}/yml/cilium-check/{{ item }}
      with_items:
      - "connectivity-check.yaml"
      - "check-part1.yaml"
      - "namespace.yaml"

    - name: Create the test namespace
      shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/cilium-check/namespace.yaml"

    - name: Create test part1 (echo pods and services)
      shell: "{{ base_dir }}/bin/kubectl apply -n cilium-test -f {{ cluster_dir }}/yml/cilium-check/check-part1.yaml"

    - name: Poll until the echo pods are Running (duration depends on image pull speed)
      shell: "{{ base_dir }}/bin/kubectl get pod -n cilium-test |grep echo|grep Running|grep '1/1'|wc -l"
      register: pod_status
      until: pod_status.stdout == "3"
      retries: 15
      delay: 8
      ignore_errors: true

    - name: Create the full connectivity-check test
      shell: "{{ base_dir }}/bin/kubectl apply -n cilium-test -f {{ cluster_dir }}/yml/cilium-check/connectivity-check.yaml"

    - debug:
        msg: "[IMPORTANT]: check all pods in the cilium-test namespace; if every pod is Running and no restart count keeps growing, the cilium connectivity test passed. \
             After observing for a while, all test resources can be removed by deleting the namespace (kubectl delete ns cilium-test)"
  run_once: true
  connection: local
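A sketch of checking the result by hand after the play finishes; these are the same commands the tasks above wrap, run wherever kubectl is configured for the cluster:

    kubectl get pod -n cilium-test    # expect every pod Running with a stable restart count
    kubectl delete ns cilium-test     # tear down all test resources once satisfied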

View File

@@ -30,3 +30,6 @@
- import_tasks: nfs-provisioner.yml
  when: '"nfs-client-provisioner" not in pod_info.stdout and nfs_provisioner_install == "yes"'
- import_tasks: cilium_connectivity_check.yml
  when: 'cilium_connectivity_check|bool'

View File

@@ -0,0 +1,256 @@
# Automatically generated by Makefile. DO NOT EDIT
---
metadata:
  name: echo-a
  labels:
    name: echo-a
    topology: any
    component: network-check
    traffic: internal
    quarantine: "false"
    type: autocheck
spec:
  template:
    metadata:
      labels:
        name: echo-a
    spec:
      hostNetwork: false
      containers:
      - name: echo-a-container
        env:
        - name: PORT
          value: "8080"
        ports:
        - containerPort: 8080
        image: quay.io/cilium/json-mock:v1.3.0@sha256:2729064827fa9dbfface8d3df424feb6c792a0ba07117b844349635c93c06d2b
        imagePullPolicy: IfNotPresent
        readinessProbe:
          timeoutSeconds: 7
          exec:
            command:
            - curl
            - -sS
            - --fail
            - --connect-timeout
            - "5"
            - -o
            - /dev/null
            - localhost:8080
        livenessProbe:
          timeoutSeconds: 7
          exec:
            command:
            - curl
            - -sS
            - --fail
            - --connect-timeout
            - "5"
            - -o
            - /dev/null
            - localhost:8080
  selector:
    matchLabels:
      name: echo-a
  replicas: 1
apiVersion: apps/v1
kind: Deployment
---
metadata:
  name: echo-b
  labels:
    name: echo-b
    topology: any
    component: services-check
    traffic: internal
    quarantine: "false"
    type: autocheck
spec:
  template:
    metadata:
      labels:
        name: echo-b
    spec:
      hostNetwork: false
      containers:
      - name: echo-b-container
        env:
        - name: PORT
          value: "8080"
        ports:
        - containerPort: 8080
          hostPort: 40000
        image: quay.io/cilium/json-mock:v1.3.0@sha256:2729064827fa9dbfface8d3df424feb6c792a0ba07117b844349635c93c06d2b
        imagePullPolicy: IfNotPresent
        readinessProbe:
          timeoutSeconds: 7
          exec:
            command:
            - curl
            - -sS
            - --fail
            - --connect-timeout
            - "5"
            - -o
            - /dev/null
            - localhost:8080
        livenessProbe:
          timeoutSeconds: 7
          exec:
            command:
            - curl
            - -sS
            - --fail
            - --connect-timeout
            - "5"
            - -o
            - /dev/null
            - localhost:8080
  selector:
    matchLabels:
      name: echo-b
  replicas: 1
apiVersion: apps/v1
kind: Deployment
---
metadata:
  name: echo-b-host
  labels:
    name: echo-b-host
    topology: any
    component: services-check
    traffic: internal
    quarantine: "false"
    type: autocheck
spec:
  template:
    metadata:
      labels:
        name: echo-b-host
    spec:
      hostNetwork: true
      containers:
      - name: echo-b-host-container
        env:
        - name: PORT
          value: "41000"
        ports: []
        image: quay.io/cilium/json-mock:v1.3.0@sha256:2729064827fa9dbfface8d3df424feb6c792a0ba07117b844349635c93c06d2b
        imagePullPolicy: IfNotPresent
        readinessProbe:
          timeoutSeconds: 7
          exec:
            command:
            - curl
            - -sS
            - --fail
            - --connect-timeout
            - "5"
            - -o
            - /dev/null
            - localhost:41000
        livenessProbe:
          timeoutSeconds: 7
          exec:
            command:
            - curl
            - -sS
            - --fail
            - --connect-timeout
            - "5"
            - -o
            - /dev/null
            - localhost:41000
      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: name
                operator: In
                values:
                - echo-b
            topologyKey: kubernetes.io/hostname
  selector:
    matchLabels:
      name: echo-b-host
  replicas: 1
apiVersion: apps/v1
kind: Deployment
---
metadata:
  name: echo-a
  labels:
    name: echo-a
    topology: any
    component: network-check
    traffic: internal
    quarantine: "false"
    type: autocheck
spec:
  ports:
  - name: http
    port: 8080
  type: ClusterIP
  selector:
    name: echo-a
apiVersion: v1
kind: Service
---
metadata:
  name: echo-b
  labels:
    name: echo-b
    topology: any
    component: services-check
    traffic: internal
    quarantine: "false"
    type: autocheck
spec:
  ports:
  - name: http
    port: 8080
    nodePort: 31414
  type: NodePort
  selector:
    name: echo-b
apiVersion: v1
kind: Service
---
metadata:
  name: echo-b-headless
  labels:
    name: echo-b-headless
    topology: any
    component: services-check
    traffic: internal
    quarantine: "false"
    type: autocheck
spec:
  ports:
  - name: http
    port: 8080
  type: ClusterIP
  selector:
    name: echo-b
  clusterIP: None
apiVersion: v1
kind: Service
---
metadata:
  name: echo-b-host-headless
  labels:
    name: echo-b-host-headless
    topology: any
    component: services-check
    traffic: internal
    quarantine: "false"
    type: autocheck
spec:
  ports: []
  type: ClusterIP
  selector:
    name: echo-b-host
  clusterIP: None
apiVersion: v1
kind: Service
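The echo-b service variants (NodePort, headless, and the hostPort/hostNetwork pair) exercise different service datapaths, which is why the services-check component ships three of them. A quick manual probe along the same lines; the node IP is illustrative, and curl is already present in the json-mock image:

    kubectl -n cilium-test exec deploy/echo-a -- curl -sS --fail echo-b:8080   # pod -> ClusterIP service
    curl -sS --fail http://<node-ip>:31414/                                    # external -> NodePort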

File diff suppressed because it is too large.

View File

@@ -0,0 +1,9 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    kubernetes.io/metadata.name: cilium-test
  name: cilium-test
spec:
  finalizers:
  - kubernetes

View File

@@ -50,11 +50,4 @@
  retries: 8
  delay: 2
  tags: upgrade
- name: Add crictl command auto-completion
  lineinfile:
    dest: ~/.bashrc
    state: present
    regexp: 'crictl completion'
    line: 'source <(crictl completion) # generated by kubeasz'
  when: "'NoFound' in containerd_svc.stdout"