From ee750e3a2e98003202e53bdd36976fed6fb25f45 Mon Sep 17 00:00:00 2001
From: gjmzj
Date: Sat, 14 Jan 2023 18:29:52 +0800
Subject: [PATCH] update calico v3.24.5

---
 ezdown                                      |   2 +-
 roles/calico/templates/calico-v3.24.yaml.j2 | 640 ++++++++++++++++++++
 2 files changed, 641 insertions(+), 1 deletion(-)
 create mode 100644 roles/calico/templates/calico-v3.24.yaml.j2

diff --git a/ezdown b/ezdown
index 4d2d395..facaa29 100755
--- a/ezdown
+++ b/ezdown
@@ -22,7 +22,7 @@ HARBOR_VER=v2.6.3
 REGISTRY_MIRROR=CN
 
 # images downloaded by default(with '-D')
-calicoVer=v3.23.5
+calicoVer=v3.24.5
 dnsNodeCacheVer=1.22.13
 corednsVer=1.9.3
 dashboardVer=v2.7.0

diff --git a/roles/calico/templates/calico-v3.24.yaml.j2 b/roles/calico/templates/calico-v3.24.yaml.j2
new file mode 100644
index 0000000..5183b8e
--- /dev/null
+++ b/roles/calico/templates/calico-v3.24.yaml.j2
@@ -0,0 +1,640 @@
+---
+# download Release archive (https://github.com/projectcalico/calico/releases/download/v3.24.5/release-v3.24.5.tgz)
+# Release notes: https://projectcalico.docs.tigera.io/archive/v3.24/release-notes/
+
+# Datastore: etcd. With the etcd datastore, using Typha is redundant and not recommended.
+# Kubeasz creates the etcd secrets on the command line (kubectl create); see 'roles/calico/tasks/main.yml' for details.
+
+# source from: release-v3.24.5/manifests/calico-etcd.yaml
+
+# Source: calico/templates/calico-kube-controllers.yaml
+# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict
+
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: calico-kube-controllers
+  namespace: kube-system
+  labels:
+    k8s-app: calico-kube-controllers
+spec:
+  maxUnavailable: 1
+  selector:
+    matchLabels:
+      k8s-app: calico-kube-controllers
+---
+# Source: calico/templates/calico-kube-controllers.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: calico-kube-controllers
+  namespace: kube-system
+---
+# Source: calico/templates/calico-node.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: calico-node
+  namespace: kube-system
+---
+# Source: calico/templates/calico-config.yaml
+# This ConfigMap is used to configure a self-hosted Calico installation.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: calico-config
+  namespace: kube-system
+data:
+  # Configure this with the location of your etcd cluster.
+  etcd_endpoints: "{{ ETCD_ENDPOINTS }}"
+  # If you're using TLS enabled etcd uncomment the following.
+  # You must also populate the Secret below with these files.
+  etcd_ca: "/calico-secrets/etcd-ca"
+  etcd_cert: "/calico-secrets/etcd-cert"
+  etcd_key: "/calico-secrets/etcd-key"
+  # Typha is disabled.
+  typha_service_name: "none"
+  # Configure the backend to use.
+  calico_backend: "{{ CALICO_NETWORKING_BACKEND }}"
+
+  # Configure the MTU to use for workload interfaces and tunnels.
+  # By default, MTU is auto-detected, and explicitly setting this field should not be required.
+  # You can override auto-detection by providing a non-zero value.
+  veth_mtu: "0"
+
+  # The CNI network configuration to install on each node. The special
+  # values in this config will be automatically populated.
+  cni_network_config: |-
+    {
+      "name": "k8s-pod-network",
+      "cniVersion": "0.3.1",
+      "plugins": [
+        {
+          "type": "calico",
+          "log_level": "info",
+          "log_file_path": "/var/log/calico/cni/cni.log",
+          "etcd_endpoints": "{{ ETCD_ENDPOINTS }}",
+          "etcd_key_file": "/etc/calico/ssl/calico-key.pem",
+          "etcd_cert_file": "/etc/calico/ssl/calico.pem",
+          "etcd_ca_cert_file": "{{ ca_dir }}/ca.pem",
+          "mtu": 1500,
+          "ipam": {
+              "type": "calico-ipam"
+          },
+          "policy": {
+              "type": "k8s"
+          },
+          "kubernetes": {
+              "kubeconfig": "/etc/cni/net.d/calico-kubeconfig"
+          }
+        },
+        {
+          "type": "portmap",
+          "snat": true,
+          "capabilities": {"portMappings": true}
+        },
+        {
+          "type": "bandwidth",
+          "capabilities": {"bandwidth": true}
+        }
+      ]
+    }
+---
+# Source: calico/templates/calico-kube-controllers-rbac.yaml
+# Include a clusterrole for the kube-controllers component,
+# and bind it to the calico-kube-controllers serviceaccount.
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: calico-kube-controllers
+rules:
+  # Pods are monitored for changing labels.
+  # The node controller monitors Kubernetes nodes.
+  # Namespace and serviceaccount labels are used for policy.
+  - apiGroups: [""]
+    resources:
+      - pods
+      - nodes
+      - namespaces
+      - serviceaccounts
+    verbs:
+      - watch
+      - list
+      - get
+  # Watch for changes to Kubernetes NetworkPolicies.
+  - apiGroups: ["networking.k8s.io"]
+    resources:
+      - networkpolicies
+    verbs:
+      - watch
+      - list
+---
+# Source: calico/templates/calico-node-rbac.yaml
+# Include a clusterrole for the calico-node DaemonSet,
+# and bind it to the calico-node serviceaccount.
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: calico-node
+rules:
+  # Used for creating service account tokens to be used by the CNI plugin
+  - apiGroups: [""]
+    resources:
+      - serviceaccounts/token
+    resourceNames:
+      - calico-node
+    verbs:
+      - create
+  # The CNI plugin needs to get pods, nodes, and namespaces.
+  - apiGroups: [""]
+    resources:
+      - pods
+      - nodes
+      - namespaces
+    verbs:
+      - get
+  # EndpointSlices are used for Service-based network policy rule
+  # enforcement.
+  - apiGroups: ["discovery.k8s.io"]
+    resources:
+      - endpointslices
+    verbs:
+      - watch
+      - list
+  - apiGroups: [""]
+    resources:
+      - endpoints
+      - services
+    verbs:
+      # Used to discover service IPs for advertisement.
+      - watch
+      - list
+  # Pod CIDR auto-detection on kubeadm needs access to config maps.
+  - apiGroups: [""]
+    resources:
+      - configmaps
+    verbs:
+      - get
+  - apiGroups: [""]
+    resources:
+      - nodes/status
+    verbs:
+      # Needed for clearing NodeNetworkUnavailable flag.
+      - patch
+---
+# Source: calico/templates/calico-kube-controllers-rbac.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: calico-kube-controllers
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: calico-kube-controllers
+subjects:
+- kind: ServiceAccount
+  name: calico-kube-controllers
+  namespace: kube-system
+---
+# Source: calico/templates/calico-node-rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: calico-node
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: calico-node
+subjects:
+- kind: ServiceAccount
+  name: calico-node
+  namespace: kube-system
+---
+# Source: calico/templates/calico-node.yaml
+# This manifest installs the calico-node container, as well
+# as the CNI plugins and network config on
+# each master and worker node in a Kubernetes cluster.
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+  name: calico-node
+  namespace: kube-system
+  labels:
+    k8s-app: calico-node
+spec:
+  selector:
+    matchLabels:
+      k8s-app: calico-node
+  updateStrategy:
+    type: RollingUpdate
+    rollingUpdate:
+      maxUnavailable: 1
+  template:
+    metadata:
+      labels:
+        k8s-app: calico-node
+    spec:
+      nodeSelector:
+        kubernetes.io/os: linux
+      hostNetwork: true
+      tolerations:
+        # Make sure calico-node gets scheduled on all nodes.
+        - effect: NoSchedule
+          operator: Exists
+        # Mark the pod as a critical add-on for rescheduling.
+        - key: CriticalAddonsOnly
+          operator: Exists
+        - effect: NoExecute
+          operator: Exists
+      serviceAccountName: calico-node
+      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
+      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
+      terminationGracePeriodSeconds: 0
+      priorityClassName: system-node-critical
+      initContainers:
+        # This container installs the CNI binaries
+        # and CNI network config file on each node.
+        - name: install-cni
+          image: easzlab.io.local:5000/calico/cni:{{ calico_ver }}
+          imagePullPolicy: IfNotPresent
+          command: ["/opt/cni/bin/install"]
+          envFrom:
+          - configMapRef:
+              # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
+              name: kubernetes-services-endpoint
+              optional: true
+          env:
+            # Name of the CNI config file to create.
+            - name: CNI_CONF_NAME
+              value: "10-calico.conflist"
+            # The CNI network config to install on each node.
+            - name: CNI_NETWORK_CONFIG
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: cni_network_config
+            # The location of the etcd cluster.
+            - name: ETCD_ENDPOINTS
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: etcd_endpoints
+            # CNI MTU Config variable
+            - name: CNI_MTU
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: veth_mtu
+            # Prevents the container from sleeping forever.
+            - name: SLEEP
+              value: "false"
+          volumeMounts:
+            - mountPath: /host/opt/cni/bin
+              name: cni-bin-dir
+            - mountPath: /host/etc/cni/net.d
+              name: cni-net-dir
+            - mountPath: /calico-secrets
+              name: etcd-certs
+          securityContext:
+            privileged: true
+        # This init container mounts the necessary filesystems needed by the BPF data plane
+        # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed
+        # in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode.
+        - name: "mount-bpffs"
+          image: easzlab.io.local:5000/calico/node:{{ calico_ver }}
+          imagePullPolicy: IfNotPresent
+          command: ["calico-node", "-init", "-best-effort"]
+          volumeMounts:
+            - mountPath: /sys/fs
+              name: sys-fs
+              # Bidirectional is required to ensure that the new mount we make at /sys/fs/bpf propagates to the host
+              # so that it outlives the init container.
+              mountPropagation: Bidirectional
+            - mountPath: /var/run/calico
+              name: var-run-calico
+              # Bidirectional is required to ensure that the new mount we make at /run/calico/cgroup propagates to the host
+              # so that it outlives the init container.
+              mountPropagation: Bidirectional
+            # Mount /proc/ from host which usually is an init program at /nodeproc. It's needed by mountns binary,
+            # executed by calico-node, to mount root cgroup2 fs at /run/calico/cgroup to attach CTLB programs correctly.
+            - mountPath: /nodeproc
+              name: nodeproc
+              readOnly: true
+          securityContext:
+            privileged: true
+      containers:
+        # Runs calico-node container on each Kubernetes node. This
+        # container programs network policy and routes on each
+        # host.
+        - name: calico-node
+          image: easzlab.io.local:5000/calico/node:{{ calico_ver }}
+          imagePullPolicy: IfNotPresent
+          envFrom:
+          - configMapRef:
+              # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
+              name: kubernetes-services-endpoint
+              optional: true
+          env:
+            # The location of the etcd cluster.
+            - name: ETCD_ENDPOINTS
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: etcd_endpoints
+            # Location of the CA certificate for etcd.
+            - name: ETCD_CA_CERT_FILE
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: etcd_ca
+            # Location of the client key for etcd.
+            - name: ETCD_KEY_FILE
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: etcd_key
+            # Location of the client certificate for etcd.
+            - name: ETCD_CERT_FILE
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: etcd_cert
+            # Set noderef for node controller.
+            - name: CALICO_K8S_NODE_REF
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            # Choose the backend to use.
+            - name: CALICO_NETWORKING_BACKEND
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: calico_backend
+            # Cluster type to identify the deployment type
+            - name: CLUSTER_TYPE
+              value: "k8s,bgp"
+            # Auto-detect the BGP IP address.
+            - name: IP
+              value: "autodetect"
+            - name: IP_AUTODETECTION_METHOD
+              value: "{{ IP_AUTODETECTION_METHOD }}"
+            # Enable IPIP
+{% if CALICO_NETWORKING_BACKEND == "brid" %}
+            - name: CALICO_IPV4POOL_IPIP
+              value: "{{ CALICO_IPV4POOL_IPIP }}"
+{% endif %}
+            # Enable or Disable VXLAN on the default IP pool.
+{% if CALICO_NETWORKING_BACKEND == "vxlan" %}
+            - name: CALICO_IPV4POOL_VXLAN
+              value: "Never"
+            - name: CALICO_IPV6POOL_VXLAN
+              value: "Never"
+{% endif %}
+            # Set MTU for tunnel device used if ipip is enabled
+            - name: FELIX_IPINIPMTU
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: veth_mtu
+            # Set MTU for the VXLAN tunnel device.
+            - name: FELIX_VXLANMTU
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: veth_mtu
+            # Set MTU for the Wireguard tunnel device.
+            - name: FELIX_WIREGUARDMTU
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: veth_mtu
+            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
+            # chosen from this range. Changing this value after installation will have
+            # no effect. This should fall within `--cluster-cidr`.
+            - name: CALICO_IPV4POOL_CIDR
+              value: "{{ CLUSTER_CIDR }}"
+            # Disable file logging so `kubectl logs` works.
+            - name: CALICO_DISABLE_FILE_LOGGING
+              value: "true"
+            # Set Felix endpoint to host default action to ACCEPT.
+            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
+              value: "ACCEPT"
+            # Disable IPv6 on Kubernetes.
+            - name: FELIX_IPV6SUPPORT
+              value: "false"
+            - name: FELIX_HEALTHENABLED
+              value: "true"
+            # Set the Kubernetes NodePort range: if Services use NodePorts outside Calico's expected range,
+            # Calico will treat traffic to those ports as host traffic instead of pod traffic.
+            - name: FELIX_KUBENODEPORTRANGES
+              value: "{{ NODE_PORT_RANGE.split('-')[0] }}:{{ NODE_PORT_RANGE.split('-')[1] }}"
+            - name: FELIX_PROMETHEUSMETRICSENABLED
+              value: "false"
+          securityContext:
+            privileged: true
+          resources:
+            requests:
+              cpu: 250m
+          lifecycle:
+            preStop:
+              exec:
+                command:
+                - /bin/calico-node
+                - -shutdown
+          livenessProbe:
+            exec:
+              command:
+              - /bin/calico-node
+              - -felix-live
+{% if CALICO_NETWORKING_BACKEND == "brid" %}
+              - -bird-live
+{% endif %}
+            periodSeconds: 10
+            initialDelaySeconds: 10
+            failureThreshold: 6
+            timeoutSeconds: 10
+          readinessProbe:
+            exec:
+              command:
+              - /bin/calico-node
+              - -felix-ready
+{% if CALICO_NETWORKING_BACKEND == "brid" %}
+              - -bird-ready
+{% endif %}
+            periodSeconds: 10
+            timeoutSeconds: 10
+          volumeMounts:
+            # For maintaining CNI plugin API credentials.
+            - mountPath: /host/etc/cni/net.d
+              name: cni-net-dir
+              readOnly: false
+            - mountPath: /lib/modules
+              name: lib-modules
+              readOnly: true
+            - mountPath: /run/xtables.lock
+              name: xtables-lock
+              readOnly: false
+            - mountPath: /var/run/calico
+              name: var-run-calico
+              readOnly: false
+            - mountPath: /var/lib/calico
+              name: var-lib-calico
+              readOnly: false
+            - mountPath: /calico-secrets
+              name: etcd-certs
+            - name: policysync
+              mountPath: /var/run/nodeagent
+            # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
+            # parent directory.
+            - name: bpffs
+              mountPath: /sys/fs/bpf
+            - name: cni-log-dir
+              mountPath: /var/log/calico/cni
+              readOnly: true
+      volumes:
+        # Used by calico-node.
+        - name: lib-modules
+          hostPath:
+            path: /lib/modules
+        - name: var-run-calico
+          hostPath:
+            path: /var/run/calico
+        - name: var-lib-calico
+          hostPath:
+            path: /var/lib/calico
+        - name: xtables-lock
+          hostPath:
+            path: /run/xtables.lock
+            type: FileOrCreate
+        - name: sys-fs
+          hostPath:
+            path: /sys/fs/
+            type: DirectoryOrCreate
+        - name: bpffs
+          hostPath:
+            path: /sys/fs/bpf
+            type: Directory
+        # mount /proc at /nodeproc to be used by mount-bpffs initContainer to mount root cgroup2 fs.
+        - name: nodeproc
+          hostPath:
+            path: /proc
+        # Used to install CNI.
+        - name: cni-bin-dir
+          hostPath:
+            path: {{ bin_dir }}
+        - name: cni-net-dir
+          hostPath:
+            path: /etc/cni/net.d
+        # Used to access CNI logs.
+        - name: cni-log-dir
+          hostPath:
+            path: /var/log/calico/cni
+        # Mount in the etcd TLS secrets with mode 400.
+        # See https://kubernetes.io/docs/concepts/configuration/secret/
+        - name: etcd-certs
+          secret:
+            secretName: calico-etcd-secrets
+            defaultMode: 0400
+        # Used to create per-pod Unix Domain Sockets
+        - name: policysync
+          hostPath:
+            type: DirectoryOrCreate
+            path: /var/run/nodeagent
+---
+# Source: calico/templates/calico-kube-controllers.yaml
+# See https://github.com/projectcalico/kube-controllers
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: calico-kube-controllers
+  namespace: kube-system
+  labels:
+    k8s-app: calico-kube-controllers
+spec:
+  # The controllers can only have a single active instance.
+  replicas: 1
+  selector:
+    matchLabels:
+      k8s-app: calico-kube-controllers
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      name: calico-kube-controllers
+      namespace: kube-system
+      labels:
+        k8s-app: calico-kube-controllers
+    spec:
+      nodeSelector:
+        kubernetes.io/os: linux
+      tolerations:
+        # Mark the pod as a critical add-on for rescheduling.
+        - key: CriticalAddonsOnly
+          operator: Exists
+        - key: node-role.kubernetes.io/master
+          effect: NoSchedule
+        - key: node-role.kubernetes.io/control-plane
+          effect: NoSchedule
+      serviceAccountName: calico-kube-controllers
+      priorityClassName: system-cluster-critical
+      # The controllers must run in the host network namespace so that
+      # it isn't governed by policy that would prevent it from working.
+      hostNetwork: true
+      containers:
+        - name: calico-kube-controllers
+          image: easzlab.io.local:5000/calico/kube-controllers:{{ calico_ver }}
+          imagePullPolicy: IfNotPresent
+          env:
+            # The location of the etcd cluster.
+            - name: ETCD_ENDPOINTS
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: etcd_endpoints
+            # Location of the CA certificate for etcd.
+            - name: ETCD_CA_CERT_FILE
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: etcd_ca
+            # Location of the client key for etcd.
+            - name: ETCD_KEY_FILE
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: etcd_key
+            # Location of the client certificate for etcd.
+            - name: ETCD_CERT_FILE
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: etcd_cert
+            # Choose which controllers to run.
+            - name: ENABLED_CONTROLLERS
+              value: policy,namespace,serviceaccount,workloadendpoint,node
+          volumeMounts:
+            # Mount in the etcd TLS secrets.
+            - mountPath: /calico-secrets
+              name: etcd-certs
+          livenessProbe:
+            exec:
+              command:
+              - /usr/bin/check-status
+              - -l
+            periodSeconds: 10
+            initialDelaySeconds: 10
+            failureThreshold: 6
+            timeoutSeconds: 10
+          readinessProbe:
+            exec:
+              command:
+              - /usr/bin/check-status
+              - -r
+            periodSeconds: 10
+      volumes:
+        # Mount in the etcd TLS secrets with mode 400.
+        # See https://kubernetes.io/docs/concepts/configuration/secret/
+        - name: etcd-certs
+          secret:
+            secretName: calico-etcd-secrets
+            defaultMode: 0440
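Note: this manifest only mounts the calico-etcd-secrets Secret; per the header comment, kubeasz creates that Secret with kubectl in 'roles/calico/tasks/main.yml', which is not part of this patch. A minimal sketch of such a command, assuming the key names implied by the calico-config paths (etcd-ca, etcd-cert, etcd-key) and the certificate locations used in the CNI config above:

    # hypothetical sketch; the actual task lives in roles/calico/tasks/main.yml
    kubectl create secret generic calico-etcd-secrets -n kube-system \
      --from-file=etcd-ca={{ ca_dir }}/ca.pem \
      --from-file=etcd-cert=/etc/calico/ssl/calico.pem \
      --from-file=etcd-key=/etc/calico/ssl/calico-key.pem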