---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each control plane and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
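        # The etcd cert serial annotation below forces a rolling restart of calico-node whenever
        # the etcd client certificate is rotated; the prometheus.io annotations let Prometheus
        # discover Felix's metrics endpoint.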
{% if calico_datastore == "etcd" %}
        kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}"
{% endif %}
{% if calico_felix_prometheusmetricsenabled %}
        prometheus.io/scrape: 'true'
        prometheus.io/port: "{{ calico_felix_prometheusmetricsport }}"
{% endif %}
    spec:
      nodeSelector:
        {{ calico_ds_nodeselector }}
      priorityClassName: system-node-critical
      hostNetwork: true
      serviceAccountName: calico-node
      tolerations:
        # Make sure calico-node gets scheduled on all nodes.
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
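      # Up to three init containers run before calico-node: upgrade-ipam (only with the
      # Kubernetes datastore and calico-ipam), install-cni, and mount-bpffs.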
      initContainers:
{% if calico_datastore == "kdd" and not calico_ipam_host_local %}
        # This container performs upgrade from host-local IPAM to calico-ipam.
        # It can be deleted if this is a fresh installation, or if you have already
        # upgraded to use calico-ipam.
        - name: upgrade-ipam
          image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }}
          imagePullPolicy: {{ k8s_image_pull_policy }}
          command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
          envFrom:
            - configMapRef:
                # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
                name: kubernetes-services-endpoint
                optional: true
          env:
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
          volumeMounts:
            - mountPath: /var/lib/cni/networks
              name: host-local-net-dir
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
          securityContext:
            privileged: true
{% endif %}
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }}
          imagePullPolicy: {{ k8s_image_pull_policy }}
          command: ["/opt/cni/bin/install"]
          envFrom:
            - configMapRef:
                # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
                name: kubernetes-services-endpoint
                optional: true
          env:
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
{% if calico_mtu is defined %}
            # CNI MTU Config variable
            - name: CNI_MTU
              value: "{{ calico_veth_mtu | default(calico_mtu) }}"
{% endif %}
            # Prevents the container from sleeping forever.
            - name: SLEEP
              value: "false"
{% if calico_datastore == "etcd" %}
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
{% endif %}
{% if calico_datastore == "kdd" %}
            # Set the hostname based on the k8s node name.
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
{% endif %}
          volumeMounts:
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
          securityContext:
            privileged: true
        # This init container mounts the filesystems needed by the BPF data plane,
        # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation
        # runs in best-effort fashion (errors are not fatal) so that pod creation is not
        # disrupted in iptables mode.
        - name: "mount-bpffs"
          image: {{ calico_node_image_repo }}:{{ calico_node_image_tag }}
          imagePullPolicy: {{ k8s_image_pull_policy }}
          command: ["calico-node", "-init", "-best-effort"]
          volumeMounts:
            - mountPath: /sys/fs
              name: sys-fs
              # Bidirectional is required to ensure that the new mount we make at /sys/fs/bpf propagates to the host
              # so that it outlives the init container.
              mountPropagation: Bidirectional
            - mountPath: /var/run/calico
              name: var-run-calico
              # Bidirectional is required to ensure that the new mount we make at /run/calico/cgroup propagates to the host
              # so that it outlives the init container.
              mountPropagation: Bidirectional
            # Mount the host's /proc at /nodeproc. It is needed by the mountns binary, executed by
            # calico-node, to mount the root cgroup2 fs at /run/calico/cgroup so that CTLB programs
            # attach correctly.
            - mountPath: /nodeproc
              name: nodeproc
              readOnly: true
          securityContext:
            privileged: true
      containers:
        # Runs calico/node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: {{ calico_node_image_repo }}:{{ calico_node_image_tag }}
          imagePullPolicy: {{ k8s_image_pull_policy }}
          envFrom:
            - configMapRef:
                # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
                name: kubernetes-services-endpoint
                optional: true
          env:
            # The location of the Calico etcd cluster.
{% if calico_datastore == "etcd" %}
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
{% elif calico_datastore == "kdd" %}
            # Use Kubernetes API as the backing datastore.
            - name: DATASTORE_TYPE
              value: "kubernetes"
{% if typha_enabled %}
            # Typha support: controlled by the ConfigMap.
            - name: FELIX_TYPHAK8SSERVICENAME
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: typha_service_name
{% if typha_secure %}
            - name: FELIX_TYPHACN
              value: typha-server
            - name: FELIX_TYPHACAFILE
              value: /etc/typha-ca/ca.crt
            - name: FELIX_TYPHACERTFILE
              value: /etc/typha-client/typha-client.crt
            - name: FELIX_TYPHAKEYFILE
              value: /etc/typha-client/typha-client.key
{% endif %}
{% endif %}
            # Wait for the datastore.
            - name: WAIT_FOR_DATASTORE
              value: "true"
{% endif %}
{% if calico_network_backend == 'vxlan' %}
            - name: FELIX_VXLANVNI
              value: "{{ calico_vxlan_vni }}"
            - name: FELIX_VXLANPORT
              value: "{{ calico_vxlan_port }}"
{% endif %}
            # Choose the backend to use.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type.
            - name: CLUSTER_TYPE
              value: "k8s,bgp"
            # Set noderef for node controller.
            - name: CALICO_K8S_NODE_REF
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Default action Felix applies to traffic from workload endpoints to the host (RETURN unless overridden).
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "{{ calico_endpoint_to_host_action | default('RETURN') }}"
            - name: FELIX_HEALTHHOST
              value: "{{ calico_healthhost }}"
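            # Rendered only with kube-proxy in IPVS mode: tell Felix which port range the API
            # server allocates for NodePort services.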
{% if kube_proxy_mode == 'ipvs' and kube_apiserver_node_port_range is defined %}
            - name: FELIX_KUBENODEPORTRANGES
              value: "{{ kube_apiserver_node_port_range.split('-')[0] }}:{{ kube_apiserver_node_port_range.split('-')[1] }}"
{% endif %}
            - name: FELIX_IPTABLESBACKEND
              value: "{{ calico_iptables_backend }}"
            - name: FELIX_IPTABLESLOCKTIMEOUTSECS
              value: "{{ calico_iptables_lock_timeout_secs }}"
            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
            # chosen from this range. Changing this value after installation will have
            # no effect. This should fall within `--cluster-cidr`.
            # - name: CALICO_IPV4POOL_CIDR
            #   value: "192.168.0.0/16"
            - name: CALICO_IPV4POOL_IPIP
              value: "{{ calico_ipv4pool_ipip }}"
            # Enable or Disable VXLAN on the default IP pool.
            - name: CALICO_IPV4POOL_VXLAN
              value: "Never"
            - name: FELIX_IPV6SUPPORT
              value: "{{ enable_dual_stack_networks | default(false) }}"
            # Felix log level for screen output.
            - name: FELIX_LOGSEVERITYSCREEN
              value: "{{ calico_loglevel }}"
            # Calico startup log level.
            - name: CALICO_STARTUP_LOGLEVEL
              value: "{{ calico_node_startup_loglevel }}"
            # Enable or disable usage report.
            - name: FELIX_USAGEREPORTINGENABLED
              value: "{{ calico_usage_reporting }}"
{% if calico_mtu is defined %}
            # Set MTU for tunnel device used if ipip is enabled.
            - name: FELIX_IPINIPMTU
              value: "{{ calico_veth_mtu | default(calico_mtu) }}"
            # Set MTU for the VXLAN tunnel device.
            - name: FELIX_VXLANMTU
              value: "{{ calico_veth_mtu | default(calico_mtu) }}"
            # Set MTU for the Wireguard tunnel device.
            - name: FELIX_WIREGUARDMTU
              value: "{{ calico_veth_mtu | default(calico_mtu) }}"
{% endif %}
            - name: FELIX_CHAININSERTMODE
              value: "{{ calico_felix_chaininsertmode }}"
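            # Felix Prometheus metrics; these values should match the prometheus.io pod
            # annotations set above.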
            - name: FELIX_PROMETHEUSMETRICSENABLED
              value: "{{ calico_felix_prometheusmetricsenabled }}"
            - name: FELIX_PROMETHEUSMETRICSPORT
              value: "{{ calico_felix_prometheusmetricsport }}"
            - name: FELIX_PROMETHEUSGOMETRICSENABLED
              value: "{{ calico_felix_prometheusgometricsenabled }}"
            - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
              value: "{{ calico_felix_prometheusprocessmetricsenabled }}"
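            # Node IP auto-detection: use the configured method when one is set; otherwise fall
            # back to picking the interface that can reach the node's own host IP.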
{% if calico_ip_auto_method is defined %}
            - name: IP_AUTODETECTION_METHOD
              value: "{{ calico_ip_auto_method }}"
{% else %}
            - name: NODEIP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: IP_AUTODETECTION_METHOD
              value: "can-reach=$(NODEIP)"
{% endif %}
            - name: IP
              value: "autodetect"
{% if calico_ip6_auto_method is defined and enable_dual_stack_networks %}
            - name: IP6_AUTODETECTION_METHOD
              value: "{{ calico_ip6_auto_method }}"
{% endif %}
{% if calico_felix_mtu_iface_pattern is defined %}
            - name: FELIX_MTUIFACEPATTERN
              value: "{{ calico_felix_mtu_iface_pattern }}"
{% endif %}
{% if enable_dual_stack_networks %}
            - name: IP6
              value: autodetect
{% endif %}
{% if calico_use_default_route_src_ipaddr | default(false) %}
            - name: FELIX_DEVICEROUTESOURCEADDRESS
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
{% endif %}
            - name: NODENAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: FELIX_HEALTHENABLED
              value: "true"
            - name: FELIX_IGNORELOOSERPF
              value: "{{ calico_node_ignorelooserpf }}"
            - name: CALICO_MANAGE_CNI
              value: "true"
{% if calico_ipam_host_local %}
            - name: USE_POD_CIDR
              value: "true"
{% endif %}
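            # Render any additional environment variables supplied via calico_node_extra_envs.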
{% if calico_node_extra_envs is defined %}
{% for key in calico_node_extra_envs %}
            - name: {{ key }}
              value: "{{ calico_node_extra_envs[key] }}"
{% endfor %}
{% endif %}
          securityContext:
            privileged: true
          resources:
            limits:
              cpu: {{ calico_node_cpu_limit }}
              memory: {{ calico_node_memory_limit }}
            requests:
              cpu: {{ calico_node_cpu_requests }}
              memory: {{ calico_node_memory_requests }}
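          # On termination, run "calico-node -shutdown" first so the node shuts down gracefully
          # (e.g. BGP sessions are closed cleanly) before the container exits.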
          lifecycle:
            preStop:
              exec:
                command:
                  - /bin/calico-node
                  - -shutdown
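          # Liveness and readiness are reported by calico-node itself; the bird checks are only
          # added when the BGP (bird) networking backend is in use.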
          livenessProbe:
            exec:
              command:
                - /bin/calico-node
                - -felix-live
{% if calico_network_backend == "bird" %}
                - -bird-live
{% endif %}
            periodSeconds: 10
            initialDelaySeconds: 10
            timeoutSeconds: {{ calico_node_livenessprobe_timeout | default(10) }}
            failureThreshold: 6
          readinessProbe:
            exec:
              command:
                - /bin/calico-node
{% if calico_network_backend == "bird" %}
                - -bird-ready
{% endif %}
                - -felix-ready
            periodSeconds: 10
            timeoutSeconds: {{ calico_node_readinessprobe_timeout | default(10) }}
            failureThreshold: 6
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
{% if calico_datastore == "etcd" %}
            - mountPath: /calico-secrets
              name: etcd-certs
              readOnly: true
{% endif %}
            - name: xtables-lock
              mountPath: /run/xtables.lock
              readOnly: false
            # For maintaining CNI plugin API credentials.
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
              readOnly: false
{% if typha_secure %}
            - name: typha-client
              mountPath: /etc/typha-client
              readOnly: true
            - name: typha-cacert
              subPath: ca.crt
              mountPath: /etc/typha-ca/ca.crt
              readOnly: true
{% endif %}
            - name: policysync
              mountPath: /var/run/nodeagent
            # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
            # parent directory.
            - name: bpffs
              mountPath: /sys/fs/bpf
            - name: cni-log-dir
              mountPath: /var/log/calico/cni
              readOnly: true
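      # Host paths (and, when Typha TLS is enabled, secrets) backing the volumeMounts above.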
      volumes:
        # Used by calico/node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
            type: DirectoryOrCreate
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
            type: DirectoryOrCreate
        # Used to install CNI.
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
            type: DirectoryOrCreate
{% if calico_datastore == "etcd" %}
        # Mount in the etcd TLS secrets.
        - name: etcd-certs
          hostPath:
            path: "{{ calico_cert_dir }}"
{% endif %}
        # Mount the global iptables lock file, used by calico/node.
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
{% if calico_datastore == "kdd" and not calico_ipam_host_local %}
        # Mount in the directory for host-local IPAM allocations. This is
        # used when upgrading from host-local to calico-ipam, and can be removed
        # if not using the upgrade-ipam init container.
        - name: host-local-net-dir
          hostPath:
            path: /var/lib/cni/networks
{% endif %}
{% if typha_enabled and typha_secure %}
        - name: typha-client
          secret:
            secretName: typha-client
            items:
              - key: tls.crt
                path: typha-client.crt
              - key: tls.key
                path: typha-client.key
        - name: typha-cacert
          hostPath:
            path: "/etc/kubernetes/ssl/"
{% endif %}
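        # sys-fs and bpffs back the eBPF data plane: sys-fs is used by the mount-bpffs init
        # container, bpffs by calico-node itself.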
        - name: sys-fs
          hostPath:
            path: /sys/fs/
            type: DirectoryOrCreate
        - name: bpffs
          hostPath:
            path: /sys/fs/bpf
            type: Directory
        # Mount /proc at /nodeproc, used by the mount-bpffs initContainer to mount the root cgroup2 fs.
        - name: nodeproc
          hostPath:
            path: /proc
        # Used to access CNI logs.
        - name: cni-log-dir
          hostPath:
            path: /var/log/calico/cni
        # Used to create per-pod Unix Domain Sockets.
        - name: policysync
          hostPath:
            type: DirectoryOrCreate
            path: /var/run/nodeagent