apiVersion: v1
kind: ConfigMap
metadata:
  name: cilium-config
  namespace: kube-system
data:
  # This etcd-config contains the etcd endpoints of your cluster. If you use
  # TLS please make sure you follow the tutorial in https://cilium.link/etcd-config
  etcd-config: |-
    ---
    endpoints:
{% for host in groups['etcd'] %}
    - https://{{ host }}:2379
{% endfor %}
    #
    # In case you want to use TLS in etcd, uncomment the 'ca-file' line
    # and create a kubernetes secret by following the tutorial in
    # https://cilium.link/etcd-config
    ca-file: '/var/lib/etcd-secrets/etcd-ca'
    #
    # In case you want client to server authentication, uncomment the following
    # lines and create a kubernetes secret by following the tutorial in
    # https://cilium.link/etcd-config
    key-file: '/var/lib/etcd-secrets/etcd-client-key'
    cert-file: '/var/lib/etcd-secrets/etcd-client-crt'

  # If you want to run cilium in debug mode, change this value to true
  debug: "{{ Debug_Mode }}"
  disable-ipv4: "false"
  # If you want to clean cilium state, change this value to true
  clean-cilium-state: "{{ Clean_Start }}"
  legacy-host-allows-world: "{{ Legacy_Host_Policy }}"

  # Regular expression matching compatible Istio sidecar istio-proxy
  # container image names
  sidecar-istio-proxy-image: "cilium/istio_proxy"
---
kind: DaemonSet
apiVersion: apps/v1beta2
metadata:
  name: cilium
  namespace: kube-system
spec:
  updateStrategy:
    type: "RollingUpdate"
    rollingUpdate:
      # Specifies the maximum number of Pods that can be unavailable during the update process.
      maxUnavailable: 2
  selector:
    matchLabels:
      k8s-app: cilium
      kubernetes.io/cluster-service: "true"
  template:
    metadata:
      labels:
        k8s-app: cilium
        kubernetes.io/cluster-service: "true"
      annotations:
        # This annotation plus the CriticalAddonsOnly toleration marks
        # cilium as a critical pod in the cluster, which ensures cilium
        # gets priority scheduling.
        # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: >-
          [{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]
        prometheus.io/scrape: "true"
        prometheus.io/port: "9090"
    spec:
      serviceAccountName: cilium
      initContainers:
      - name: clean-cilium-state
        image: library/busybox:{{ busybox_ver }}
        imagePullPolicy: IfNotPresent
        # Wipes cilium runtime state and BPF maps only when clean-cilium-state is "true"
        command: ['sh', '-c', 'if [ "${CLEAN_CILIUM_STATE}" = "true" ]; then rm -rf /var/run/cilium/state; rm -rf /sys/fs/bpf/tc/globals/cilium_*; fi']
        volumeMounts:
        - name: bpf-maps
          mountPath: /sys/fs/bpf
        - name: cilium-run
          mountPath: /var/run/cilium
        env:
        - name: "CLEAN_CILIUM_STATE"
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              optional: true
              key: clean-cilium-state
      containers:
      - image: cilium/cilium:{{ cilium_ver }}
        imagePullPolicy: IfNotPresent
        name: cilium-agent
        command: [ "cilium-agent" ]
        args:
        - "--debug=$(CILIUM_DEBUG)"
        - "-t=vxlan"
        - "--kvstore=etcd"
        - "--kvstore-opt=etcd.config=/var/lib/etcd-config/etcd.config"
        - "--disable-ipv4=$(DISABLE_IPV4)"
        ports:
        - name: prometheus
          containerPort: 9090
        lifecycle:
          postStart:
            exec:
              command:
              - "/cni-install.sh"
          preStop:
            exec:
              command:
              - "/cni-uninstall.sh"
        env:
        - name: "K8S_NODE_NAME"
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: "CILIUM_DEBUG"
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: debug
        - name: "DISABLE_IPV4"
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: disable-ipv4
        # Note: this variable is a no-op if not defined, and is used in the
        # prometheus examples.
- name: "CILIUM_PROMETHEUS_SERVE_ADDR" valueFrom: configMapKeyRef: name: cilium-metrics-config optional: true key: prometheus-serve-addr - name: "CILIUM_LEGACY_HOST_ALLOWS_WORLD" valueFrom: configMapKeyRef: name: cilium-config optional: true key: legacy-host-allows-world - name: "CILIUM_SIDECAR_ISTIO_PROXY_IMAGE" valueFrom: configMapKeyRef: name: cilium-config key: sidecar-istio-proxy-image optional: true {% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %} # if hosts have multiple net interfaces, set following two ENVs - name: KUBERNETES_SERVICE_HOST value: "{{ MASTER_IP }}" #value: "{{ KUBE_APISERVER.split(':')[1].lstrip('/') }}" - name: KUBERNETES_SERVICE_PORT value: "{{ KUBE_APISERVER.split(':')[2] }}" {% endif %} livenessProbe: exec: command: - cilium - status # The initial delay for the liveness probe is intentionally large to # avoid an endless kill & restart cycle if in the event that the initial # bootstrapping takes longer than expected. initialDelaySeconds: 120 failureThreshold: 10 periodSeconds: 10 readinessProbe: exec: command: - cilium - status initialDelaySeconds: 5 periodSeconds: 5 volumeMounts: - name: bpf-maps mountPath: /sys/fs/bpf - name: cilium-run mountPath: /var/run/cilium - name: cni-path mountPath: /host/opt/cni/bin - name: etc-cni-netd mountPath: /host/etc/cni/net.d - name: docker-socket mountPath: /var/run/docker.sock readOnly: true - name: etcd-config-path mountPath: /var/lib/etcd-config readOnly: true - name: etcd-secrets mountPath: /var/lib/etcd-secrets readOnly: true securityContext: capabilities: add: - "NET_ADMIN" privileged: true hostNetwork: true volumes: # To keep state between restarts / upgrades - name: cilium-run hostPath: path: /var/run/cilium # To keep state between restarts / upgrades - name: bpf-maps hostPath: path: /sys/fs/bpf # To read docker events from the node - name: docker-socket hostPath: path: /var/run/docker.sock # To install cilium cni plugin in the host - name: cni-path hostPath: path: {{ bin_dir }} # To install cilium cni configuration in the host - name: etc-cni-netd hostPath: path: /etc/cni/net.d # To read the etcd config stored in config maps - name: etcd-config-path configMap: name: cilium-config items: - key: etcd-config path: etcd.config # To read the k8s etcd secrets in case the user might want to use TLS - name: etcd-secrets secret: secretName: cilium-etcd-secrets optional: true restartPolicy: Always tolerations: - effect: NoSchedule key: node-role.kubernetes.io/master - effect: NoSchedule key: node.cloudprovider.kubernetes.io/uninitialized value: "true" # Mark cilium's pod as critical for rescheduling - key: CriticalAddonsOnly operator: "Exists" --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: cilium roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cilium subjects: - kind: ServiceAccount name: cilium namespace: kube-system - kind: Group name: system:nodes --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: cilium rules: - apiGroups: - "networking.k8s.io" resources: - networkpolicies verbs: - get - list - watch - apiGroups: - "" resources: - namespaces - services - nodes - endpoints - componentstatuses verbs: - get - list - watch - apiGroups: - "" resources: - pods - nodes verbs: - get - list - watch - update - apiGroups: - extensions resources: - networkpolicies #FIXME remove this when we drop support for k8s NP-beta GH-1202 - thirdpartyresources - ingresses verbs: - create - get - list - watch - apiGroups: - "apiextensions.k8s.io" 
  resources:
  - customresourcedefinitions
  verbs:
  - create
  - get
  - list
  - watch
  - update
- apiGroups:
  - cilium.io
  resources:
  - ciliumnetworkpolicies
  - ciliumendpoints
  verbs:
  - "*"
---
kind: ServiceAccount
apiVersion: v1
metadata:
  name: cilium
  namespace: kube-system
---
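# The 'etcd-secrets' volume above expects a Secret named 'cilium-etcd-secrets' in the
# kube-system namespace whose keys match the file names referenced by the etcd-config
# block (etcd-ca, etcd-client-key, etcd-client-crt). A minimal sketch of that Secret is
# shown below; the data payloads are placeholders and must be the base64-encoded CA and
# client certificate/key actually used by your etcd cluster. It can equally be created
# with `kubectl create secret generic cilium-etcd-secrets -n kube-system
# --from-file=etcd-ca=... --from-file=etcd-client-key=... --from-file=etcd-client-crt=...`.
#
# apiVersion: v1
# kind: Secret
# metadata:
#   name: cilium-etcd-secrets
#   namespace: kube-system
# data:
#   etcd-ca: <base64-encoded etcd CA certificate>
#   etcd-client-key: <base64-encoded etcd client key>
#   etcd-client-crt: <base64-encoded etcd client certificate>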