update cilium 1.11.5

pull/1166/head
jin.gjm 2022-06-13 19:29:30 +08:00
parent 42952eb72e
commit 5f86b82546
6 changed files with 81 additions and 1003 deletions

View File

@@ -131,9 +131,6 @@ calico_ver: "__calico__"
calico_ver_main: "{{ calico_ver.split('.')[0] }}.{{ calico_ver.split('.')[1] }}"
# ------------------------------------------- cilium
# [cilium]CILIUM_ETCD_OPERATOR 创建的 etcd 集群节点数 1,3,5,7...
ETCD_CLUSTER_SIZE: 1
# [cilium]镜像版本
cilium_ver: "__cilium__"

2
ezdown
View File

@@ -31,7 +31,7 @@ dashboardMetricsScraperVer=v1.0.8
metricsVer=v0.5.2
pauseVer=3.6
nfsProvisionerVer=v4.0.2
export ciliumVer=v1.4.1
ciliumVer=1.11.5
export kubeRouterVer=v0.3.1
export kubeOvnVer=v1.5.3
export promChartVer=35.5.1

Binary file not shown.

View File

@@ -6,30 +6,16 @@
fail: msg="kernel {{ ansible_kernel }} is too old for cilium installing"
when: "KERNEL_VER|float <= 4.09"
- name: node 节点创建cilium 相关目录
file: name={{ item }} state=directory
with_items:
- /etc/cni/net.d
- /var/run/cilium
- name: Optional-Mount BPF FS
mount:
fstype: "bpf"
src: "bpffs"
path: "/sys/fs/bpf"
state: "mounted"
- block:
- name: 创建 cilium chart 个性化设置
template: src=values.yaml.j2 dest={{ cluster_dir }}/yml/cilium-values.yaml
- name: 配置 cilium DaemonSet yaml文件
template: src=cilium.yaml.j2 dest={{ cluster_dir }}/yml/cilium.yaml
tags: reconf
- name: helm 创建 cilium {{ cilium_ver }}
shell: "{{ base_dir }}/bin/helm upgrade cilium --install \
-n kube-system -f {{ cluster_dir }}/yml/cilium-values.yaml \
{{ base_dir }}/roles/cilium/files/cilium-{{ cilium_ver }}.tgz"
run_once: true
connection: local
# 只需单节点执行一次
- name: 运行 cilium网络
shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/cilium.yaml"
run_once: true
connection: local
connection: local
# 删除原有cni配置
- name: 删除默认cni配置
@@ -37,7 +23,7 @@
# 等待网络插件部署成功,视下载镜像速度而定
- name: 轮询等待cilium-node 运行,视下载镜像速度而定
shell: "{{ bin_dir }}/kubectl get pod -n kube-system -o wide|grep 'cilium'|grep ' {{ inventory_hostname }} '|awk '{print $3}'"
shell: "{{ bin_dir }}/kubectl get pod -n kube-system -owide -lk8s-app=cilium|grep ' {{ inventory_hostname }} '|awk '{print $3}'"
register: pod_status
until: pod_status.stdout == "Running"
retries: 15

View File

@@ -1,976 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: cilium-config
namespace: kube-system
data:
# This etcd-config contains the etcd endpoints of your cluster. If you use
# TLS please make sure you follow the tutorial in https://cilium.link/etcd-config
etcd-config: |-
---
endpoints:
- https://cilium-etcd-client.kube-system.svc:2379
#
# In case you want to use TLS in etcd, uncomment the 'ca-file' line
# and create a kubernetes secret by following the tutorial in
# https://cilium.link/etcd-config
ca-file: '/var/lib/etcd-secrets/etcd-client-ca.crt'
#
# In case you want client to server authentication, uncomment the following
# lines and create a kubernetes secret by following the tutorial in
# https://cilium.link/etcd-config
key-file: '/var/lib/etcd-secrets/etcd-client.key'
cert-file: '/var/lib/etcd-secrets/etcd-client.crt'
# If you want to run cilium in debug mode change this value to true
debug: "false"
# Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
# address.
enable-ipv4: "true"
# Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6
# address.
enable-ipv6: "false"
# If a serious issue occurs during Cilium startup, this
# invasive option may be set to true to remove all persistent
# state. Endpoints will not be restored using knowledge from a
# prior Cilium run, so they may receive new IP addresses upon
# restart. This also triggers clean-cilium-bpf-state.
clean-cilium-state: "false"
# If you want to clean cilium BPF state, set this to true;
# Removes all BPF maps from the filesystem. Upon restart,
# endpoints are restored with the same IP addresses, however
# any ongoing connections may be disrupted briefly.
# Loadbalancing decisions will be reset, so any ongoing
# connections via a service may be loadbalanced to a different
# backend after restart.
clean-cilium-bpf-state: "false"
# In Cilium 1.0, all traffic from the host, including from local processes
# and traffic that is masqueraded from the outside world to the host IP,
# would be classified as from the host entity (reserved:host label).
# Furthermore, to allow Kubernetes agents to perform health checks over IP
# into the endpoints, the host is allowed by default. This means that all
# traffic from the outside world is also allowed by default, regardless of
# security policy.
#
# This option was introduced in Cilium 1.1 to disable this behaviour. It must
# be explicitly set to "false" to take effect on Cilium 1.3 or earlier.
# Cilium 1.4 sets this to "false" by default if it is not specified in the
# ConfigMap.
#
# This option has been deprecated, it will be removed in Cilium 1.5 or later.
#
# For more information, see https://cilium.link/host-vs-world
#legacy-host-allows-world: "false"
# If you want cilium monitor to aggregate tracing for packets, set this level
# to "low", "medium", or "maximum". The higher the level, the fewer packets
# that will be seen in monitor output.
monitor-aggregation-level: "none"
# ct-global-max-entries-* specifies the maximum number of connections
# supported across all endpoints, split by protocol: tcp or other. One pair
# of maps uses these values for IPv4 connections, and another pair of maps
# use these values for IPv6 connections.
#
# If these values are modified, then during the next Cilium startup the
# tracking of ongoing connections may be disrupted. This may lead to brief
# policy drops or a change in loadbalancing decisions for a connection.
#
# For users upgrading from Cilium 1.2 or earlier, to minimize disruption
# during the upgrade process, comment out these options.
ct-global-max-entries-tcp: "524288"
ct-global-max-entries-other: "262144"
# Pre-allocation of map entries allows per-packet latency to be reduced, at
# the expense of up-front memory allocation for the entries in the maps. The
# default value below will minimize memory usage in the default installation;
# users who are sensitive to latency may consider setting this to "true".
#
# This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
# this option and behave as though it is set to "true".
#
# If this value is modified, then during the next Cilium startup the restore
# of existing endpoints and tracking of ongoing connections may be disrupted.
# This may lead to policy drops or a change in loadbalancing decisions for a
# connection for some time. Endpoints may need to be recreated to restore
# connectivity.
#
# If this option is set to "false" during an upgrade from 1.3 or earlier to
# 1.4 or later, then it may cause one-time disruptions during the upgrade.
preallocate-bpf-maps: "false"
# Regular expression matching compatible Istio sidecar istio-proxy
# container image names
sidecar-istio-proxy-image: "cilium/istio_proxy"
# Encapsulation mode for communication between nodes
# Possible values:
# - disabled
# - vxlan (default)
# - geneve
tunnel: "vxlan"
# Name of the cluster. Only relevant when building a mesh of clusters.
cluster-name: default
# Unique ID of the cluster. Must be unique across all connected clusters and
# in the range of 1 to 255. Only relevant when building a mesh of clusters.
#cluster-id: 1
# Interface to be used when running Cilium on top of a CNI plugin.
# For flannel, use "cni0"
flannel-master-device: ""
# When running Cilium with policy enforcement enabled on top of a CNI plugin
# the BPF programs will be installed on the network interface specified in
# 'flannel-master-device' and on all network interfaces belonging to
# a container. When the Cilium DaemonSet is removed, the BPF programs will
# be kept in the interfaces unless this option is set to "true".
flannel-uninstall-on-exit: "true"
# Installs a BPF program to allow for policy enforcement in already running
# containers managed by Flannel.
# NOTE: This requires Cilium DaemonSet to be running in the hostPID.
# To run in this mode in Kubernetes change the value of the hostPID from
# false to true. Can be found under the path `spec.spec.hostPID`
flannel-manage-existing-containers: "false"
# DNS Polling periodically issues a DNS lookup for each `matchName` from
# cilium-agent. The result is used to regenerate endpoint policy.
# DNS lookups are repeated with an interval of 5 seconds, and are made for
# A(IPv4) and AAAA(IPv6) addresses. Should a lookup fail, the most recent IP
# data is used instead. An IP change will trigger a regeneration of the Cilium
# policy for each endpoint and increment the per cilium-agent policy
# repository revision.
#
# This option is disabled by default starting from version 1.4.x in favor
# of a more powerful DNS proxy-based implementation, see [0] for details.
# Enable this option if you want to use FQDN policies but do not want to use
# the DNS proxy.
#
# To ease upgrade, users may opt to set this option to "true".
# Otherwise please refer to the Upgrade Guide [1] which explains how to
# prepare policy rules for upgrade.
#
# [0] http://docs.cilium.io/en/stable/policy/language/#dns-based
# [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
tofqdns-enable-poller: "false"
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
k8s-app: cilium
kubernetes.io/cluster-service: "true"
name: cilium
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: cilium
kubernetes.io/cluster-service: "true"
template:
metadata:
annotations:
prometheus.io/port: "9090"
prometheus.io/scrape: "true"
# This annotation plus the CriticalAddonsOnly toleration makes
# cilium to be a critical pod in the cluster, which ensures cilium
# gets priority scheduling.
# https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]'
labels:
k8s-app: cilium
kubernetes.io/cluster-service: "true"
spec:
priorityClassName: system-cluster-critical
containers:
- args:
- --debug=$(CILIUM_DEBUG)
- --kvstore=etcd
- --kvstore-opt=etcd.config=/var/lib/etcd-config/etcd.config
command:
- cilium-agent
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_DEBUG
valueFrom:
configMapKeyRef:
key: debug
name: cilium-config
- name: CILIUM_ENABLE_IPV4
valueFrom:
configMapKeyRef:
key: enable-ipv4
name: cilium-config
optional: true
- name: CILIUM_ENABLE_IPV6
valueFrom:
configMapKeyRef:
key: enable-ipv6
name: cilium-config
optional: true
# Note: this variable is a no-op if not defined, and is used in the
# prometheus examples.
- name: CILIUM_PROMETHEUS_SERVE_ADDR
valueFrom:
configMapKeyRef:
key: prometheus-serve-addr
name: cilium-metrics-config
optional: true
- name: CILIUM_LEGACY_HOST_ALLOWS_WORLD
valueFrom:
configMapKeyRef:
key: legacy-host-allows-world
name: cilium-config
optional: true
- name: CILIUM_SIDECAR_ISTIO_PROXY_IMAGE
valueFrom:
configMapKeyRef:
key: sidecar-istio-proxy-image
name: cilium-config
optional: true
- name: CILIUM_TUNNEL
valueFrom:
configMapKeyRef:
key: tunnel
name: cilium-config
optional: true
- name: CILIUM_MONITOR_AGGREGATION_LEVEL
valueFrom:
configMapKeyRef:
key: monitor-aggregation-level
name: cilium-config
optional: true
- name: CILIUM_CLUSTERMESH_CONFIG
value: /var/lib/cilium/clustermesh/
- name: CILIUM_CLUSTER_NAME
valueFrom:
configMapKeyRef:
key: cluster-name
name: cilium-config
optional: true
- name: CILIUM_CLUSTER_ID
valueFrom:
configMapKeyRef:
key: cluster-id
name: cilium-config
optional: true
- name: CILIUM_GLOBAL_CT_MAX_TCP
valueFrom:
configMapKeyRef:
key: ct-global-max-entries-tcp
name: cilium-config
optional: true
- name: CILIUM_GLOBAL_CT_MAX_ANY
valueFrom:
configMapKeyRef:
key: ct-global-max-entries-other
name: cilium-config
optional: true
- name: CILIUM_PREALLOCATE_BPF_MAPS
valueFrom:
configMapKeyRef:
key: preallocate-bpf-maps
name: cilium-config
optional: true
- name: CILIUM_FLANNEL_MASTER_DEVICE
valueFrom:
configMapKeyRef:
key: flannel-master-device
name: cilium-config
optional: true
- name: CILIUM_FLANNEL_UNINSTALL_ON_EXIT
valueFrom:
configMapKeyRef:
key: flannel-uninstall-on-exit
name: cilium-config
optional: true
- name: CILIUM_FLANNEL_MANAGE_EXISTING_CONTAINERS
valueFrom:
configMapKeyRef:
key: flannel-manage-existing-containers
name: cilium-config
optional: true
- name: CILIUM_DATAPATH_MODE
valueFrom:
configMapKeyRef:
key: datapath-mode
name: cilium-config
optional: true
- name: CILIUM_IPVLAN_MASTER_DEVICE
valueFrom:
configMapKeyRef:
key: ipvlan-master-device
name: cilium-config
optional: true
- name: CILIUM_INSTALL_IPTABLES_RULES
valueFrom:
configMapKeyRef:
key: install-iptables-rules
name: cilium-config
optional: true
- name: CILIUM_MASQUERADE
valueFrom:
configMapKeyRef:
key: masquerade
name: cilium-config
optional: true
- name: CILIUM_AUTO_DIRECT_NODE_ROUTES
valueFrom:
configMapKeyRef:
key: auto-direct-node-routes
name: cilium-config
optional: true
- name: CILIUM_TOFQDNS_ENABLE_POLLER
valueFrom:
configMapKeyRef:
key: tofqdns-enable-poller
name: cilium-config
optional: true
- name: CILIUM_TOFQDNS_PRE_CACHE
valueFrom:
configMapKeyRef:
key: tofqdns-pre-cache
name: cilium-config
optional: true
image: docker.io/cilium/cilium:{{ cilium_ver }}
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
exec:
command:
- /cni-install.sh
preStop:
exec:
command:
- /cni-uninstall.sh
livenessProbe:
exec:
command:
- cilium
- status
failureThreshold: 10
# The initial delay for the liveness probe is intentionally large to
# avoid an endless kill & restart cycle if in the event that the initial
# bootstrapping takes longer than expected.
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: cilium-agent
ports:
- containerPort: 9090
hostPort: 9090
name: prometheus
protocol: TCP
readinessProbe:
exec:
command:
- cilium
- status
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 1
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: true
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /host/opt/cni/bin
name: cni-path
- mountPath: /host/etc/cni/net.d
name: etc-cni-netd
- mountPath: /var/run/docker.sock
name: docker-socket
readOnly: true
- mountPath: /var/lib/etcd-config
name: etcd-config-path
readOnly: true
- mountPath: /var/lib/etcd-secrets
name: etcd-secrets
readOnly: true
- mountPath: /var/lib/cilium/clustermesh
name: clustermesh-secrets
readOnly: true
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
hostPID: false
initContainers:
- command:
- /init-container.sh
env:
- name: CLEAN_CILIUM_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-state
name: cilium-config
optional: true
- name: CLEAN_CILIUM_BPF_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
image: docker.io/cilium/cilium-init:2018-10-16
imagePullPolicy: IfNotPresent
name: clean-cilium-state
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: true
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /var/run/cilium
name: cilium-run
restartPolicy: Always
serviceAccount: cilium
serviceAccountName: cilium
terminationGracePeriodSeconds: 1
tolerations:
- operator: Exists
volumes:
# To keep state between restarts / upgrades
- hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
name: cilium-run
# To keep state between restarts / upgrades for bpf maps
- hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
name: bpf-maps
# To read docker events from the node
- hostPath:
path: /var/run/docker.sock
type: Socket
name: docker-socket
# To install cilium cni plugin in the host
- hostPath:
path: {{ bin_dir }}
type: DirectoryOrCreate
name: cni-path
# To install cilium cni configuration in the host
- hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
name: etc-cni-netd
# To read the etcd config stored in config maps
- configMap:
defaultMode: 420
items:
- key: etcd-config
path: etcd.config
name: cilium-config
name: etcd-config-path
# To read the k8s etcd secrets in case the user might want to use TLS
- name: etcd-secrets
secret:
defaultMode: 420
optional: true
secretName: cilium-etcd-secrets
# To read the clustermesh configuration
- name: clustermesh-secrets
secret:
defaultMode: 420
optional: true
secretName: cilium-clustermesh
updateStrategy:
rollingUpdate:
# Specifies the maximum number of Pods that can be unavailable during the update process.
maxUnavailable: 2
type: RollingUpdate
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.cilium/app: operator
name: cilium-operator
name: cilium-operator
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
io.cilium/app: operator
name: cilium-operator
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
io.cilium/app: operator
name: cilium-operator
spec:
containers:
- args:
- --debug=$(CILIUM_DEBUG)
- --kvstore=etcd
- --kvstore-opt=etcd.config=/var/lib/etcd-config/etcd.config
command:
- cilium-operator
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_DEBUG
valueFrom:
configMapKeyRef:
key: debug
name: cilium-config
optional: true
- name: CILIUM_CLUSTER_NAME
valueFrom:
configMapKeyRef:
key: cluster-name
name: cilium-config
optional: true
- name: CILIUM_CLUSTER_ID
valueFrom:
configMapKeyRef:
key: cluster-id
name: cilium-config
optional: true
- name: CILIUM_DISABLE_ENDPOINT_CRD
valueFrom:
configMapKeyRef:
key: disable-endpoint-crd
name: cilium-config
optional: true
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: cilium-aws
optional: true
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: cilium-aws
optional: true
- name: AWS_DEFAULT_REGION
valueFrom:
secretKeyRef:
key: AWS_DEFAULT_REGION
name: cilium-aws
optional: true
image: docker.io/cilium/operator:{{ cilium_ver }}
imagePullPolicy: IfNotPresent
name: cilium-operator
volumeMounts:
- mountPath: /var/lib/etcd-config
name: etcd-config-path
readOnly: true
- mountPath: /var/lib/etcd-secrets
name: etcd-secrets
readOnly: true
dnsPolicy: ClusterFirst
restartPolicy: Always
serviceAccount: cilium-operator
serviceAccountName: cilium-operator
volumes:
# To read the etcd config stored in config maps
- configMap:
defaultMode: 420
items:
- key: etcd-config
path: etcd.config
name: cilium-config
name: etcd-config-path
# To read the k8s etcd secrets in case the user might want to use TLS
- name: etcd-secrets
secret:
defaultMode: 420
optional: true
secretName: cilium-etcd-secrets
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium-operator
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: cilium-operator
rules:
- apiGroups:
- ""
resources:
- pods
- deployments
- componentstatuses
verbs:
- '*'
- apiGroups:
- ""
resources:
- services
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumendpoints
- ciliumendpoints/status
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: cilium-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium-operator
subjects:
- kind: ServiceAccount
name: cilium-operator
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: cilium-etcd-operator
rules:
- apiGroups:
- etcd.database.coreos.com
resources:
- etcdclusters
verbs:
- get
- delete
- create
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- delete
- get
- create
- apiGroups:
- ""
resources:
- deployments
verbs:
- delete
- create
- get
- update
- apiGroups:
- ""
resources:
- pods
verbs:
- list
- delete
- get
- apiGroups:
- apps
resources:
- deployments
verbs:
- delete
- create
- get
- update
- apiGroups:
- ""
resources:
- componentstatuses
verbs:
- get
- apiGroups:
- extensions
resources:
- deployments
verbs:
- delete
- create
- get
- update
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
- delete
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: cilium-etcd-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium-etcd-operator
subjects:
- kind: ServiceAccount
name: cilium-etcd-operator
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: etcd-operator
rules:
- apiGroups:
- etcd.database.coreos.com
resources:
- etcdclusters
- etcdbackups
- etcdrestores
verbs:
- '*'
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- '*'
- apiGroups:
- ""
resources:
- pods
- services
- endpoints
- persistentvolumeclaims
- events
- deployments
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
- apiGroups:
- extensions
resources:
- deployments
verbs:
- create
- get
- list
- patch
- update
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: etcd-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: etcd-operator
subjects:
- kind: ServiceAccount
name: cilium-etcd-sa
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium-etcd-operator
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium-etcd-sa
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.cilium/app: etcd-operator
name: cilium-etcd-operator
name: cilium-etcd-operator
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
io.cilium/app: etcd-operator
name: cilium-etcd-operator
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
io.cilium/app: etcd-operator
name: cilium-etcd-operator
spec:
containers:
- command:
- /usr/bin/cilium-etcd-operator
env:
- name: CILIUM_ETCD_OPERATOR_CLUSTER_DOMAIN
value: cluster.local
- name: CILIUM_ETCD_OPERATOR_ETCD_CLUSTER_SIZE
value: "{{ ETCD_CLUSTER_SIZE }}"
- name: CILIUM_ETCD_OPERATOR_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_ETCD_OPERATOR_POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: CILIUM_ETCD_OPERATOR_POD_UID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.uid
image: docker.io/cilium/cilium-etcd-operator:v2.0.5
imagePullPolicy: IfNotPresent
name: cilium-etcd-operator
dnsPolicy: ClusterFirst
hostNetwork: true
restartPolicy: Always
serviceAccount: cilium-etcd-operator
serviceAccountName: cilium-etcd-operator
tolerations:
- operator: Exists
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium
subjects:
- kind: ServiceAccount
name: cilium
namespace: kube-system
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: cilium
rules:
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- namespaces
- services
- nodes
- endpoints
- componentstatuses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods
- nodes
verbs:
- get
- list
- watch
- update
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- create
- get
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- create
- get
- list
- watch
- update
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumendpoints
- ciliumendpoints/status
verbs:
- '*'
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium
namespace: kube-system

View File

@@ -0,0 +1,71 @@
image:
repository: quay.io/cilium/cilium
useDigest: false
resources:
limits:
cpu: 4000m
memory: 4Gi
requests:
cpu: 100m
memory: 512Mi
cni:
binPath: {{ bin_dir }}
containerRuntime:
integration: containerd
socketPath: unix:///run/containerd/containerd.sock
hubble:
enabled: false
identityAllocationMode: "crd"
ipam:
# ref: https://docs.cilium.io/en/stable/concepts/networking/ipam/
mode: "cluster-pool"
operator:
clusterPoolIPv4PodCIDRList: ["{{ CLUSTER_CIDR }}"]
clusterPoolIPv4MaskSize: 24
l7Proxy: true
# -- Configure Istio proxy options.
proxy:
prometheus:
enabled: false
port: "9095"
# -- Regular expression matching compatible Istio sidecar istio-proxy
# container image names
sidecarImageRegex: "cilium/istio_proxy"
# -- Configure TLS configuration in the agent.
tls:
enabled: true
secretsBackend: local
tunnel: "vxlan"
etcd:
enabled: false
operator:
enabled: true
image:
repository: quay.io/cilium/operator
useDigest: false
replicas: 1
resources:
limits:
cpu: 1000m
memory: 1Gi
requests:
cpu: 100m
memory: 128Mi
preflight:
enabled: false
clustermesh:
useAPIServer: false