From 399de72fe49a386c89613f49d71d662afb814f1f Mon Sep 17 00:00:00 2001
From: gjmzj
Date: Sun, 3 Mar 2019 10:01:22 +0800
Subject: [PATCH] update cilium v1.4.1

---
 roles/cilium/cilium.yml               |    5 +
 roles/cilium/defaults/main.yml        |   18 +-
 roles/cilium/tasks/main.yml           |   47 +-
 roles/cilium/templates/cilium.yaml.j2 | 1011 ++++++++++++++++++++-----
 roles/prepare/tasks/main.yml          |    4 +-
 5 files changed, 864 insertions(+), 221 deletions(-)
 create mode 100644 roles/cilium/cilium.yml

diff --git a/roles/cilium/cilium.yml b/roles/cilium/cilium.yml
new file mode 100644
index 0000000..5e84dbb
--- /dev/null
+++ b/roles/cilium/cilium.yml
@@ -0,0 +1,5 @@
+- hosts:
+  - kube-master
+  - kube-node
+  roles:
+  - cilium
diff --git a/roles/cilium/defaults/main.yml b/roles/cilium/defaults/main.yml
index 1595018..4f38ba6 100644
--- a/roles/cilium/defaults/main.yml
+++ b/roles/cilium/defaults/main.yml
@@ -1,22 +1,14 @@
 # 部分cilium相关配置, Note: cilium 需要Linux kernel >= 4.9.17
 
 # 如果 node 节点有多块网卡,请设置 true
-# 另外发现设置为 true 时能够解决v1.10使用ipvs偶尔出现pod内‘dial tcp 10.68.0.1:443: i/o timeout'的 bug
+# 如果发现‘dial tcp 10.68.0.1:443: i/o timeout'的错误,请设置 true
 NODE_WITH_MULTIPLE_NETWORKS: "true"
 
-# debug mode
-Debug_Mode: "false"
-
-# Removes any Cilium state, e.g. BPF policy maps, before starting
-Clean_Start: "false"
-
-# If true, the policy with the entity 'reserved:host' allows traffic from 'world'
-Legacy_Host_Policy: "true"
-
 # 镜像版本
-busybox_ver: "1.28.4"
-cilium_ver: "v1.1.4"
+cilium_ver: "v1.4.1"
 
 # 离线镜像tar包
 cilium_offline: "cilium_{{ cilium_ver }}.tar"
-busybox_offline: "busybox_{{ busybox_ver }}.tar"
+
+# CILIUM_ETCD_OPERATOR 创建的 etcd 集群节点数 1,3,5,7...
+ETCD_CLUSTER_SIZE: 1
diff --git a/roles/cilium/tasks/main.yml b/roles/cilium/tasks/main.yml
index efedb27..ff2b56f 100644
--- a/roles/cilium/tasks/main.yml
+++ b/roles/cilium/tasks/main.yml
@@ -1,38 +1,18 @@
-- block:
-    - name: 在deploy 节点创建cilium 相关目录
-      file: name={{ item }} state=directory
-      with_items:
-      - /etc/cilium/ssl
-      - /opt/kube/kube-system/cilium
-
-    - name: 创建cilium 证书请求
-      template: src=cilium-csr.json.j2 dest=/etc/cilium/ssl/cilium-csr.json
-
-    - name: 创建 cilium证书和私钥
-      shell: "cd /etc/cilium/ssl && {{ bin_dir }}/cfssl gencert \
-            -ca={{ ca_dir }}/ca.pem \
-            -ca-key={{ ca_dir }}/ca-key.pem \
-            -config={{ ca_dir }}/ca-config.json \
-            -profile=kubernetes cilium-csr.json | {{ bin_dir }}/cfssljson -bare cilium"
-
-    - name: get cilium-etcd-secrets info
-      shell: "{{ bin_dir }}/kubectl get secrets -n kube-system"
-      register: secrets_info
-
-    - name: 创建 cilium-etcd-secrets
-      shell: "cd /etc/cilium/ssl && \
-            {{ bin_dir }}/kubectl create secret generic -n kube-system cilium-etcd-secrets \
-            --from-file=etcd-ca={{ ca_dir }}/ca.pem \
-            --from-file=etcd-client-key=cilium-key.pem \
-            --from-file=etcd-client-crt=cilium.pem"
-      when: '"cilium-etcd-secrets" not in secrets_info.stdout'
-
-    - name: 配置 cilium DaemonSet yaml文件
-      template: src=cilium.yaml.j2 dest=/opt/kube/kube-system/cilium/cilium.yaml
-      tags: reconf
+- name: 在deploy 节点创建cilium 相关目录
+  file: name=/opt/kube/kube-system/cilium state=directory
   delegate_to: "{{ groups.deploy[0] }}"
   run_once: true
 
+- name: 配置 cilium DaemonSet yaml文件
+  template: src=cilium.yaml.j2 dest=/opt/kube/kube-system/cilium/cilium.yaml
+  tags: reconf
+  delegate_to: "{{ groups.deploy[0] }}"
+  run_once: true
+
+- name: 检查内核版本>4.9
+  fail: msg="kernel {{ ansible_kernel }} is too old for cilium installing"
+  when: "ansible_kernel.split('-')[0].split('.')[0]|int < 4 or (ansible_kernel.split('-')[0].split('.')[0]|int == 4 and ansible_kernel.split('-')[0].split('.')[1]|int < 9)"
+
 - name: 检查是否已下载离线cilium镜像
   command: "ls {{ base_dir 
}}/down" register: download_info @@ -42,7 +22,6 @@ - name: node 节点创建cilium 相关目录 file: name={{ item }} state=directory with_items: - - /etc/cilium/ssl - /etc/cni/net.d - /var/run/cilium - /opt/kube/images @@ -61,7 +40,6 @@ with_items: - "pause_3.1.tar" - "{{ cilium_offline }}" - - "{{ busybox_offline }}" ignore_errors: true - name: 获取cilium离线镜像推送情况 @@ -75,7 +53,6 @@ with_items: - "pause_3.1.tar" - "{{ cilium_offline }}" - - "{{ busybox_offline }}" ignore_errors: true # 只需单节点执行一次 diff --git a/roles/cilium/templates/cilium.yaml.j2 b/roles/cilium/templates/cilium.yaml.j2 index cb9d81f..30cbd82 100644 --- a/roles/cilium/templates/cilium.yaml.j2 +++ b/roles/cilium/templates/cilium.yaml.j2 @@ -1,3 +1,4 @@ +--- apiVersion: v1 kind: ConfigMap metadata: @@ -9,235 +10,903 @@ data: etcd-config: |- --- endpoints: -{% for host in groups['etcd'] %} - - https://{{ host }}:2379 -{% endfor %} + - https://cilium-etcd-client.kube-system.svc:2379 # # In case you want to use TLS in etcd, uncomment the 'ca-file' line # and create a kubernetes secret by following the tutorial in # https://cilium.link/etcd-config - ca-file: '/var/lib/etcd-secrets/etcd-ca' + ca-file: '/var/lib/etcd-secrets/etcd-client-ca.crt' # # In case you want client to server authentication, uncomment the following # lines and create a kubernetes secret by following the tutorial in # https://cilium.link/etcd-config - key-file: '/var/lib/etcd-secrets/etcd-client-key' - cert-file: '/var/lib/etcd-secrets/etcd-client-crt' + key-file: '/var/lib/etcd-secrets/etcd-client.key' + cert-file: '/var/lib/etcd-secrets/etcd-client.crt' # If you want to run cilium in debug mode change this value to true - debug: "{{ Debug_Mode }}" - disable-ipv4: "false" - # If you want to clean cilium state; change this value to true - clean-cilium-state: "{{ Clean_Start }}" - legacy-host-allows-world: "{{ Legacy_Host_Policy }}" + debug: "false" + + # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4 + # address. + enable-ipv4: "true" + + # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6 + # address. + enable-ipv6: "false" + + # If a serious issue occurs during Cilium startup, this + # invasive option may be set to true to remove all persistent + # state. Endpoints will not be restored using knowledge from a + # prior Cilium run, so they may receive new IP addresses upon + # restart. This also triggers clean-cilium-bpf-state. + clean-cilium-state: "false" + # If you want to clean cilium BPF state, set this to true; + # Removes all BPF maps from the filesystem. Upon restart, + # endpoints are restored with the same IP addresses, however + # any ongoing connections may be disrupted briefly. + # Loadbalancing decisions will be reset, so any ongoing + # connections via a service may be loadbalanced to a different + # backend after restart. + clean-cilium-bpf-state: "false" + + # In Cilium 1.0, all traffic from the host, including from local processes + # and traffic that is masqueraded from the outside world to the host IP, + # would be classified as from the host entity (reserved:host label). + # Furthermore, to allow Kubernetes agents to perform health checks over IP + # into the endpoints, the host is allowed by default. This means that all + # traffic from the outside world is also allowed by default, regardless of + # security policy. + # + # This option was introduced in Cilium 1.1 to disable this behaviour. It must + # be explicitly set to "false" to take effect on Cilium 1.3 or earlier. 
+ # Cilium 1.4 sets this to "false" by default if it is not specified in the + # ConfigMap. + # + # This option has been deprecated, it will be removed in Cilium 1.5 or later. + # + # For more information, see https://cilium.link/host-vs-world + #legacy-host-allows-world: "false" + + # If you want cilium monitor to aggregate tracing for packets, set this level + # to "low", "medium", or "maximum". The higher the level, the less packets + # that will be seen in monitor output. + monitor-aggregation-level: "none" + + # ct-global-max-entries-* specifies the maximum number of connections + # supported across all endpoints, split by protocol: tcp or other. One pair + # of maps uses these values for IPv4 connections, and another pair of maps + # use these values for IPv6 connections. + # + # If these values are modified, then during the next Cilium startup the + # tracking of ongoing connections may be disrupted. This may lead to brief + # policy drops or a change in loadbalancing decisions for a connection. + # + # For users upgrading from Cilium 1.2 or earlier, to minimize disruption + # during the upgrade process, comment out these options. + ct-global-max-entries-tcp: "524288" + ct-global-max-entries-other: "262144" + + # Pre-allocation of map entries allows per-packet latency to be reduced, at + # the expense of up-front memory allocation for the entries in the maps. The + # default value below will minimize memory usage in the default installation; + # users who are sensitive to latency may consider setting this to "true". + # + # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore + # this option and behave as though it is set to "true". + # + # If this value is modified, then during the next Cilium startup the restore + # of existing endpoints and tracking of ongoing connections may be disrupted. + # This may lead to policy drops or a change in loadbalancing decisions for a + # connection for some time. Endpoints may need to be recreated to restore + # connectivity. + # + # If this option is set to "false" during an upgrade from 1.3 or earlier to + # 1.4 or later, then it may cause one-time disruptions during the upgrade. + preallocate-bpf-maps: "false" # Regular expression matching compatible Istio sidecar istio-proxy # container image names sidecar-istio-proxy-image: "cilium/istio_proxy" + + # Encapsulation mode for communication between nodes + # Possible values: + # - disabled + # - vxlan (default) + # - geneve + tunnel: "vxlan" + + # Name of the cluster. Only relevant when building a mesh of clusters. + cluster-name: default + + # Unique ID of the cluster. Must be unique across all conneted clusters and + # in the range of 1 and 255. Only relevant when building a mesh of clusters. + #cluster-id: 1 + + # Interface to be used when running Cilium on top of a CNI plugin. + # For flannel, use "cni0" + flannel-master-device: "" + # When running Cilium with policy enforcement enabled on top of a CNI plugin + # the BPF programs will be installed on the network interface specified in + # 'flannel-master-device' and on all network interfaces belonging to + # a container. When the Cilium DaemonSet is removed, the BPF programs will + # be kept in the interfaces unless this option is set to "true". + flannel-uninstall-on-exit: "true" + # Installs a BPF program to allow for policy enforcement in already running + # containers managed by Flannel. + # NOTE: This requires Cilium DaemonSet to be running in the hostPID. 
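+  # (i.e. the agent pods must share the host PID namespace; this manifest
+  #  leaves hostPID set to false.)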
+ # To run in this mode in Kubernetes change the value of the hostPID from + # false to true. Can be found under the path `spec.spec.hostPID` + flannel-manage-existing-containers: "false" + + # DNS Polling periodically issues a DNS lookup for each `matchName` from + # cilium-agent. The result is used to regenerate endpoint policy. + # DNS lookups are repeated with an interval of 5 seconds, and are made for + # A(IPv4) and AAAA(IPv6) addresses. Should a lookup fail, the most recent IP + # data is used instead. An IP change will trigger a regeneration of the Cilium + # policy for each endpoint and increment the per cilium-agent policy + # repository revision. + # + # This option is disabled by default starting from version 1.4.x in favor + # of a more powerful DNS proxy-based implementation, see [0] for details. + # Enable this option if you want to use FQDN policies but do not want to use + # the DNS proxy. + # + # To ease upgrade, users may opt to set this option to "true". + # Otherwise please refer to the Upgrade Guide [1] which explains how to + # prepare policy rules for upgrade. + # + # [0] http://docs.cilium.io/en/stable/policy/language/#dns-based + # [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action + tofqdns-enable-poller: "false" --- -kind: DaemonSet apiVersion: apps/v1beta2 +kind: DaemonSet metadata: + labels: + k8s-app: cilium + kubernetes.io/cluster-service: "true" name: cilium namespace: kube-system spec: - updateStrategy: - type: "RollingUpdate" - rollingUpdate: - # Specifies the maximum number of Pods that can be unavailable during the update process. - maxUnavailable: 2 selector: matchLabels: k8s-app: cilium kubernetes.io/cluster-service: "true" template: metadata: - labels: - k8s-app: cilium - kubernetes.io/cluster-service: "true" annotations: + prometheus.io/port: "9090" + prometheus.io/scrape: "true" # This annotation plus the CriticalAddonsOnly toleration makes # cilium to be a critical pod in the cluster, which ensures cilium # gets priority scheduling. 
# https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: >- - [{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}] - prometheus.io/scrape: "true" - prometheus.io/port: "9090" + scheduler.alpha.kubernetes.io/critical-pod: "" + scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]' + labels: + k8s-app: cilium + kubernetes.io/cluster-service: "true" spec: - serviceAccountName: cilium - initContainers: - - name: clean-cilium-state - image: library/busybox:{{ busybox_ver }} - imagePullPolicy: IfNotPresent - command: ['sh', '-c', 'if [ "${CLEAN_CILIUM_STATE}" = "true" ]; then rm -rf /var/run/cilium/state; rm -rf /sys/fs/bpf/tc/globals/cilium_*; fi'] - volumeMounts: - - name: bpf-maps - mountPath: /sys/fs/bpf - - name: cilium-run - mountPath: /var/run/cilium - env: - - name: "CLEAN_CILIUM_STATE" - valueFrom: - configMapKeyRef: - name: cilium-config - optional: true - key: clean-cilium-state containers: - - image: cilium/cilium:{{ cilium_ver }} + - args: + - --debug=$(CILIUM_DEBUG) + - --kvstore=etcd + - --kvstore-opt=etcd.config=/var/lib/etcd-config/etcd.config + command: + - cilium-agent + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: cilium-config + - name: CILIUM_ENABLE_IPV4 + valueFrom: + configMapKeyRef: + key: enable-ipv4 + name: cilium-config + optional: true + - name: CILIUM_ENABLE_IPV6 + valueFrom: + configMapKeyRef: + key: enable-ipv6 + name: cilium-config + optional: true + # Note: this variable is a no-op if not defined, and is used in the + # prometheus examples. 
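+        # (A minimal way to enable this, assuming the scrape port from the pod
+        #  annotations above: create a ConfigMap named cilium-metrics-config in
+        #  kube-system with the key prometheus-serve-addr set to ":9090".)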
+ - name: CILIUM_PROMETHEUS_SERVE_ADDR + valueFrom: + configMapKeyRef: + key: prometheus-serve-addr + name: cilium-metrics-config + optional: true + - name: CILIUM_LEGACY_HOST_ALLOWS_WORLD + valueFrom: + configMapKeyRef: + key: legacy-host-allows-world + name: cilium-config + optional: true + - name: CILIUM_SIDECAR_ISTIO_PROXY_IMAGE + valueFrom: + configMapKeyRef: + key: sidecar-istio-proxy-image + name: cilium-config + optional: true + - name: CILIUM_TUNNEL + valueFrom: + configMapKeyRef: + key: tunnel + name: cilium-config + optional: true + - name: CILIUM_MONITOR_AGGREGATION_LEVEL + valueFrom: + configMapKeyRef: + key: monitor-aggregation-level + name: cilium-config + optional: true + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + - name: CILIUM_CLUSTER_NAME + valueFrom: + configMapKeyRef: + key: cluster-name + name: cilium-config + optional: true + - name: CILIUM_CLUSTER_ID + valueFrom: + configMapKeyRef: + key: cluster-id + name: cilium-config + optional: true + - name: CILIUM_GLOBAL_CT_MAX_TCP + valueFrom: + configMapKeyRef: + key: ct-global-max-entries-tcp + name: cilium-config + optional: true + - name: CILIUM_GLOBAL_CT_MAX_ANY + valueFrom: + configMapKeyRef: + key: ct-global-max-entries-other + name: cilium-config + optional: true + - name: CILIUM_PREALLOCATE_BPF_MAPS + valueFrom: + configMapKeyRef: + key: preallocate-bpf-maps + name: cilium-config + optional: true + - name: CILIUM_FLANNEL_MASTER_DEVICE + valueFrom: + configMapKeyRef: + key: flannel-master-device + name: cilium-config + optional: true + - name: CILIUM_FLANNEL_UNINSTALL_ON_EXIT + valueFrom: + configMapKeyRef: + key: flannel-uninstall-on-exit + name: cilium-config + optional: true + - name: CILIUM_FLANNEL_MANAGE_EXISTING_CONTAINERS + valueFrom: + configMapKeyRef: + key: flannel-manage-existing-containers + name: cilium-config + optional: true + - name: CILIUM_DATAPATH_MODE + valueFrom: + configMapKeyRef: + key: datapath-mode + name: cilium-config + optional: true + - name: CILIUM_IPVLAN_MASTER_DEVICE + valueFrom: + configMapKeyRef: + key: ipvlan-master-device + name: cilium-config + optional: true + - name: CILIUM_INSTALL_IPTABLES_RULES + valueFrom: + configMapKeyRef: + key: install-iptables-rules + name: cilium-config + optional: true + - name: CILIUM_MASQUERADE + valueFrom: + configMapKeyRef: + key: masquerade + name: cilium-config + optional: true + - name: CILIUM_AUTO_DIRECT_NODE_ROUTES + valueFrom: + configMapKeyRef: + key: auto-direct-node-routes + name: cilium-config + optional: true + - name: CILIUM_TOFQDNS_ENABLE_POLLER + valueFrom: + configMapKeyRef: + key: tofqdns-enable-poller + name: cilium-config + optional: true + - name: CILIUM_TOFQDNS_PRE_CACHE + valueFrom: + configMapKeyRef: + key: tofqdns-pre-cache + name: cilium-config + optional: true +{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %} + # if hosts have multiple net interfaces, set following two ENVs + - name: KUBERNETES_SERVICE_HOST + value: "{{ MASTER_IP }}" + #value: "{{ KUBE_APISERVER.split(':')[1].lstrip('/') }}" + - name: KUBERNETES_SERVICE_PORT + value: "{{ KUBE_APISERVER.split(':')[2] }}" +{% endif %} + image: docker.io/cilium/cilium:{{ cilium_ver }} imagePullPolicy: IfNotPresent - name: cilium-agent - command: [ "cilium-agent" ] - args: - - "--debug=$(CILIUM_DEBUG)" - - "-t=vxlan" - - "--kvstore=etcd" - - "--kvstore-opt=etcd.config=/var/lib/etcd-config/etcd.config" - - "--disable-ipv4=$(DISABLE_IPV4)" - ports: - - name: prometheus - containerPort: 9090 lifecycle: postStart: exec: command: - - "/cni-install.sh" + - 
/cni-install.sh preStop: exec: command: - - "/cni-uninstall.sh" - env: - - name: "K8S_NODE_NAME" - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: "CILIUM_DEBUG" - valueFrom: - configMapKeyRef: - name: cilium-config - key: debug - - name: "DISABLE_IPV4" - valueFrom: - configMapKeyRef: - name: cilium-config - key: disable-ipv4 - # Note: this variable is a no-op if not defined, and is used in the - # prometheus examples. - - name: "CILIUM_PROMETHEUS_SERVE_ADDR" - valueFrom: - configMapKeyRef: - name: cilium-metrics-config - optional: true - key: prometheus-serve-addr - - name: "CILIUM_LEGACY_HOST_ALLOWS_WORLD" - valueFrom: - configMapKeyRef: - name: cilium-config - optional: true - key: legacy-host-allows-world - - name: "CILIUM_SIDECAR_ISTIO_PROXY_IMAGE" - valueFrom: - configMapKeyRef: - name: cilium-config - key: sidecar-istio-proxy-image - optional: true -{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %} - # if hosts have multiple net interfaces, set following two ENVs - - name: KUBERNETES_SERVICE_HOST - value: "{{ MASTER_IP }}" - #value: "{{ KUBE_APISERVER.split(':')[1].lstrip('/') }}" - - name: KUBERNETES_SERVICE_PORT - value: "{{ KUBE_APISERVER.split(':')[2] }}" -{% endif %} + - /cni-uninstall.sh livenessProbe: exec: command: - cilium - status + failureThreshold: 10 # The initial delay for the liveness probe is intentionally large to # avoid an endless kill & restart cycle if in the event that the initial # bootstrapping takes longer than expected. initialDelaySeconds: 120 - failureThreshold: 10 periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + name: cilium-agent + ports: + - containerPort: 9090 + hostPort: 9090 + name: prometheus + protocol: TCP readinessProbe: exec: command: - cilium - status + failureThreshold: 3 initialDelaySeconds: 5 periodSeconds: 5 - volumeMounts: - - name: bpf-maps - mountPath: /sys/fs/bpf - - name: cilium-run - mountPath: /var/run/cilium - - name: cni-path - mountPath: /host/opt/cni/bin - - name: etc-cni-netd - mountPath: /host/etc/cni/net.d - - name: docker-socket - mountPath: /var/run/docker.sock - readOnly: true - - name: etcd-config-path - mountPath: /var/lib/etcd-config - readOnly: true - - name: etcd-secrets - mountPath: /var/lib/etcd-secrets - readOnly: true + successThreshold: 1 + timeoutSeconds: 1 securityContext: capabilities: add: - - "NET_ADMIN" + - NET_ADMIN privileged: true + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /var/run/cilium + name: cilium-run + - mountPath: /host/opt/cni/bin + name: cni-path + - mountPath: /host/etc/cni/net.d + name: etc-cni-netd + - mountPath: /var/run/docker.sock + name: docker-socket + readOnly: true + - mountPath: /var/lib/etcd-config + name: etcd-config-path + readOnly: true + - mountPath: /var/lib/etcd-secrets + name: etcd-secrets + readOnly: true + - mountPath: /var/lib/cilium/clustermesh + name: clustermesh-secrets + readOnly: true + dnsPolicy: ClusterFirstWithHostNet hostNetwork: true - volumes: - # To keep state between restarts / upgrades - - name: cilium-run - hostPath: - path: /var/run/cilium - # To keep state between restarts / upgrades - - name: bpf-maps - hostPath: - path: /sys/fs/bpf - # To read docker events from the node - - name: docker-socket - hostPath: - path: /var/run/docker.sock - # To install cilium cni plugin in the host - - name: cni-path - hostPath: - path: {{ bin_dir }} - # To install cilium cni configuration in the host - - name: etc-cni-netd - hostPath: - path: /etc/cni/net.d - # To read the etcd config stored in config maps - - name: 
etcd-config-path - configMap: - name: cilium-config - items: - - key: etcd-config - path: etcd.config - # To read the k8s etcd secrets in case the user might want to use TLS - - name: etcd-secrets - secret: - secretName: cilium-etcd-secrets - optional: true + hostPID: false + initContainers: + - command: + - /init-container.sh + env: + - name: CLEAN_CILIUM_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-state + name: cilium-config + optional: true + - name: CLEAN_CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-bpf-state + name: cilium-config + optional: true + image: docker.io/cilium/cilium-init:2018-10-16 + imagePullPolicy: IfNotPresent + name: clean-cilium-state + securityContext: + capabilities: + add: + - NET_ADMIN + privileged: true + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /var/run/cilium + name: cilium-run restartPolicy: Always + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - - effect: NoSchedule - key: node.cloudprovider.kubernetes.io/uninitialized - value: "true" - # Mark cilium's pod as critical for rescheduling - - key: CriticalAddonsOnly - operator: "Exists" + - operator: Exists + volumes: + # To keep state between restarts / upgrades + - hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + name: cilium-run + # To keep state between restarts / upgrades for bpf maps + - hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + name: bpf-maps + # To read docker events from the node + - hostPath: + path: /var/run/docker.sock + type: Socket + name: docker-socket + # To install cilium cni plugin in the host + - hostPath: + path: {{ bin_dir }} + type: DirectoryOrCreate + name: cni-path + # To install cilium cni configuration in the host + - hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + name: etc-cni-netd + # To read the etcd config stored in config maps + - configMap: + defaultMode: 420 + items: + - key: etcd-config + path: etcd.config + name: cilium-config + name: etcd-config-path + # To read the k8s etcd secrets in case the user might want to use TLS + - name: etcd-secrets + secret: + defaultMode: 420 + optional: true + secretName: cilium-etcd-secrets + # To read the clustermesh configuration + - name: clustermesh-secrets + secret: + defaultMode: 420 + optional: true + secretName: cilium-clustermesh + updateStrategy: + rollingUpdate: + # Specifies the maximum number of Pods that can be unavailable during the update process. 
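+      # (With the value of 2 used here, a rolling update recreates the cilium
+      #  pods on at most two nodes at a time.)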
+ maxUnavailable: 2 + type: RollingUpdate +--- +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + labels: + io.cilium/app: operator + name: cilium-operator + name: cilium-operator + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + io.cilium/app: operator + name: cilium-operator + spec: + containers: + - args: + - --debug=$(CILIUM_DEBUG) + - --kvstore=etcd + - --kvstore-opt=etcd.config=/var/lib/etcd-config/etcd.config + command: + - cilium-operator + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: cilium-config + optional: true + - name: CILIUM_CLUSTER_NAME + valueFrom: + configMapKeyRef: + key: cluster-name + name: cilium-config + optional: true + - name: CILIUM_CLUSTER_ID + valueFrom: + configMapKeyRef: + key: cluster-id + name: cilium-config + optional: true + - name: CILIUM_DISABLE_ENDPOINT_CRD + valueFrom: + configMapKeyRef: + key: disable-endpoint-crd + name: cilium-config + optional: true + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + key: AWS_ACCESS_KEY_ID + name: cilium-aws + optional: true + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: AWS_SECRET_ACCESS_KEY + name: cilium-aws + optional: true + - name: AWS_DEFAULT_REGION + valueFrom: + secretKeyRef: + key: AWS_DEFAULT_REGION + name: cilium-aws + optional: true + image: docker.io/cilium/operator:{{ cilium_ver }} + imagePullPolicy: IfNotPresent + name: cilium-operator + volumeMounts: + - mountPath: /var/lib/etcd-config + name: etcd-config-path + readOnly: true + - mountPath: /var/lib/etcd-secrets + name: etcd-secrets + readOnly: true + dnsPolicy: ClusterFirst + restartPolicy: Always + serviceAccount: cilium-operator + serviceAccountName: cilium-operator + volumes: + # To read the etcd config stored in config maps + - configMap: + defaultMode: 420 + items: + - key: etcd-config + path: etcd.config + name: cilium-config + name: etcd-config-path + # To read the k8s etcd secrets in case the user might want to use TLS + - name: etcd-secrets + secret: + defaultMode: 420 + optional: true + secretName: cilium-etcd-secrets +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cilium-operator + namespace: kube-system --- -kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: cilium-operator +rules: +- apiGroups: + - "" + resources: + - pods + - deployments + - componentstatuses + verbs: + - '*' +- apiGroups: + - "" + resources: + - services + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumnetworkpolicies/status + - ciliumendpoints + - ciliumendpoints/status + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cilium-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium-operator +subjects: +- kind: ServiceAccount + name: cilium-operator + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: cilium-etcd-operator +rules: +- apiGroups: + - etcd.database.coreos.com + resources: + - etcdclusters + verbs: + - 
get + - delete + - create +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - delete + - get + - create +- apiGroups: + - "" + resources: + - deployments + verbs: + - delete + - create + - get + - update +- apiGroups: + - "" + resources: + - pods + verbs: + - list + - delete + - get +- apiGroups: + - apps + resources: + - deployments + verbs: + - delete + - create + - get + - update +- apiGroups: + - "" + resources: + - componentstatuses + verbs: + - get +- apiGroups: + - extensions + resources: + - deployments + verbs: + - delete + - create + - get + - update +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create + - delete +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cilium-etcd-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium-etcd-operator +subjects: +- kind: ServiceAccount + name: cilium-etcd-operator + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: etcd-operator +rules: +- apiGroups: + - etcd.database.coreos.com + resources: + - etcdclusters + - etcdbackups + - etcdrestores + verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - '*' +- apiGroups: + - "" + resources: + - pods + - services + - endpoints + - persistentvolumeclaims + - events + - deployments + verbs: + - '*' +- apiGroups: + - apps + resources: + - deployments + verbs: + - '*' +- apiGroups: + - extensions + resources: + - deployments + verbs: + - create + - get + - list + - patch + - update +- apiGroups: + - "" + resources: + - secrets + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: etcd-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: etcd-operator +subjects: +- kind: ServiceAccount + name: cilium-etcd-sa + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cilium-etcd-operator + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cilium-etcd-sa + namespace: kube-system +--- +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + labels: + io.cilium/app: etcd-operator + name: cilium-etcd-operator + name: cilium-etcd-operator + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + io.cilium/app: etcd-operator + name: cilium-etcd-operator + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + io.cilium/app: etcd-operator + name: cilium-etcd-operator + spec: + containers: + - command: + - /usr/bin/cilium-etcd-operator + env: + - name: CILIUM_ETCD_OPERATOR_CLUSTER_DOMAIN + value: cluster.local + - name: CILIUM_ETCD_OPERATOR_ETCD_CLUSTER_SIZE + value: "{{ ETCD_CLUSTER_SIZE }}" + - name: CILIUM_ETCD_OPERATOR_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_ETCD_OPERATOR_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: CILIUM_ETCD_OPERATOR_POD_UID + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.uid +{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %} + # if hosts have multiple net interfaces, set following two ENVs + - name: KUBERNETES_SERVICE_HOST + value: "{{ MASTER_IP }}" + #value: "{{ KUBE_APISERVER.split(':')[1].lstrip('/') }}" + - name: KUBERNETES_SERVICE_PORT + value: "{{ 
KUBE_APISERVER.split(':')[2] }}" +{% endif %} + image: docker.io/cilium/cilium-etcd-operator:v2.0.5 + imagePullPolicy: IfNotPresent + name: cilium-etcd-operator + dnsPolicy: ClusterFirst + hostNetwork: true + restartPolicy: Always + serviceAccount: cilium-etcd-operator + serviceAccountName: cilium-etcd-operator + tolerations: + - operator: Exists +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding metadata: name: cilium roleRef: @@ -248,16 +917,17 @@ subjects: - kind: ServiceAccount name: cilium namespace: kube-system -- kind: Group +- apiGroup: rbac.authorization.k8s.io + kind: Group name: system:nodes --- -kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole metadata: name: cilium rules: - apiGroups: - - "networking.k8s.io" + - networking.k8s.io resources: - networkpolicies verbs: @@ -289,8 +959,6 @@ rules: - apiGroups: - extensions resources: - - networkpolicies #FIXME remove this when we drop support for k8s NP-beta GH-1202 - - thirdpartyresources - ingresses verbs: - create @@ -298,7 +966,7 @@ rules: - list - watch - apiGroups: - - "apiextensions.k8s.io" + - apiextensions.k8s.io resources: - customresourcedefinitions verbs: @@ -311,13 +979,14 @@ rules: - cilium.io resources: - ciliumnetworkpolicies + - ciliumnetworkpolicies/status - ciliumendpoints + - ciliumendpoints/status verbs: - - "*" + - '*' --- -kind: ServiceAccount apiVersion: v1 +kind: ServiceAccount metadata: name: cilium namespace: kube-system ---- diff --git a/roles/prepare/tasks/main.yml b/roles/prepare/tasks/main.yml index 5a6cb8c..7fb8262 100644 --- a/roles/prepare/tasks/main.yml +++ b/roles/prepare/tasks/main.yml @@ -21,8 +21,8 @@ - cfssl-certinfo - cfssljson -- name: 创建 kubeasz 工具的软连接 - file: src={{ base_dir }}/tools/kubeasz dest=/usr/bin/kubeasz state=link +- name: 创建 easzctl 工具的软连接 + file: src={{ base_dir }}/tools/easzctl dest=/usr/bin/easzctl state=link connection: local - name: 写入环境变量$PATH