# This manifest creates a Service, which will be backed by Calico's Typha daemon.
# Typha sits in between Felix and the API server, reducing Calico's load on the API server.
apiVersion: v1
kind: Service
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    k8s-app: calico-typha
spec:
  ports:
    # Felix connects to Typha on this port; targetPort refers to the named
    # containerPort ("calico-typha") exposed by the Typha Deployment below.
    - port: 5473
      protocol: TCP
      targetPort: calico-typha
      name: calico-typha
  selector:
    k8s-app: calico-typha
---
# This manifest creates a Deployment of Typha to back the above service.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    k8s-app: calico-typha
spec:
  # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the
  # typha_service_name variable in the calico-config ConfigMap above.
  #
  # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential
  # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In
  # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade.
  replicas: {{ typha_replicas }}
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      k8s-app: calico-typha
  template:
    metadata:
      labels:
        k8s-app: calico-typha
      annotations:
        cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
{% if typha_prometheusmetricsenabled %}
        prometheus.io/scrape: 'true'
        prometheus.io/port: "{{ typha_prometheusmetricsport }}"
{% endif %}
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        # Schedule onto control-plane nodes; both the legacy "master" taint and
        # the newer "control-plane" taint are tolerated.
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule
      # Since Calico can't network a pod until Typha is up, we need to run Typha itself
      # as a host-networked pod.
      serviceAccountName: calico-node
      priorityClassName: system-cluster-critical
      # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573
      securityContext:
        fsGroup: 65534
      containers:
        - image: {{ calico_typha_image_repo }}:{{ calico_typha_image_tag }}
          imagePullPolicy: {{ k8s_image_pull_policy }}
          name: calico-typha
          ports:
            - containerPort: 5473
              name: calico-typha
              protocol: TCP
          envFrom:
            - configMapRef:
                # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
                name: kubernetes-services-endpoint
                optional: true
          env:
            # Enable "info" logging by default. Can be set to "debug" to increase verbosity.
            - name: TYPHA_LOGSEVERITYSCREEN
              value: "info"
            # Disable logging to file and syslog since those don't make sense in Kubernetes.
            - name: TYPHA_LOGFILEPATH
              value: "none"
            - name: TYPHA_LOGSEVERITYSYS
              value: "none"
            # Monitor the Kubernetes API to find the number of running instances and rebalance
            # connections.
            - name: TYPHA_CONNECTIONREBALANCINGMODE
              value: "kubernetes"
            - name: TYPHA_DATASTORETYPE
              value: "kubernetes"
            - name: TYPHA_HEALTHENABLED
              value: "true"
            - name: TYPHA_MAXCONNECTIONSLOWERLIMIT
              value: "{{ typha_max_connections_lower_limit }}"
{% if typha_secure %}
            # Mutual-TLS settings for Felix <-> Typha; cert/key files are
            # provided by the "typha-server" and "cacert" volumes below.
            - name: TYPHA_CAFILE
              value: /etc/ca/ca.crt
            - name: TYPHA_CLIENTCN
              value: typha-client
            - name: TYPHA_SERVERCERTFILE
              value: /etc/typha/server_certificate.pem
            - name: TYPHA_SERVERKEYFILE
              value: /etc/typha/server_key.pem
{% endif %}
{% if typha_prometheusmetricsenabled %}
            # Since Typha is host-networked,
            # this opens a port on the host, which may need to be secured.
            - name: TYPHA_PROMETHEUSMETRICSENABLED
              value: "true"
            - name: TYPHA_PROMETHEUSMETRICSPORT
              value: "{{ typha_prometheusmetricsport }}"
{% endif %}
{% if typha_secure %}
          volumeMounts:
            - mountPath: /etc/typha
              name: typha-server
              readOnly: true
            - mountPath: /etc/ca/ca.crt
              subPath: ca.crt
              name: cacert
              readOnly: true
{% endif %}
          # Needed for version >=3.7 when the 'host-local' ipam is used
          # Should never happen given templates/cni-calico.conflist.j2
          # Configure route aggregation based on pod CIDR.
          # - name: USE_POD_CIDR
          #   value: "true"
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9098
              host: localhost
            periodSeconds: 30
            initialDelaySeconds: 30
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9098
              host: localhost
            periodSeconds: 10
{% if typha_secure %}
      volumes:
        # Server certificate/key pair, projected from the typha-server Secret
        # under the filenames the TYPHA_SERVER*FILE env vars expect.
        - name: typha-server
          secret:
            secretName: typha-server
            items:
              - key: tls.crt
                path: server_certificate.pem
              - key: tls.key
                path: server_key.pem
        - name: cacert
          hostPath:
            path: "{{ kube_cert_dir }}"
{% endif %}
---
# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict
# at most one Typha pod at a time.
# NOTE(review): policy/v1beta1 is removed in Kubernetes >= 1.25; switch to
# policy/v1 when the minimum supported cluster version allows it.
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    k8s-app: calico-typha
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: calico-typha