# kubespray/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
#
# Deploys the legacy kube-dns stack (kubedns + dnsmasq-nanny + sidecar)
# into the kube-system namespace. Rendered by Ansible/Jinja2.

---
# NOTE(review): extensions/v1beta1 Deployments are removed in Kubernetes v1.16+;
# this template targets older clusters (see the v1.11.1 gate below) — confirm
# before migrating to apps/v1.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
{% if kube_version is version('v1.11.1', '>=') %}
      priorityClassName: system-cluster-critical
{% endif %}
      nodeSelector:
        beta.kubernetes.io/os: linux
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
        - effect: "NoSchedule"
          operator: "Equal"
          key: "node-role.kubernetes.io/master"
      affinity:
        # Spread replicas across nodes; prefer (but don't require) masters.
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - topologyKey: "kubernetes.io/hostname"
              labelSelector:
                matchLabels:
                  k8s-app: kube-dns
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              preference:
                matchExpressions:
                  - key: node-role.kubernetes.io/master
                    operator: In
                    values:
                      - ""
      volumes:
        - name: kube-dns-config
          configMap:
            name: kube-dns
            optional: true
      containers:
        - name: kubedns
          image: "{{ kubedns_image_repo }}:{{ kubedns_image_tag }}"
          imagePullPolicy: "{{ k8s_image_pull_policy }}"
          resources:
            # TODO: Set memory limits when we've profiled the container for large
            # clusters, then set request = limit to keep this container in
            # guaranteed class. Currently, this container falls into the
            # "burstable" category so the kubelet doesn't backoff from restarting it.
            limits:
              memory: "{{ dns_memory_limit }}"
            requests:
              cpu: "{{ dns_cpu_requests }}"
              memory: "{{ dns_memory_requests }}"
          livenessProbe:
            httpGet:
              path: /healthcheck/kubedns
              port: 10054
              scheme: HTTP
            initialDelaySeconds: 60
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 5
          readinessProbe:
            httpGet:
              path: /readiness
              port: 8081
              scheme: HTTP
            # we poll on pod startup for the Kubernetes master service and
            # only setup the /readiness HTTP server once that's available.
            initialDelaySeconds: 3
            timeoutSeconds: 5
          args:
            - --domain={{ dns_domain }}.
            - --dns-port=10053
            - --config-dir=/kube-dns-config
            - --v={{ kube_log_level }}
{% if resolvconf_mode == 'host_resolvconf' and upstream_dns_servers is defined and upstream_dns_servers|length > 0 %}
            - --nameservers={{ upstream_dns_servers|join(',') }}
{% endif %}
          env:
            - name: PROMETHEUS_PORT
              value: "10055"
          ports:
            - containerPort: 10053
              name: dns-local
              protocol: UDP
            - containerPort: 10053
              name: dns-tcp-local
              protocol: TCP
            - containerPort: 10055
              name: metrics
              protocol: TCP
          volumeMounts:
            - name: kube-dns-config
              mountPath: /kube-dns-config
        - name: dnsmasq
          image: "{{ dnsmasq_nanny_image_repo }}:{{ dnsmasq_nanny_image_tag }}"
          imagePullPolicy: "{{ k8s_image_pull_policy }}"
          livenessProbe:
            httpGet:
              path: /healthcheck/dnsmasq
              port: 10054
              scheme: HTTP
            initialDelaySeconds: 60
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 5
          # Arguments before "--" go to dnsmasq-nanny; the rest are passed
          # through to dnsmasq itself.
          args:
            - -v={{ kube_log_level }}
            - -logtostderr
            - -configDir=/etc/k8s/dns/dnsmasq-nanny
            - -restartDnsmasq=true
            - --
            - -k
            - --cache-size=1000
            - --no-negcache
            - --dns-loop-detect
            - --log-facility=-
            - --server=/{{ dns_domain }}/127.0.0.1#10053
            - --server=/in-addr.arpa/127.0.0.1#10053
            - --server=/ip6.arpa/127.0.0.1#10053
          ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
          # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
          resources:
            requests:
              cpu: 150m
              memory: 20Mi
          volumeMounts:
            - name: kube-dns-config
              mountPath: /etc/k8s/dns/dnsmasq-nanny
        - name: sidecar
          image: "{{ dnsmasq_sidecar_image_repo }}:{{ dnsmasq_sidecar_image_tag }}"
          livenessProbe:
            httpGet:
              path: /metrics
              port: 10054
              scheme: HTTP
            initialDelaySeconds: 60
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 5
          args:
            - --v={{ kube_log_level }}
            - --logtostderr
            - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ dns_domain }},5,SRV
            - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ dns_domain }},5,SRV
          ports:
            - containerPort: 10054
              name: metrics
              protocol: TCP
          resources:
            requests:
              memory: 20Mi
              cpu: 10m
      dnsPolicy: Default  # Don't use cluster DNS.
      serviceAccountName: kube-dns