---
apiVersion: v1
kind: ReplicationController
metadata:
  name: kubedns
  namespace: kube-system
  labels:
    k8s-app: kubedns
    version: v19
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kubedns
    version: v19
  template:
    metadata:
      labels:
        k8s-app: kubedns
        version: v19
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: kubedns
        image: "{{ kubedns_image_repo }}:{{ kubedns_image_tag }}"
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            cpu: 100m
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 30
          timeoutSeconds: 5
        args:
        # command = "/kube-dns"
        - --domain={{ dns_domain }}.
        - --dns-port=10053
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
      - name: dnsmasq
        image: "{{ kubednsmasq_image_repo }}:{{ kubednsmasq_image_tag }}"
        args:
        - --log-facility=-
        - --cache-size=1000
        - --no-resolv
        - --server=127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
      - name: healthz
        image: "{{ exechealthz_image_repo }}:{{ exechealthz_image_tag }}"
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 10m
            memory: 50Mi
          requests:
            cpu: 10m
            # Note that this container shouldn't really need 50Mi of memory. The
            # limits are set higher than expected pending investigation on #29688.
            # The extra memory was stolen from the kubedns container to keep the
            # net memory requested by the pod constant.
            memory: 50Mi
        args:
        - -cmd=nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1 >/dev/null && nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1:10053 >/dev/null
        - -port=8080
        - -quiet
        ports:
        - containerPort: 8080
          protocol: TCP
      dnsPolicy: Default  # Don't use cluster DNS.