Merge pull request #652 from kubernetes-incubator/debug_mode

Tune dnsmasq/kubedns limits, replicas, logging
pull/656/head
Bogdan Dobrelya 2016-11-25 16:57:15 +01:00 committed by GitHub
commit fbdda81515
11 changed files with 47 additions and 12 deletions

View File

@@ -21,5 +21,10 @@ For large-scale deployments, consider the following configuration changes:
load on a delegate (the first K8s master node) then retrying failed
push or download operations.
* Tune parameters for DNS related applications (dnsmasq daemon set, kubedns
replication controller). Those are ``dns_replicas``, ``dns_cpu_limit``,
``dns_cpu_requests``, ``dns_memory_limit``, ``dns_memory_requests``.
Please note that limits must always be greater than or equal to requests.
For example, when deploying 200 nodes, you may want to run ansible with
``--forks=50``, ``--timeout=600`` and define the ``retry_stagger: 60``.

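As a rough sketch, not part of this change: the new variables could be overridden from inventory group vars, keeping each limit greater than or equal to its request. The path and values below are hypothetical and should be sized to the cluster:

# e.g. inventory/group_vars/k8s-cluster.yml (hypothetical path and values)
dns_replicas: 2
dns_cpu_limit: 200m
dns_memory_limit: 256Mi
dns_cpu_requests: 100m
dns_memory_requests: 128Mi
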
View File

@@ -27,3 +27,9 @@ skip_dnsmasq: false
# Skip setting up dnsmasq daemonset
skip_dnsmasq_k8s: "{{ skip_dnsmasq }}"
# Limits for dnsmasq/kubedns apps
dns_cpu_limit: 100m
dns_memory_limit: 170Mi
dns_cpu_requests: 70m
dns_memory_requests: 70Mi

View File

@@ -20,6 +20,9 @@ server=169.254.169.254
server=8.8.4.4
{% endif %}
{% if kube_log_level == 4 %}
log-queries
{% endif %}
bogus-priv
no-resolv
no-negcache

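For reference, a hedged illustration: the template above (and the dnsmasq container args further down) only emit query logging at the highest verbosity, so enabling it is a matter of raising the cluster-wide log level, e.g. in group vars:

# hypothetical override; the same value is also passed as --v=4 to the kube components below
kube_log_level: 4
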
View File

@@ -29,8 +29,11 @@ spec:
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: 100m
memory: 256M
cpu: {{ dns_cpu_limit }}
memory: {{ dns_memory_limit }}
requests:
cpu: {{ dns_cpu_requests }}
memory: {{ dns_memory_requests }}
ports:
- name: dns
containerPort: 53

View File

@@ -3,6 +3,13 @@ kubedns_version: 1.7
kubednsmasq_version: 1.3
exechealthz_version: 1.1
# Limits for dnsmasq/kubedns apps
dns_cpu_limit: 100m
dns_memory_limit: 170Mi
dns_cpu_requests: 70m
dns_memory_requests: 70Mi
dns_replicas: 1
# Images
kubedns_image_repo: "gcr.io/google_containers/kubedns-amd64"
kubedns_image_tag: "{{ kubedns_version }}"

View File

@@ -8,7 +8,7 @@ metadata:
version: v19
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
replicas: {{ dns_replicas }}
selector:
k8s-app: kubedns
version: v19
@@ -29,11 +29,11 @@ spec:
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
cpu: 100m
memory: 170Mi
cpu: {{ dns_cpu_limit }}
memory: {{ dns_memory_limit }}
requests:
cpu: 100m
memory: 70Mi
cpu: {{ dns_cpu_requests }}
memory: {{ dns_memory_requests }}
livenessProbe:
httpGet:
path: /healthz
@@ -56,6 +56,7 @@ spec:
# command = "/kube-dns"
- --domain={{ dns_domain }}.
- --dns-port=10053
- --v={{ kube_log_level }}
ports:
- containerPort: 10053
name: dns-local
@@ -66,11 +67,21 @@ spec:
- name: dnsmasq
image: "{{ kubednsmasq_image_repo }}:{{ kubednsmasq_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
resources:
limits:
cpu: {{ dns_cpu_limit }}
memory: {{ dns_memory_limit }}
requests:
cpu: {{ dns_cpu_requests }}
memory: {{ dns_memory_requests }}
args:
- --log-facility=-
- --cache-size=1000
- --no-resolv
- --server=127.0.0.1#10053
{% if kube_log_level == 4 %}
- --log-queries
{% endif %}
ports:
- containerPort: 53
name: dns

View File

@@ -41,7 +41,7 @@ spec:
{% if enable_network_policy is defined and enable_network_policy == True %}
- --runtime-config=extensions/v1beta1/networkpolicies=true
{% endif %}
- --v={{ kube_log_level | default('2') }}
- --v={{ kube_log_level }}
- --allow-privileged=true
{% if cloud_provider is defined and cloud_provider == "openstack" %}
- --cloud-provider={{ cloud_provider }}

View File

@@ -19,7 +19,7 @@ spec:
- --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
- --root-ca-file={{ kube_cert_dir }}/ca.pem
- --enable-hostpath-provisioner={{ kube_hostpath_dynamic_provisioner }}
- --v={{ kube_log_level | default('2') }}
- --v={{ kube_log_level }}
{% if cloud_provider is defined and cloud_provider == "openstack" %}
- --cloud-provider={{cloud_provider}}
- --cloud-config={{ kube_config_dir }}/cloud_config

View File

@@ -16,7 +16,7 @@ spec:
- scheduler
- --leader-elect=true
- --master={{ kube_apiserver_endpoint }}
- --v={{ kube_log_level | default('2') }}
- --v={{ kube_log_level }}
livenessProbe:
httpGet:
host: 127.0.0.1

View File

@@ -5,7 +5,7 @@ KUBE_LOGGING="--log-dir={{ kube_log_dir }} --logtostderr=true"
# logging to stderr means we get it in the systemd journal
KUBE_LOGGING="--logtostderr=true"
{% endif %}
KUBE_LOG_LEVEL="--v={{ kube_log_level | default('2') }}"
KUBE_LOG_LEVEL="--v={{ kube_log_level }}"
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address={{ ip | default("0.0.0.0") }}"
# The port for the info server to serve on

View File

@@ -14,7 +14,7 @@ spec:
command:
- /hyperkube
- proxy
- --v={{ kube_log_level | default('2') }}
- --v={{ kube_log_level }}
- --master={{ kube_apiserver_endpoint }}
{% if not is_kube_master %}
- --kubeconfig=/etc/kubernetes/node-kubeconfig.yaml