Tune dnsmasq/kubedns limits, replicas, logging

* Add dns_replicas, dns_memory/cpu_limit/requests vars for
DNS-related apps.
* When kube_log_level=4, log dnsmasq queries as well.
* Add log level control for skydns (part of kubedns app).
* Add limits/requests vars for dnsmasq (part of kubedns app) and
  dnsmasq daemon set.
* Drop string defaults for kube_log_level as it is an int and
is defined in the global vars as well.
* Add docs

Signed-off-by: Bogdan Dobrelya <bdobrelia@mirantis.com>
pull/652/head
Bogdan Dobrelya 2016-11-25 11:33:39 +01:00
parent 6d29a5981c
commit 2d18e19263
11 changed files with 47 additions and 12 deletions

View File

@ -21,5 +21,10 @@ For a large scaled deployments, consider the following configuration changes:
load on a delegate (the first K8s master node) then retrying failed load on a delegate (the first K8s master node) then retrying failed
push or download operations. push or download operations.
* Tune parameters for DNS related applications (dnsmasq daemon set, kubedns
replication controller). Those are ``dns_replicas``, ``dns_cpu_limit``,
``dns_cpu_requests``, ``dns_memory_limit``, ``dns_memory_requests``.
Please note that limits must always be greater than or equal to requests.
For example, when deploying 200 nodes, you may want to run ansible with For example, when deploying 200 nodes, you may want to run ansible with
``--forks=50``, ``--timeout=600`` and define the ``retry_stagger: 60``. ``--forks=50``, ``--timeout=600`` and define the ``retry_stagger: 60``.

View File

@ -27,3 +27,9 @@ skip_dnsmasq: false
# Skip setting up dnsmasq daemonset # Skip setting up dnsmasq daemonset
skip_dnsmasq_k8s: "{{ skip_dnsmasq }}" skip_dnsmasq_k8s: "{{ skip_dnsmasq }}"
# Limits for dnsmasq/kubedns apps
dns_cpu_limit: 100m
dns_memory_limit: 170Mi
dns_cpu_requests: 70m
dns_memory_requests: 70Mi

View File

@ -20,6 +20,9 @@ server=169.254.169.254
server=8.8.4.4 server=8.8.4.4
{% endif %} {% endif %}
{% if kube_log_level == 4 %}
log-queries
{% endif %}
bogus-priv bogus-priv
no-resolv no-resolv
no-negcache no-negcache

View File

@ -29,8 +29,11 @@ spec:
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
resources: resources:
limits: limits:
cpu: 100m cpu: {{ dns_cpu_limit }}
memory: 256M memory: {{ dns_memory_limit }}
requests:
cpu: {{ dns_cpu_requests }}
memory: {{ dns_memory_requests }}
ports: ports:
- name: dns - name: dns
containerPort: 53 containerPort: 53

View File

@ -3,6 +3,13 @@ kubedns_version: 1.7
kubednsmasq_version: 1.3 kubednsmasq_version: 1.3
exechealthz_version: 1.1 exechealthz_version: 1.1
# Limits for dnsmasq/kubedns apps
dns_cpu_limit: 100m
dns_memory_limit: 170Mi
dns_cpu_requests: 70m
dns_memory_requests: 70Mi
dns_replicas: 1
# Images # Images
kubedns_image_repo: "gcr.io/google_containers/kubedns-amd64" kubedns_image_repo: "gcr.io/google_containers/kubedns-amd64"
kubedns_image_tag: "{{ kubedns_version }}" kubedns_image_tag: "{{ kubedns_version }}"

View File

@ -8,7 +8,7 @@ metadata:
version: v19 version: v19
kubernetes.io/cluster-service: "true" kubernetes.io/cluster-service: "true"
spec: spec:
replicas: 1 replicas: {{ dns_replicas }}
selector: selector:
k8s-app: kubedns k8s-app: kubedns
version: v19 version: v19
@ -29,11 +29,11 @@ spec:
# guaranteed class. Currently, this container falls into the # guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it. # "burstable" category so the kubelet doesn't backoff from restarting it.
limits: limits:
cpu: 100m cpu: {{ dns_cpu_limit }}
memory: 170Mi memory: {{ dns_memory_limit }}
requests: requests:
cpu: 100m cpu: {{ dns_cpu_requests }}
memory: 70Mi memory: {{ dns_memory_requests }}
livenessProbe: livenessProbe:
httpGet: httpGet:
path: /healthz path: /healthz
@ -56,6 +56,7 @@ spec:
# command = "/kube-dns" # command = "/kube-dns"
- --domain={{ dns_domain }}. - --domain={{ dns_domain }}.
- --dns-port=10053 - --dns-port=10053
- --v={{ kube_log_level }}
ports: ports:
- containerPort: 10053 - containerPort: 10053
name: dns-local name: dns-local
@ -66,11 +67,21 @@ spec:
- name: dnsmasq - name: dnsmasq
image: "{{ kubednsmasq_image_repo }}:{{ kubednsmasq_image_tag }}" image: "{{ kubednsmasq_image_repo }}:{{ kubednsmasq_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }} imagePullPolicy: {{ k8s_image_pull_policy }}
resources:
limits:
cpu: {{ dns_cpu_limit }}
memory: {{ dns_memory_limit }}
requests:
cpu: {{ dns_cpu_requests }}
memory: {{ dns_memory_requests }}
args: args:
- --log-facility=- - --log-facility=-
- --cache-size=1000 - --cache-size=1000
- --no-resolv - --no-resolv
- --server=127.0.0.1#10053 - --server=127.0.0.1#10053
{% if kube_log_level == 4 %}
- --log-queries
{% endif %}
ports: ports:
- containerPort: 53 - containerPort: 53
name: dns name: dns

View File

@ -41,7 +41,7 @@ spec:
{% if enable_network_policy is defined and enable_network_policy == True %} {% if enable_network_policy is defined and enable_network_policy == True %}
- --runtime-config=extensions/v1beta1/networkpolicies=true - --runtime-config=extensions/v1beta1/networkpolicies=true
{% endif %} {% endif %}
- --v={{ kube_log_level | default('2') }} - --v={{ kube_log_level }}
- --allow-privileged=true - --allow-privileged=true
{% if cloud_provider is defined and cloud_provider == "openstack" %} {% if cloud_provider is defined and cloud_provider == "openstack" %}
- --cloud-provider={{ cloud_provider }} - --cloud-provider={{ cloud_provider }}

View File

@ -19,7 +19,7 @@ spec:
- --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem - --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
- --root-ca-file={{ kube_cert_dir }}/ca.pem - --root-ca-file={{ kube_cert_dir }}/ca.pem
- --enable-hostpath-provisioner={{ kube_hostpath_dynamic_provisioner }} - --enable-hostpath-provisioner={{ kube_hostpath_dynamic_provisioner }}
- --v={{ kube_log_level | default('2') }} - --v={{ kube_log_level }}
{% if cloud_provider is defined and cloud_provider == "openstack" %} {% if cloud_provider is defined and cloud_provider == "openstack" %}
- --cloud-provider={{cloud_provider}} - --cloud-provider={{cloud_provider}}
- --cloud-config={{ kube_config_dir }}/cloud_config - --cloud-config={{ kube_config_dir }}/cloud_config

View File

@ -16,7 +16,7 @@ spec:
- scheduler - scheduler
- --leader-elect=true - --leader-elect=true
- --master={{ kube_apiserver_endpoint }} - --master={{ kube_apiserver_endpoint }}
- --v={{ kube_log_level | default('2') }} - --v={{ kube_log_level }}
livenessProbe: livenessProbe:
httpGet: httpGet:
host: 127.0.0.1 host: 127.0.0.1

View File

@ -5,7 +5,7 @@ KUBE_LOGGING="--log-dir={{ kube_log_dir }} --logtostderr=true"
# logging to stderr means we get it in the systemd journal # logging to stderr means we get it in the systemd journal
KUBE_LOGGING="--logtostderr=true" KUBE_LOGGING="--logtostderr=true"
{% endif %} {% endif %}
KUBE_LOG_LEVEL="--v={{ kube_log_level | default('2') }}" KUBE_LOG_LEVEL="--v={{ kube_log_level }}"
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces) # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address={{ ip | default("0.0.0.0") }}" KUBELET_ADDRESS="--address={{ ip | default("0.0.0.0") }}"
# The port for the info server to serve on # The port for the info server to serve on

View File

@ -14,7 +14,7 @@ spec:
command: command:
- /hyperkube - /hyperkube
- proxy - proxy
- --v={{ kube_log_level | default('2') }} - --v={{ kube_log_level }}
- --master={{ kube_apiserver_endpoint }} - --master={{ kube_apiserver_endpoint }}
{% if not is_kube_master %} {% if not is_kube_master %}
- --kubeconfig=/etc/kubernetes/node-kubeconfig.yaml - --kubeconfig=/etc/kubernetes/node-kubeconfig.yaml