diff --git a/docs/large-deployments.md b/docs/large-deployments.md
index 3d37fae57..fd62f35e3 100644
--- a/docs/large-deployments.md
+++ b/docs/large-deployments.md
@@ -21,5 +21,10 @@ For a large scaled deployments, consider the following configuration changes:
   load on a delegate (the first K8s master node) then retrying failed push or
   download operations.
 
+* Tune parameters for DNS related applications (dnsmasq daemon set, kubedns
+  replication controller). Those are ``dns_replicas``, ``dns_cpu_limit``,
+  ``dns_cpu_requests``, ``dns_memory_limit``, ``dns_memory_requests``.
+  Please note that limits must always be greater than or equal to requests.
+
 For example, when deploying 200 nodes, you may want to run ansible with
 ``--forks=50``, ``--timeout=600`` and define the ``retry_stagger: 60``.
diff --git a/roles/dnsmasq/defaults/main.yml b/roles/dnsmasq/defaults/main.yml
index 4889e7c12..0d3d30200 100644
--- a/roles/dnsmasq/defaults/main.yml
+++ b/roles/dnsmasq/defaults/main.yml
@@ -27,3 +27,9 @@ skip_dnsmasq: false
 
 # Skip setting up dnsmasq daemonset
 skip_dnsmasq_k8s: "{{ skip_dnsmasq }}"
+
+# Limits for dnsmasq/kubedns apps
+dns_cpu_limit: 100m
+dns_memory_limit: 170Mi
+dns_cpu_requests: 70m
+dns_memory_requests: 70Mi
diff --git a/roles/dnsmasq/templates/01-kube-dns.conf.j2 b/roles/dnsmasq/templates/01-kube-dns.conf.j2
index 4d73eebdb..0aa2a6bcf 100644
--- a/roles/dnsmasq/templates/01-kube-dns.conf.j2
+++ b/roles/dnsmasq/templates/01-kube-dns.conf.j2
@@ -20,6 +20,9 @@ server=169.254.169.254
 server=8.8.4.4
 {% endif %}
+{% if kube_log_level == 4 %}
+log-queries
+{% endif %}
 
 bogus-priv
 no-resolv
 no-negcache
diff --git a/roles/dnsmasq/templates/dnsmasq-ds.yml b/roles/dnsmasq/templates/dnsmasq-ds.yml
index 50cea23c5..2f4a1cdd7 100644
--- a/roles/dnsmasq/templates/dnsmasq-ds.yml
+++ b/roles/dnsmasq/templates/dnsmasq-ds.yml
@@ -29,8 +29,11 @@ spec:
         imagePullPolicy: IfNotPresent
         resources:
           limits:
-            cpu: 100m
-            memory: 256M
+            cpu: {{ dns_cpu_limit }}
+            memory: {{ dns_memory_limit }}
+          requests:
+            cpu: {{ dns_cpu_requests }}
+            memory: {{ dns_memory_requests }}
         ports:
         - name: dns
           containerPort: 53
diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml
index e064984c6..b84d46520 100644
--- a/roles/kubernetes-apps/ansible/defaults/main.yml
+++ b/roles/kubernetes-apps/ansible/defaults/main.yml
@@ -3,6 +3,13 @@ kubedns_version: 1.7
 kubednsmasq_version: 1.3
 exechealthz_version: 1.1
 
+# Limits for dnsmasq/kubedns apps
+dns_cpu_limit: 100m
+dns_memory_limit: 170Mi
+dns_cpu_requests: 70m
+dns_memory_requests: 70Mi
+dns_replicas: 1
+
 # Images
 kubedns_image_repo: "gcr.io/google_containers/kubedns-amd64"
 kubedns_image_tag: "{{ kubedns_version }}"
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-rc.yml b/roles/kubernetes-apps/ansible/templates/kubedns-rc.yml
index fc29a0942..dc5ab5b34 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-rc.yml
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-rc.yml
@@ -8,7 +8,7 @@ metadata:
     version: v19
     kubernetes.io/cluster-service: "true"
 spec:
-  replicas: 1
+  replicas: {{ dns_replicas }}
   selector:
     k8s-app: kubedns
     version: v19
@@ -29,11 +29,11 @@ spec:
           # guaranteed class. Currently, this container falls into the
           # "burstable" category so the kubelet doesn't backoff from restarting it.
           limits:
-            cpu: 100m
-            memory: 170Mi
+            cpu: {{ dns_cpu_limit }}
+            memory: {{ dns_memory_limit }}
           requests:
-            cpu: 100m
-            memory: 70Mi
+            cpu: {{ dns_cpu_requests }}
+            memory: {{ dns_memory_requests }}
         livenessProbe:
           httpGet:
             path: /healthz
@@ -56,6 +56,7 @@ spec:
         # command = "/kube-dns"
         - --domain={{ dns_domain }}.
         - --dns-port=10053
+        - --v={{ kube_log_level }}
         ports:
         - containerPort: 10053
           name: dns-local
@@ -66,11 +67,21 @@ spec:
       - name: dnsmasq
         image: "{{ kubednsmasq_image_repo }}:{{ kubednsmasq_image_tag }}"
         imagePullPolicy: {{ k8s_image_pull_policy }}
+        resources:
+          limits:
+            cpu: {{ dns_cpu_limit }}
+            memory: {{ dns_memory_limit }}
+          requests:
+            cpu: {{ dns_cpu_requests }}
+            memory: {{ dns_memory_requests }}
         args:
         - --log-facility=-
         - --cache-size=1000
         - --no-resolv
         - --server=127.0.0.1#10053
+{% if kube_log_level == 4 %}
+        - --log-queries
+{% endif %}
         ports:
         - containerPort: 53
           name: dns
diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
index d5eb2266e..83c5525b1 100644
--- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
@@ -41,7 +41,7 @@ spec:
 {% if enable_network_policy is defined and enable_network_policy == True %}
     - --runtime-config=extensions/v1beta1/networkpolicies=true
 {% endif %}
-    - --v={{ kube_log_level | default('2') }}
+    - --v={{ kube_log_level }}
     - --allow-privileged=true
 {% if cloud_provider is defined and cloud_provider == "openstack" %}
     - --cloud-provider={{ cloud_provider }}
diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
index 02d386618..1cd58d46c 100644
--- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
@@ -19,7 +19,7 @@ spec:
     - --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
     - --root-ca-file={{ kube_cert_dir }}/ca.pem
    - --enable-hostpath-provisioner={{ kube_hostpath_dynamic_provisioner }}
-    - --v={{ kube_log_level | default('2') }}
+    - --v={{ kube_log_level }}
 {% if cloud_provider is defined and cloud_provider == "openstack" %}
     - --cloud-provider={{cloud_provider}}
     - --cloud-config={{ kube_config_dir }}/cloud_config
diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
index 853e616fc..a2c4c134a 100644
--- a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
@@ -16,7 +16,7 @@ spec:
     - scheduler
     - --leader-elect=true
     - --master={{ kube_apiserver_endpoint }}
-    - --v={{ kube_log_level | default('2') }}
+    - --v={{ kube_log_level }}
     livenessProbe:
       httpGet:
         host: 127.0.0.1
diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.j2
index fb70670c1..a575f065e 100644
--- a/roles/kubernetes/node/templates/kubelet.j2
+++ b/roles/kubernetes/node/templates/kubelet.j2
@@ -5,7 +5,7 @@ KUBE_LOGGING="--log-dir={{ kube_log_dir }} --logtostderr=true"
 # logging to stderr means we get it in the systemd journal
 KUBE_LOGGING="--logtostderr=true"
 {% endif %}
-KUBE_LOG_LEVEL="--v={{ kube_log_level | default('2') }}"
+KUBE_LOG_LEVEL="--v={{ kube_log_level }}"
 # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
 KUBELET_ADDRESS="--address={{ ip | default("0.0.0.0") }}"
 # The port for the info server to serve on
diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
index 422507acf..dc03dedd2 100644
--- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
+++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
@@ -14,7 +14,7 @@ spec:
     command:
     - /hyperkube
     - proxy
-    - --v={{ kube_log_level | default('2') }}
+    - --v={{ kube_log_level }}
     - --master={{ kube_apiserver_endpoint }}
 {% if not is_kube_master %}
     - --kubeconfig=/etc/kubernetes/node-kubeconfig.yaml