From 73bedd7c3794977655e7d809af32f83f01f3b9ac Mon Sep 17 00:00:00 2001
From: gjmzj
Date: Wed, 13 Jan 2021 21:27:18 +0800
Subject: [PATCH] feat: add NodeLocal DNSCache

---
 example/config.yml                                 |   4 +
 ezctl                                              |   2 +
 ezdown                                             |   5 +
 playbooks/23.addmaster.yml                         |   4 +-
 roles/cluster-addon/tasks/main.yml                 |   3 +
 roles/cluster-addon/tasks/nodelocaldns.yml         |  32 +++
 .../dns/nodelocaldns-iptables.yaml.j2              | 210 ++++++++++++++++++
 .../templates/dns/nodelocaldns-ipvs.yaml.j2        | 210 ++++++++++++++++++
 roles/cluster-addon/vars/main.yml                  |   5 +-
 .../templates/kubelet-config.yaml.j2               |   4 +
 10 files changed, 475 insertions(+), 4 deletions(-)
 create mode 100644 roles/cluster-addon/tasks/nodelocaldns.yml
 create mode 100644 roles/cluster-addon/templates/dns/nodelocaldns-iptables.yaml.j2
 create mode 100644 roles/cluster-addon/templates/dns/nodelocaldns-ipvs.yaml.j2

diff --git a/example/config.yml b/example/config.yml
index 9820a67..0fc1638 100644
--- a/example/config.yml
+++ b/example/config.yml
@@ -170,6 +170,10 @@ busybox_offline: "busybox_{{ busybox_ver }}.tar"
 # coredns 自动安装
 dns_install: "yes"
 corednsVer: "__coredns__"
+ENABLE_LOCAL_DNS_CACHE: true
+dnsNodeCacheVer: "__dns_node_cache__"
+# 设置 local dns cache 地址
+LOCAL_DNS_CACHE: "169.254.20.10"
 
 # metric server 自动安装
 metricsserver_install: "yes"

diff --git a/ezctl b/ezctl
index cf733ac..bd480ca 100755
--- a/ezctl
+++ b/ezctl
@@ -121,6 +121,7 @@ function new() {
   kubeRouterVer=$(grep 'kubeRouterVer=' ezdown|cut -d'=' -f2)
   kubeOvnVer=$(grep 'kubeOvnVer=' ezdown|cut -d'=' -f2)
   corednsVer=$(grep 'corednsVer=' ezdown|cut -d'=' -f2)
+  dnsNodeCacheVer=$(grep 'dnsNodeCacheVer=' ezdown|cut -d'=' -f2)
   dashboardVer=$(grep 'dashboardVer=' ezdown|cut -d'=' -f2)
   dashboardMetricsScraperVer=$(grep 'dashboardMetricsScraperVer=' ezdown|cut -d'=' -f2)
   metricsVer=$(grep 'metricsVer=' ezdown|cut -d'=' -f2)
@@ -136,6 +137,7 @@ function new() {
       -e "s/__kube_ovn__/$kubeOvnVer/g" \
       -e "s/__kube_router__/$kubeRouterVer/g" \
       -e "s/__coredns__/$corednsVer/g" \
+      -e "s/__dns_node_cache__/$dnsNodeCacheVer/g" \
      -e "s/__dashboard__/$dashboardVer/g" \
       -e "s/__dash_metrics__/$dashboardMetricsScraperVer/g" \
       -e "s/__prom_chart__/$promChartVer/g" \

diff --git a/ezdown b/ezdown
index 5635827..2a1d6d9 100755
--- a/ezdown
+++ b/ezdown
@@ -23,6 +23,7 @@ REGISTRY_MIRROR=CN
 # images needed by k8s cluster
 calicoVer=v3.15.3
 flannelVer=v0.13.0-amd64
+dnsNodeCacheVer=1.16.0
 corednsVer=1.7.1
 dashboardVer=v2.1.0
 dashboardMetricsScraperVer=v1.0.6
@@ -252,6 +253,10 @@ function get_offline_image() {
     docker pull "coredns/coredns:$corednsVer" && \
     docker save -o "$imageDir/coredns_$corednsVer.tar" "coredns/coredns:$corednsVer"
   fi
+  if [[ ! -f "$imageDir/k8s-dns-node-cache_$dnsNodeCacheVer.tar" ]];then
+    docker pull "easzlab/k8s-dns-node-cache:$dnsNodeCacheVer" && \
+    docker save -o "$imageDir/k8s-dns-node-cache_$dnsNodeCacheVer.tar" "easzlab/k8s-dns-node-cache:$dnsNodeCacheVer"
+  fi
   if [[ ! -f "$imageDir/dashboard_$dashboardVer.tar" ]];then
     docker pull "kubernetesui/dashboard:$dashboardVer" && \
     docker save -o "$imageDir/dashboard_$dashboardVer.tar" "kubernetesui/dashboard:$dashboardVer"

diff --git a/playbooks/23.addmaster.yml b/playbooks/23.addmaster.yml
index 443fbe6..dfc77aa 100644
--- a/playbooks/23.addmaster.yml
+++ b/playbooks/23.addmaster.yml
@@ -16,10 +16,10 @@
 #
   tasks:
   - name: Making master nodes SchedulingDisabled
-    shell: "{{ base_dir }}/bin/kubectl cordon {{ NODE_TO_ADD }} "
+    shell: "{{ bin_dir }}/kubectl cordon {{ NODE_TO_ADD }} "
     when: "inventory_hostname not in groups['kube-node']"
     ignore_errors: true
 
   - name: Setting master role name
-    shell: "{{ base_dir }}/bin/kubectl label node {{ NODE_TO_ADD }} kubernetes.io/role=master --overwrite"
+    shell: "{{ bin_dir }}/kubectl label node {{ NODE_TO_ADD }} kubernetes.io/role=master --overwrite"
     ignore_errors: true

diff --git a/roles/cluster-addon/tasks/main.yml b/roles/cluster-addon/tasks/main.yml
index 0043996..3a328f6 100644
--- a/roles/cluster-addon/tasks/main.yml
+++ b/roles/cluster-addon/tasks/main.yml
@@ -13,6 +13,9 @@
 - import_tasks: coredns.yml
   when: '"coredns" not in pod_info.stdout and dns_install == "yes"'
 
+- import_tasks: nodelocaldns.yml
+  when: '"node-local-dns" not in pod_info.stdout and ENABLE_LOCAL_DNS_CACHE|bool'
+
 - import_tasks: metrics-server.yml
   when: '"metrics-server" not in pod_info.stdout and metricsserver_install == "yes"'
 

diff --git a/roles/cluster-addon/tasks/nodelocaldns.yml b/roles/cluster-addon/tasks/nodelocaldns.yml
new file mode 100644
index 0000000..aab12e5
--- /dev/null
+++ b/roles/cluster-addon/tasks/nodelocaldns.yml
@@ -0,0 +1,32 @@
+- name: 尝试推送离线dnscache镜像(若执行失败,可忽略)
+  copy: src={{ base_dir }}/down/{{ dnscache_offline }} dest=/opt/kube/images/{{ dnscache_offline }}
+  when: 'dnscache_offline in download_info.stdout'
+
+- name: 获取dnscache离线镜像推送情况
+  command: "ls /opt/kube/images"
+  register: image_info
+
+- name: 导入dnscache的离线镜像(若执行失败,可忽略)
+  shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ dnscache_offline }}"
+  when: 'dnscache_offline in image_info.stdout and CONTAINER_RUNTIME == "docker"'
+
+- name: 导入dnscache的离线镜像(若执行失败,可忽略)
+  shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ dnscache_offline }}"
+  when: 'dnscache_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
+
+- name: 准备dnscache的部署文件
+  template: src=dns/nodelocaldns-ipvs.yaml.j2 dest={{ cluster_dir }}/yml/nodelocaldns.yaml
+  when: "PROXY_MODE == 'ipvs'"
+  run_once: true
+  connection: local
+
+- name: 准备dnscache的部署文件
+  template: src=dns/nodelocaldns-iptables.yaml.j2 dest={{ cluster_dir }}/yml/nodelocaldns.yaml
+  when: "PROXY_MODE == 'iptables'"
+  run_once: true
+  connection: local
+
+- name: 创建dnscache部署
+  shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/nodelocaldns.yaml"
+  run_once: true
+  connection: local

diff --git a/roles/cluster-addon/templates/dns/nodelocaldns-iptables.yaml.j2 b/roles/cluster-addon/templates/dns/nodelocaldns-iptables.yaml.j2
new file mode 100644
index 0000000..dd3d1ef
--- /dev/null
+++ b/roles/cluster-addon/templates/dns/nodelocaldns-iptables.yaml.j2
@@ -0,0 +1,210 @@
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: node-local-dns
+  namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: kube-dns-upstream
+  namespace: kube-system
+  labels:
+    k8s-app: kube-dns
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+    kubernetes.io/name: "KubeDNSUpstream"
+spec:
+  ports:
+  - name: dns
+    port: 53
+    protocol: UDP
+    targetPort: 53
+  - name: dns-tcp
+    port: 53
+    protocol: TCP
+    targetPort: 53
+  selector:
+    k8s-app: kube-dns
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: node-local-dns
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+data:
+  Corefile: |
+    {{ CLUSTER_DNS_DOMAIN }}:53 {
+        errors
+        cache {
+                success 9984 30
+                denial 9984 5
+        }
+        reload
+        loop
+        bind {{ LOCAL_DNS_CACHE }} {{ CLUSTER_DNS_SVC_IP }}
+        forward . __PILLAR__CLUSTER__DNS__ {
+                force_tcp
+        }
+        prometheus :9253
+        health {{ LOCAL_DNS_CACHE }}:8080
+        }
+    in-addr.arpa:53 {
+        errors
+        cache 30
+        reload
+        loop
+        bind {{ LOCAL_DNS_CACHE }} {{ CLUSTER_DNS_SVC_IP }}
+        forward . __PILLAR__CLUSTER__DNS__ {
+                force_tcp
+        }
+        prometheus :9253
+        }
+    ip6.arpa:53 {
+        errors
+        cache 30
+        reload
+        loop
+        bind {{ LOCAL_DNS_CACHE }} {{ CLUSTER_DNS_SVC_IP }}
+        forward . __PILLAR__CLUSTER__DNS__ {
+                force_tcp
+        }
+        prometheus :9253
+        }
+    .:53 {
+        errors
+        cache 30
+        reload
+        loop
+        bind {{ LOCAL_DNS_CACHE }} {{ CLUSTER_DNS_SVC_IP }}
+        forward . __PILLAR__UPSTREAM__SERVERS__
+        prometheus :9253
+        }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: node-local-dns
+  namespace: kube-system
+  labels:
+    k8s-app: node-local-dns
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  updateStrategy:
+    rollingUpdate:
+      maxUnavailable: 10%
+  selector:
+    matchLabels:
+      k8s-app: node-local-dns
+  template:
+    metadata:
+      labels:
+        k8s-app: node-local-dns
+      annotations:
+        prometheus.io/port: "9253"
+        prometheus.io/scrape: "true"
+    spec:
+      priorityClassName: system-node-critical
+      serviceAccountName: node-local-dns
+      hostNetwork: true
+      dnsPolicy: Default # Don't use cluster DNS.
+      tolerations:
+      - key: "CriticalAddonsOnly"
+        operator: "Exists"
+      - effect: "NoExecute"
+        operator: "Exists"
+      - effect: "NoSchedule"
+        operator: "Exists"
+      containers:
+      - name: node-cache
+        #image: k8s.gcr.io/dns/k8s-dns-node-cache:1.16.0
+        image: easzlab/k8s-dns-node-cache:1.16.0
+        resources:
+          requests:
+            cpu: 25m
+            memory: 5Mi
+        args: [ "-localip", "{{ LOCAL_DNS_CACHE }},{{ CLUSTER_DNS_SVC_IP }}", "-conf", "/etc/Corefile", "-upstreamsvc", "kube-dns-upstream" ]
+        securityContext:
+          privileged: true
+        ports:
+        - containerPort: 53
+          name: dns
+          protocol: UDP
+        - containerPort: 53
+          name: dns-tcp
+          protocol: TCP
+        - containerPort: 9253
+          name: metrics
+          protocol: TCP
+        livenessProbe:
+          httpGet:
+            host: {{ LOCAL_DNS_CACHE }}
+            path: /health
+            port: 8080
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+        volumeMounts:
+        - mountPath: /run/xtables.lock
+          name: xtables-lock
+          readOnly: false
+        - name: config-volume
+          mountPath: /etc/coredns
+        - name: kube-dns-config
+          mountPath: /etc/kube-dns
+      volumes:
+      - name: xtables-lock
+        hostPath:
+          path: /run/xtables.lock
+          type: FileOrCreate
+      - name: kube-dns-config
+        configMap:
+          name: kube-dns
+          optional: true
+      - name: config-volume
+        configMap:
+          name: node-local-dns
+          items:
+            - key: Corefile
+              path: Corefile.base
+---
+# A headless service is a service with a service IP but instead of load-balancing it will return the IPs of our associated Pods.
+# We use this to expose metrics to Prometheus.
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    prometheus.io/port: "9253"
+    prometheus.io/scrape: "true"
+  labels:
+    k8s-app: node-local-dns
+  name: node-local-dns
+  namespace: kube-system
+spec:
+  clusterIP: None
+  ports:
+    - name: metrics
+      port: 9253
+      targetPort: 9253
+  selector:
+    k8s-app: node-local-dns

diff --git a/roles/cluster-addon/templates/dns/nodelocaldns-ipvs.yaml.j2 b/roles/cluster-addon/templates/dns/nodelocaldns-ipvs.yaml.j2
new file mode 100644
index 0000000..2d9b44e
--- /dev/null
+++ b/roles/cluster-addon/templates/dns/nodelocaldns-ipvs.yaml.j2
@@ -0,0 +1,210 @@
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: node-local-dns
+  namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: kube-dns-upstream
+  namespace: kube-system
+  labels:
+    k8s-app: kube-dns
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+    kubernetes.io/name: "KubeDNSUpstream"
+spec:
+  ports:
+  - name: dns
+    port: 53
+    protocol: UDP
+    targetPort: 53
+  - name: dns-tcp
+    port: 53
+    protocol: TCP
+    targetPort: 53
+  selector:
+    k8s-app: kube-dns
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: node-local-dns
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+data:
+  Corefile: |
+    {{ CLUSTER_DNS_DOMAIN }}:53 {
+        errors
+        cache {
+                success 9984 30
+                denial 9984 5
+        }
+        reload
+        loop
+        bind {{ LOCAL_DNS_CACHE }}
+        forward . {{ CLUSTER_DNS_SVC_IP }} {
+                force_tcp
+        }
+        prometheus :9253
+        health {{ LOCAL_DNS_CACHE }}:8080
+        }
+    in-addr.arpa:53 {
+        errors
+        cache 30
+        reload
+        loop
+        bind {{ LOCAL_DNS_CACHE }}
+        forward . {{ CLUSTER_DNS_SVC_IP }} {
+                force_tcp
+        }
+        prometheus :9253
+        }
+    ip6.arpa:53 {
+        errors
+        cache 30
+        reload
+        loop
+        bind {{ LOCAL_DNS_CACHE }}
+        forward . {{ CLUSTER_DNS_SVC_IP }} {
+                force_tcp
+        }
+        prometheus :9253
+        }
+    .:53 {
+        errors
+        cache 30
+        reload
+        loop
+        bind {{ LOCAL_DNS_CACHE }}
+        forward . __PILLAR__UPSTREAM__SERVERS__
+        prometheus :9253
+        }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: node-local-dns
+  namespace: kube-system
+  labels:
+    k8s-app: node-local-dns
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  updateStrategy:
+    rollingUpdate:
+      maxUnavailable: 10%
+  selector:
+    matchLabels:
+      k8s-app: node-local-dns
+  template:
+    metadata:
+      labels:
+        k8s-app: node-local-dns
+      annotations:
+        prometheus.io/port: "9253"
+        prometheus.io/scrape: "true"
+    spec:
+      priorityClassName: system-node-critical
+      serviceAccountName: node-local-dns
+      hostNetwork: true
+      dnsPolicy: Default # Don't use cluster DNS.
+      tolerations:
+      - key: "CriticalAddonsOnly"
+        operator: "Exists"
+      - effect: "NoExecute"
+        operator: "Exists"
+      - effect: "NoSchedule"
+        operator: "Exists"
+      containers:
+      - name: node-cache
+        #image: k8s.gcr.io/dns/k8s-dns-node-cache:1.16.0
+        image: easzlab/k8s-dns-node-cache:1.16.0
+        resources:
+          requests:
+            cpu: 25m
+            memory: 5Mi
+        args: [ "-localip", "{{ LOCAL_DNS_CACHE }}", "-conf", "/etc/Corefile", "-upstreamsvc", "kube-dns-upstream" ]
+        securityContext:
+          privileged: true
+        ports:
+        - containerPort: 53
+          name: dns
+          protocol: UDP
+        - containerPort: 53
+          name: dns-tcp
+          protocol: TCP
+        - containerPort: 9253
+          name: metrics
+          protocol: TCP
+        livenessProbe:
+          httpGet:
+            host: {{ LOCAL_DNS_CACHE }}
+            path: /health
+            port: 8080
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+        volumeMounts:
+        - mountPath: /run/xtables.lock
+          name: xtables-lock
+          readOnly: false
+        - name: config-volume
+          mountPath: /etc/coredns
+        - name: kube-dns-config
+          mountPath: /etc/kube-dns
+      volumes:
+      - name: xtables-lock
+        hostPath:
+          path: /run/xtables.lock
+          type: FileOrCreate
+      - name: kube-dns-config
+        configMap:
+          name: kube-dns
+          optional: true
+      - name: config-volume
+        configMap:
+          name: node-local-dns
+          items:
+            - key: Corefile
+              path: Corefile.base
+---
+# A headless service is a service with a service IP but instead of load-balancing it will return the IPs of our associated Pods.
+# We use this to expose metrics to Prometheus.
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    prometheus.io/port: "9253"
+    prometheus.io/scrape: "true"
+  labels:
+    k8s-app: node-local-dns
+  name: node-local-dns
+  namespace: kube-system
+spec:
+  clusterIP: None
+  ports:
+    - name: metrics
+      port: 9253
+      targetPort: 9253
+  selector:
+    k8s-app: node-local-dns

diff --git a/roles/cluster-addon/vars/main.yml b/roles/cluster-addon/vars/main.yml
index bed431e..3d7a5b8 100644
--- a/roles/cluster-addon/vars/main.yml
+++ b/roles/cluster-addon/vars/main.yml
@@ -4,13 +4,14 @@ CLUSTER_DNS_SVC_IP: "{{ SERVICE_CIDR | ipaddr('net') | ipaddr(2) | ipaddr('addre
 
 #
 coredns_offline: "coredns_{{ corednsVer }}.tar"
+dnscache_offline: "k8s-dns-node-cache_{{ dnsNodeCacheVer }}.tar"
+
 metricsserver_offline: "metrics-server_{{ metricsVer }}.tar"
 
 dashboard_offline: "dashboard_{{ dashboardVer }}.tar"
+
 metricsscraper_offline: "metrics-scraper_{{ dashboardMetricsScraperVer }}.tar"
 
-# ingress 自动安装,可选 "traefik" 和 "nginx-ingress"
-#ingress_install: "no"
 
 # metallb 自动安装
 #metallb_install: "no"

diff --git a/roles/kube-node/templates/kubelet-config.yaml.j2 b/roles/kube-node/templates/kubelet-config.yaml.j2
index e370083..51eda90 100644
--- a/roles/kube-node/templates/kubelet-config.yaml.j2
+++ b/roles/kube-node/templates/kubelet-config.yaml.j2
@@ -17,7 +17,11 @@ authorization:
 cgroupDriver: cgroupfs
 cgroupsPerQOS: true
 clusterDNS:
+{% if ENABLE_LOCAL_DNS_CACHE %}
+- {{ LOCAL_DNS_CACHE }}
+{% else %}
 - {{ CLUSTER_DNS_SVC_IP }}
+{% endif %}
 clusterDomain: {{ CLUSTER_DNS_DOMAIN }}
 configMapAndSecretChangeDetectionStrategy: Watch
 containerLogMaxFiles: 3
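Quick verification sketch (not part of the patch): once the cluster-addon role has applied nodelocaldns.yaml, the commands below check that the DaemonSet is running and that new pods resolve through the node-local cache. This is a minimal sketch assuming kubectl access to the cluster, the default LOCAL_DNS_CACHE value 169.254.20.10 from example/config.yml, the default cluster domain cluster.local, and an arbitrary busybox test image.

    # 1) the DaemonSet should have one node-local-dns pod per node
    kubectl -n kube-system get ds,pods -l k8s-app=node-local-dns -o wide

    # 2) a newly created pod should list the link-local cache address as its nameserver
    #    (clusterDNS is rendered from kubelet-config.yaml.j2 when ENABLE_LOCAL_DNS_CACHE is true;
    #     pods created before the kubelet change keep the old address until they are recreated)
    kubectl run dns-test --rm -it --restart=Never --image=busybox:1.28 -- cat /etc/resolv.conf

    # 3) resolve a cluster service directly against the cache address
    kubectl run dns-test --rm -it --restart=Never --image=busybox:1.28 -- \
      nslookup kubernetes.default.svc.cluster.local 169.254.20.10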