kubeasz/roles/cluster-addon/templates/coredns.yaml.j2


# __MACHINE_GENERATED_WARNING__
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
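# RBAC: a read-only ClusterRole bound to the coredns ServiceAccount. The
# kubernetes plugin needs list/watch on endpoints, services, pods and
# namespaces (plus get on nodes) to answer Service and Pod DNS queries.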
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
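# Corefile notes: the kubernetes plugin serves records for the cluster domain
# (CLUSTER_DNS_DOMAIN, an Ansible variable, typically "cluster.local");
# forward sends everything else to the resolvers in the node's /etc/resolv.conf;
# cache 30 caches answers for 30 seconds; loop aborts on forwarding loops;
# reload picks up ConfigMap changes without a restart; loadbalance shuffles
# A/AAAA answers; prometheus exposes metrics on :9153; health serves /health
# on :8080.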
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes {{ CLUSTER_DNS_DOMAIN }} in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
---
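# Deployment notes: two replicas behind the kube-dns Service; the liveness and
# readiness probes hit the health plugin's /health endpoint on :8080; the
# Corefile ConfigMap is mounted read-only; dnsPolicy: Default makes CoreDNS
# use the node's own resolv.conf, which "forward . /etc/resolv.conf" relies on.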
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      nodeSelector:
        beta.kubernetes.io/os: linux
      containers:
      - name: coredns
        image: coredns/coredns:1.4.0
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
---
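# Service notes: the clusterIP is pinned to CLUSTER_DNS_SVC_IP (an Ansible
# variable) and should match the cluster-dns address configured on the
# kubelets; the Service keeps the name kube-dns for compatibility with
# clients and tooling that expect it.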
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: {{ CLUSTER_DNS_SVC_IP }}
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP