Adjust the cluster-addon installation method

pull/992/head
gjmzj 2021-01-11 19:02:34 +08:00
parent d5c0873daf
commit a5a99d542e
93 changed files with 289 additions and 7672 deletions


@@ -180,6 +180,11 @@ dashboard_install: "yes"
dashboardVer: "__dashboard__"
dashboardMetricsScraperVer: "__dash_metrics__"
# ingress auto-install
ingress_install: "no"
ingress_backend: "traefik"
traefik_chart_ver: "__traefik_chart__"
# prometheus auto-install
prom_install: "no"
prom_namespace: "monitor"
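
These new switches make the ingress and prometheus addons opt-in and pin the traefik chart version. As a rough manual equivalent of `ingress_install: "yes"` with the traefik backend (the chart repo URL and release name below are assumptions for illustration, not taken from this commit):

```console
$ helm repo add traefik https://helm.traefik.io/traefik
$ helm install traefik traefik/traefik --version 9.12.3 --namespace kube-system
```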

ezctl

@@ -125,6 +125,7 @@ function new() {
dashboardMetricsScraperVer=$(grep 'dashboardMetricsScraperVer=' ezdown|cut -d'=' -f2)
metricsVer=$(grep 'metricsVer=' ezdown|cut -d'=' -f2)
promChartVer=$(grep 'promChartVer=' ezdown|cut -d'=' -f2)
traefikChartVer=$(grep 'traefikChartVer=' ezdown|cut -d'=' -f2)
sed -i -e "s/__flannel__/$flannelVer/g" \
-e "s/__calico__/$calicoVer/g" \
@@ -135,6 +136,7 @@ function new() {
-e "s/__dashboard__/$dashboardVer/g" \
-e "s/__dash_metrics__/$dashboardMetricsScraperVer/g" \
-e "s/__prom_chart__/$promChartVer/g" \
-e "s/__traefik_chart__/$traefikChartVer/g" \
-e "s/__metrics__/$metricsVer/g" "clusters/$1/config.yml"

ezdown

@@ -31,6 +31,7 @@ export ciliumVer=v1.4.1
export kubeRouterVer=v0.3.1
export kubeOvnVer=v1.5.3
export promChartVer=12.10.6
export traefikChartVer=9.12.3
function usage() {
echo -e "\033[33mUsage:\033[0m ezdown [options] [args]"
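
ezdown now exports the pinned traefik chart version; the download logic itself is not part of this hunk. One plausible way the variable could be consumed (the helm repo and target directory are assumptions):

```console
$ helm repo add traefik https://helm.traefik.io/traefik
$ helm pull traefik/traefik --version "$traefikChartVer" -d /tmp
```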


@@ -1,165 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
# ------------------- Dashboard Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
rules:
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
# Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create"]
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: mirrorgooglecontainers/kubernetes-dashboard-amd64:v1.10.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
type: NodePort
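
Since the Service is `type: NodePort` with no fixed port, the assigned port has to be looked up before browsing to `https://<node-ip>:<node-port>` (standard kubectl, nothing specific to this manifest):

```console
$ kubectl -n kube-system get svc kubernetes-dashboard -o jsonpath='{.spec.ports[0].nodePort}'
```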


@@ -1,100 +0,0 @@
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.6 (RBAC enabled).
#
# Example usage: kubectl create -f <this_file>
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
labels:
k8s-app: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
#image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.3
image: mirrorgooglecontainers/kubernetes-dashboard-amd64:v1.6.3
ports:
- containerPort: 9090
protocol: TCP
args:
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
livenessProbe:
httpGet:
path: /
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 80
targetPort: 9090
selector:
k8s-app: kubernetes-dashboard
type: NodePort


@@ -1,27 +0,0 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ui-admin
rules:
- apiGroups:
- ""
resources:
- services
- services/proxy
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: ui-admin-binding
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ui-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: admin
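
Because the RoleBinding lives in kube-system, the `admin` user gets the ClusterRole's verbs only in that namespace. Impersonation can confirm the grant; this should answer `yes` for any verb on services there:

```console
$ kubectl auth can-i proxy services --as admin -n kube-system
```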


@@ -1,29 +0,0 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ui-read
rules:
- apiGroups:
- ""
resources:
- services
- services/proxy
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: ui-read-binding
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ui-read
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: readonly


@@ -1,71 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: monitoring-grafana
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: grafana
template:
metadata:
labels:
task: monitoring
k8s-app: grafana
spec:
containers:
- name: grafana
#image: gcr.io/google_containers/heapster-grafana-amd64:v4.4.3
image: mirrorgooglecontainers/heapster-grafana-amd64:v4.4.3
ports:
- containerPort: 3000
protocol: TCP
volumeMounts:
- mountPath: /var
name: grafana-storage
env:
- name: INFLUXDB_HOST
value: monitoring-influxdb
- name: GF_SERVER_HTTP_PORT
value: "3000"
# The following env variables are required to make Grafana accessible via
# the kubernetes api-server proxy. On production clusters, we recommend
# removing these env variables, setup auth for grafana, and expose the grafana
# service using a LoadBalancer or a public IP.
- name: GF_AUTH_BASIC_ENABLED
value: "false"
- name: GF_AUTH_ANONYMOUS_ENABLED
value: "true"
- name: GF_AUTH_ANONYMOUS_ORG_ROLE
value: Admin
- name: GF_SERVER_ROOT_URL
# If you're only using the API Server proxy, set this value instead:
value: /api/v1/namespaces/kube-system/services/monitoring-grafana/proxy/
#value: /
volumes:
- name: grafana-storage
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
labels:
# For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
# If you are NOT using this as an addon, you should comment out this line.
kubernetes.io/cluster-service: 'true'
kubernetes.io/name: monitoring-grafana
name: monitoring-grafana
namespace: kube-system
spec:
# In a production setup, we recommend accessing Grafana through an external Loadbalancer
# or through a public IP.
# type: LoadBalancer
# You could also use NodePort to expose the service at a randomly-generated port
# type: NodePort
ports:
- port: 80
targetPort: 3000
selector:
k8s-app: grafana
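
With `GF_SERVER_ROOT_URL` set to the apiserver proxy path, this Grafana is meant to be reached through the API server rather than a LoadBalancer, e.g.:

```console
$ kubectl proxy --port=8001 &
$ curl http://127.0.0.1:8001/api/v1/namespaces/kube-system/services/monitoring-grafana/proxy/
```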


@@ -1,74 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: heapster
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: heapster
subjects:
- kind: ServiceAccount
name: heapster
namespace: kube-system
roleRef:
kind: ClusterRole
name: system:heapster
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: heapster
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
template:
metadata:
labels:
task: monitoring
k8s-app: heapster
spec:
serviceAccountName: heapster
containers:
- name: heapster
#image: gcr.io/google_containers/heapster-amd64:v1.5.4
image: mirrorgooglecontainers/heapster-amd64:v1.5.4
imagePullPolicy: IfNotPresent
command:
- /heapster
#- --source=kubernetes:https://kubernetes.default
- --source=kubernetes.summary_api:''
#- --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086
livenessProbe:
httpGet:
path: /healthz
port: 8082
scheme: HTTP
initialDelaySeconds: 180
timeoutSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
labels:
task: monitoring
# For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
# If you are NOT using this as an addon, you should comment out this line.
#kubernetes.io/cluster-service: 'true'
kubernetes.io/name: Heapster
name: heapster
namespace: kube-system
spec:
ports:
- port: 80
targetPort: 8082
selector:
k8s-app: heapster


@@ -1,74 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: heapster
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: heapster
subjects:
- kind: ServiceAccount
name: heapster
namespace: kube-system
roleRef:
kind: ClusterRole
name: system:heapster
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: heapster
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
template:
metadata:
labels:
task: monitoring
k8s-app: heapster
spec:
serviceAccountName: heapster
containers:
- name: heapster
#image: gcr.io/google_containers/heapster-amd64:v1.5.4
image: mirrorgooglecontainers/heapster-amd64:v1.5.4
imagePullPolicy: IfNotPresent
command:
- /heapster
#- --source=kubernetes:https://kubernetes.default
- --source=kubernetes.summary_api:''
- --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086
livenessProbe:
httpGet:
path: /healthz
port: 8082
scheme: HTTP
initialDelaySeconds: 180
timeoutSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
labels:
task: monitoring
# For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
# If you are NOT using this as an addon, you should comment out this line.
#kubernetes.io/cluster-service: 'true'
kubernetes.io/name: Heapster
name: heapster
namespace: kube-system
spec:
ports:
- port: 80
targetPort: 8082
selector:
k8s-app: heapster
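
On clusters of this era, heapster's `kubernetes.summary_api` source is what backed `kubectl top`; once the pod is running, a quick smoke test is:

```console
$ kubectl top node
$ kubectl top pod -n kube-system
```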


@@ -1,190 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: monitoring-influxdb
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: influxdb
template:
metadata:
labels:
task: monitoring
k8s-app: influxdb
spec:
containers:
- name: influxdb
#image: gcr.io/google_containers/heapster-influxdb-amd64:v1.1.1
image: mirrorgooglecontainers/heapster-influxdb-amd64:v1.1.1
volumeMounts:
- mountPath: /data
name: influxdb-storage
- mountPath: /etc/
name: influxdb-config
volumes:
- name: influxdb-storage
emptyDir: {}
- name: influxdb-config
configMap:
name: influxdb-config
---
apiVersion: v1
kind: Service
metadata:
labels:
task: monitoring
# For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
# If you are NOT using this as an addon, you should comment out this line.
# kubernetes.io/cluster-service: 'true'
kubernetes.io/name: monitoring-influxdb
name: monitoring-influxdb
namespace: kube-system
spec:
type: NodePort
ports:
- port: 8086
targetPort: 8086
name: http
- port: 8083
targetPort: 8083
name: admin
selector:
k8s-app: influxdb
---
apiVersion: v1
kind: ConfigMap
metadata:
name: influxdb-config
namespace: kube-system
data:
config.toml: |
reporting-disabled = true
bind-address = ":8088"
[meta]
dir = "/data/meta"
retention-autocreate = true
logging-enabled = true
[data]
dir = "/data/data"
wal-dir = "/data/wal"
query-log-enabled = true
cache-max-memory-size = 1073741824
cache-snapshot-memory-size = 26214400
cache-snapshot-write-cold-duration = "10m0s"
compact-full-write-cold-duration = "4h0m0s"
max-series-per-database = 1000000
max-values-per-tag = 100000
trace-logging-enabled = false
[coordinator]
write-timeout = "10s"
max-concurrent-queries = 0
query-timeout = "0s"
log-queries-after = "0s"
max-select-point = 0
max-select-series = 0
max-select-buckets = 0
[retention]
enabled = true
check-interval = "30m0s"
[admin]
enabled = true
bind-address = ":8083"
https-enabled = false
https-certificate = "/etc/ssl/influxdb.pem"
[shard-precreation]
enabled = true
check-interval = "10m0s"
advance-period = "30m0s"
[monitor]
store-enabled = true
store-database = "_internal"
store-interval = "10s"
[subscriber]
enabled = true
http-timeout = "30s"
insecure-skip-verify = false
ca-certs = ""
write-concurrency = 40
write-buffer-size = 1000
[http]
enabled = true
bind-address = ":8086"
auth-enabled = false
log-enabled = true
write-tracing = false
pprof-enabled = false
https-enabled = false
https-certificate = "/etc/ssl/influxdb.pem"
https-private-key = ""
max-row-limit = 10000
max-connection-limit = 0
shared-secret = ""
realm = "InfluxDB"
unix-socket-enabled = false
bind-socket = "/var/run/influxdb.sock"
[[graphite]]
enabled = false
bind-address = ":2003"
database = "graphite"
retention-policy = ""
protocol = "tcp"
batch-size = 5000
batch-pending = 10
batch-timeout = "1s"
consistency-level = "one"
separator = "."
udp-read-buffer = 0
[[collectd]]
enabled = false
bind-address = ":25826"
database = "collectd"
retention-policy = ""
batch-size = 5000
batch-pending = 10
batch-timeout = "10s"
read-buffer = 0
typesdb = "/usr/share/collectd/types.db"
[[opentsdb]]
enabled = false
bind-address = ":4242"
database = "opentsdb"
retention-policy = ""
consistency-level = "one"
tls-enabled = false
certificate = "/etc/ssl/influxdb.pem"
batch-size = 1000
batch-pending = 5
batch-timeout = "1s"
log-point-errors = true
[[udp]]
enabled = false
bind-address = ":8089"
database = "udp"
retention-policy = ""
batch-size = 5000
batch-pending = 10
read-buffer = 0
batch-timeout = "1s"
precision = ""
[continuous_queries]
log-enabled = true
enabled = true
run-interval = "1s"
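
The `[http]` section keeps port 8086 enabled and the NodePort Service above exposes it; InfluxDB 1.x answers `204 No Content` on its `/ping` endpoint, which makes for a simple health check (`<node-ip>` is any node's address):

```console
$ NODE_PORT=$(kubectl -n kube-system get svc monitoring-influxdb -o jsonpath='{.spec.ports[?(@.name=="http")].nodePort}')
$ curl -i http://<node-ip>:$NODE_PORT/ping
```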


@@ -1,17 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-influxdb
spec:
capacity:
storage: 5Gi
accessModes:
- ReadWriteMany
volumeMode: Filesystem
persistentVolumeReclaimPolicy: Recycle
storageClassName: slow
nfs:
# change to match the actual shared directory
path: /share
# change to match the actual NFS server address
server: 192.168.1.208


@@ -1,19 +0,0 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: influxdb-claim
namespace: kube-system
spec:
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 3Gi
storageClassName: slow
#selector:
# matchLabels:
# release: "stable"
# matchExpressions:
# - {key: environment, operator: In, values: [dev]}
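
This claim binds to the PV above only because capacity, access mode and storage class line up (5Gi covers the 3Gi request, ReadWriteMany, `slow`); the binding can be checked with:

```console
$ kubectl get pv pv-influxdb
$ kubectl -n kube-system get pvc influxdb-claim
```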


@@ -1,47 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: monitoring-influxdb
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: influxdb
template:
metadata:
labels:
task: monitoring
k8s-app: influxdb
spec:
containers:
- name: influxdb
#image: gcr.io/google_containers/heapster-influxdb-amd64:v1.5.2
image: mirrorgooglecontainers/heapster-influxdb-amd64:v1.5.2
volumeMounts:
- mountPath: /data
name: influxdb-storage
volumes:
- name: influxdb-storage
persistentVolumeClaim:
claimName: influxdb-claim
---
apiVersion: v1
kind: Service
metadata:
labels:
task: monitoring
# For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
# If you are NOT using this as an addon, you should comment out this line.
# kubernetes.io/cluster-service: 'true'
kubernetes.io/name: monitoring-influxdb
name: monitoring-influxdb
namespace: kube-system
spec:
ports:
- port: 8086
targetPort: 8086
name: http
selector:
k8s-app: influxdb


@@ -1,48 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: monitoring-influxdb
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: influxdb
template:
metadata:
labels:
task: monitoring
k8s-app: influxdb
spec:
containers:
- name: influxdb
#image: gcr.io/google_containers/heapster-influxdb-amd64:v1.5.2
image: mirrorgooglecontainers/heapster-influxdb-amd64:v1.5.2
volumeMounts:
- mountPath: /data
name: influxdb-storage
volumes:
- name: influxdb-storage
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
labels:
task: monitoring
# For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
# If you are NOT using this as an addon, you should comment out this line.
# kubernetes.io/cluster-service: 'true'
kubernetes.io/name: monitoring-influxdb
name: monitoring-influxdb
namespace: kube-system
spec:
ports:
- port: 8086
targetPort: 8086
name: http
selector:
k8s-app: influxdb
---

File diff suppressed because it is too large


@@ -1,32 +0,0 @@
service:
type: NodePort
nodePort: 39002
adminUser: admin
adminPassword: admin
datasources:
datasources.yaml:
apiVersion: 1
datasources:
- name: MYDS_Prometheus
type: prometheus
#url: http:// + the prometheus-server service name in the cluster
#look it up with: kubectl get svc --all-namespaces |grep prometheus-server
url: http://monitor-prometheus-server
access: proxy
isDefault: true
dashboardProviders:
dashboardproviders.yaml:
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards
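
A values file like this is passed at install time. A minimal sketch, assuming helm 3 and a local chart directory; the release and namespace names are illustrative, though the datasource url above implies a prometheus release named `monitor` in the same namespace:

```console
$ helm install grafana ./grafana -n monitor -f grafana-values.yaml
```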


@@ -1,15 +0,0 @@
name: grafana
version: 1.16.0
appVersion: 5.2.4
kubeVersion: "^1.8.0-0"
description: The leading tool for querying and visualizing time series and metrics.
home: https://grafana.net
icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png
sources:
- https://github.com/grafana/grafana
maintainers:
- name: zanhsieh
email: zanhsieh@gmail.com
- name: rtluckie
email: rluckie@cisco.com
engine: gotpl


@@ -1,162 +0,0 @@
# Grafana Helm Chart
* Installs the web dashboarding system [Grafana](http://grafana.org/)
## TL;DR;
```console
$ helm install stable/grafana
```
## Installing the Chart
To install the chart with the release name `my-release`:
```console
$ helm install --name my-release stable/grafana
```
## Uninstalling the Chart
To uninstall/delete the my-release deployment:
```console
$ helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
| Parameter | Description | Default |
|---------------------------------|-----------------------------------------------|---------------------------------------------------------|
| `replicas` | Number of nodes | `1` |
| `deploymentStrategy` | Deployment strategy | `RollingUpdate` |
| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }` |
| `readinessProbe` | Readiness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 }, "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10, "periodSeconds": 10 }` |
| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "fsGroup": 472}` |
| `image.repository` | Image repository | `grafana/grafana` |
| `image.tag` | Image tag. (`Must be >= 5.0.0`) | `5.2.4` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `service.type` | Kubernetes service type | `ClusterIP` |
| `service.port` | Kubernetes port where service is exposed | `80` |
| `service.annotations` | Service annotations | `{}` |
| `service.labels` | Custom labels | `{}` |
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations | `{}` |
| `ingress.labels` | Custom labels | `{}` |
| `ingress.hosts` | Ingress accepted hostnames | `[]` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Toleration labels for pod assignment | `[]` |
| `affinity` | Affinity settings for pod assignment | `{}` |
| `persistence.enabled` | Use persistent volume to store data | `false` |
| `persistence.size` | Size of persistent volume claim | `10Gi` |
| `persistence.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.storageClassName` | Type of persistent volume claim | `nil` |
| `persistence.accessModes` | Persistence access modes | `[]` |
| `persistence.subPath` | Mount a sub dir of the persistent volume | `""` |
| `schedulerName` | Alternate scheduler name | `nil` |
| `env` | Extra environment variables passed to pods | `{}` |
| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment | `""` |
| `extraSecretMounts` | Additional grafana server secret mounts | `[]` |
| `plugins` | Plugins to be loaded along with Grafana | `[]` |
| `datasources` | Configure grafana datasources | `{}` |
| `dashboardProviders` | Configure grafana dashboard providers | `{}` |
| `dashboards` | Dashboards to import | `{}` |
| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` |
| `grafana.ini` | Grafana's primary configuration | `{}` |
| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. | `""` |
| `ldap.config ` | Grafana's LDAP configuration | `""` |
| `annotations` | Deployment annotations | `{}` |
| `podAnnotations` | Pod annotations | `{}` |
| `sidecar.dashboards.enabled` | Enables the cluster-wide search for dashboards and adds/updates/deletes them in grafana | `false` |
| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` |
| `sidecar.datasources.enabled` | Enables the cluster-wide search for datasources and adds/updates/deletes them in grafana | `false` |
| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` |
| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials, this must have the keys `user` and `password`. | `""` |
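Any parameter in the table can also be overridden inline; for example, to expose the service on a NodePort with persistence enabled (following the helm 2 style used above):

```console
$ helm install --name my-release stable/grafana \
    --set service.type=NodePort \
    --set persistence.enabled=true \
    --set persistence.size=10Gi
```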
## Sidecar for dashboards
If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana pod. This container watches all config maps in the cluster and filters out the ones with a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported dashboards are deleted/updated. A recommendation is to use one configmap per dashboard, as a reduction of multiple dashboards inside one configmap is currently not properly mirrored in grafana.
Example dashboard config:
```
apiVersion: v1
kind: ConfigMap
metadata:
name: sample-grafana-dashboard
labels:
grafana_dashboard: 1
data:
k8s-dashboard.json: |-
[...]
```
## Sidecar for datasources
If the parameter `sidecar.datasources.enabled` is set, a sidecar container is deployed in the grafana pod. This container watches all config maps in the cluster and filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in those configmaps are written to a folder and accessed by grafana on startup. Using these yaml files, the data sources in grafana can be modified.
Example datasource config adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file):
```
apiVersion: v1
kind: ConfigMap
metadata:
name: sample-grafana-datasource
labels:
grafana_datasource: 1
data:
datasource.yaml: |-
# config file version
apiVersion: 1
# list of datasources that should be deleted from the database
deleteDatasources:
- name: Graphite
orgId: 1
# list of datasources to insert/update depending
# whats available in the database
datasources:
# <string, required> name of the datasource. Required
- name: Graphite
# <string, required> datasource type. Required
type: graphite
# <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
access: proxy
# <int> org id. will default to orgId 1 if not specified
orgId: 1
# <string> url
url: http://localhost:8080
# <string> database password, if used
password:
# <string> database user, if used
user:
# <string> database name, if used
database:
# <bool> enable/disable basic auth
basicAuth:
# <string> basic auth username
basicAuthUser:
# <string> basic auth password
basicAuthPassword:
# <bool> enable/disable with credentials headers
withCredentials:
# <bool> mark as default datasource. Max one per org
isDefault:
# <map> fields that will be converted to json and stored in json_data
jsonData:
graphiteVersion: "1.1"
tlsAuth: true
tlsAuthWithCACert: true
# <string> json object of data that will be encrypted.
secureJsonData:
tlsCACert: "..."
tlsClientCert: "..."
tlsClientKey: "..."
version: 1
# <bool> allow users to edit datasources from the UI.
editable: false
```


@@ -1,37 +0,0 @@
1. Get your '{{ .Values.adminUser }}' user password by running:
kubectl get secret --namespace {{ .Release.Namespace }} {{ template "grafana.fullname" . }} -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster:
{{ template "grafana.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
{{ if .Values.ingress.enabled }}
From outside the cluster, the server URL(s) are:
{{- range .Values.ingress.hosts }}
http://{{ . }}
{{- end }}
{{ else }}
Get the Grafana URL to visit by running these commands in the same shell:
{{ if contains "NodePort" .Values.service.type -}}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "grafana.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{ else if contains "LoadBalancer" .Values.service.type -}}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "grafana.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
http://$SERVICE_IP:{{ .Values.service.port -}}
{{ else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "grafana.fullname" . }},component={{ .Values.name }}" -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 3000
{{- end }}
{{- end }}
3. Login with the password from step 1 and the username: {{ .Values.adminUser }}
{{- if not .Values.persistence.enabled }}
#################################################################################
###### WARNING: Persistence is disabled!!! You will lose your data when #####
###### the Grafana pod is terminated. #####
#################################################################################
{{- end }}


@@ -1,43 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "grafana.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "grafana.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "grafana.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account
*/}}
{{- define "grafana.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "grafana.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}


@@ -1,23 +0,0 @@
{{- if .Values.rbac.create }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
app: {{ template "grafana.name" . }}
chart: {{ template "grafana.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
name: {{ template "grafana.fullname" . }}-clusterrole
{{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled }}
rules:
- apiGroups: [""] # "" indicates the core API group
resources: ["configmaps"]
verbs: ["get", "watch", "list"]
{{- else }}
rules: []
{{- end}}
{{- end}}


@@ -1,23 +0,0 @@
{{- if .Values.rbac.create }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "grafana.fullname" . }}-clusterrolebinding
labels:
app: {{ template "grafana.name" . }}
chart: {{ template "grafana.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
subjects:
- kind: ServiceAccount
name: {{ template "grafana.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ template "grafana.fullname" . }}-clusterrole
apiGroup: rbac.authorization.k8s.io
{{- end}}


@@ -1,26 +0,0 @@
{{- if .Values.sidecar.dashboards.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
labels:
app: {{ template "grafana.name" . }}
chart: {{ template "grafana.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
name: {{ template "grafana.fullname" . }}-config-dashboards
data:
provider.yaml: |-
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
type: file
disableDeletion: false
options:
path: {{ .Values.sidecar.dashboards.folder }}
{{- end}}


@@ -1,61 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "grafana.fullname" . }}
labels:
app: {{ template "grafana.name" . }}
chart: {{ template "grafana.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
{{- if .Values.plugins }}
plugins: {{ join "," .Values.plugins }}
{{- end }}
grafana.ini: |
{{- range $key, $value := index .Values "grafana.ini" }}
[{{ $key }}]
{{- range $elem, $elemVal := $value }}
{{ $elem }} = {{ $elemVal }}
{{- end }}
{{- end }}
{{- if .Values.datasources }}
{{- range $key, $value := .Values.datasources }}
{{ $key }}: |
{{ toYaml $value | indent 4 }}
{{- end -}}
{{- end -}}
{{- if .Values.dashboardProviders }}
{{- range $key, $value := .Values.dashboardProviders }}
{{ $key }}: |
{{ toYaml $value | indent 4 }}
{{- end -}}
{{- end -}}
{{- if .Values.dashboards }}
download_dashboards.sh: |
#!/usr/bin/env sh
set -euf
{{- if .Values.dashboardProviders }}
{{- range $key, $value := .Values.dashboardProviders }}
{{- range $value.providers }}
mkdir -p {{ .options.path }}
{{- end }}
{{- end }}
{{- end }}
{{- range $provider, $dashboards := .Values.dashboards }}
{{- range $key, $value := $dashboards }}
{{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }}
curl -sk \
--connect-timeout 60 \
--max-time 60 \
-H "Accept: application/json" \
-H "Content-Type: application/json;charset=UTF-8" \
{{- if $value.url -}}{{ $value.url }}{{- else -}} https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download{{- end -}}{{ if $value.datasource }}| sed 's|\"datasource\":[^,]*|\"datasource\": \"{{ $value.datasource }}\"|g'{{ end }} \
> /var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json
{{- end }}
{{- end }}
{{- end }}
{{- end }}


@@ -1,22 +0,0 @@
{{- if .Values.dashboards }}
{{- range $provider, $dashboards := .Values.dashboards }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "grafana.fullname" $ }}-dashboards-{{ $provider }}
labels:
app: {{ template "grafana.name" $ }}
chart: {{ template "grafana.chart" $ }}
release: {{ $.Release.Name }}
heritage: {{ $.Release.Service }}
dashboard-provider: {{ $provider }}
data:
{{- range $key, $value := $dashboards }}
{{- if hasKey $value "json" }}
{{ $key }}.json: |
{{ $value.json | indent 4 }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}


@@ -1,270 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "grafana.fullname" . }}
labels:
app: {{ template "grafana.name" . }}
chart: {{ template "grafana.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
replicas: {{ .Values.replicas }}
selector:
matchLabels:
app: {{ template "grafana.name" . }}
release: {{ .Release.Name }}
strategy:
type: {{ .Values.deploymentStrategy }}
{{- if ne .Values.deploymentStrategy "RollingUpdate" }}
rollingUpdate: null
{{- end }}
template:
metadata:
labels:
app: {{ template "grafana.name" . }}
release: {{ .Release.Name }}
{{- with .Values.podAnnotations }}
annotations:
{{ toYaml . | indent 8 }}
{{- end }}
spec:
serviceAccountName: {{ template "grafana.serviceAccountName" . }}
{{- if .Values.schedulerName }}
schedulerName: "{{ .Values.schedulerName }}"
{{- end }}
{{- if .Values.securityContext }}
securityContext:
{{ toYaml .Values.securityContext | indent 8 }}
{{- end }}
{{- if .Values.dashboards }}
initContainers:
- name: download-dashboards
image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}"
imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }}
command: ["sh", "/etc/grafana/download_dashboards.sh"]
volumeMounts:
- name: config
mountPath: "/etc/grafana/download_dashboards.sh"
subPath: download_dashboards.sh
- name: storage
mountPath: "/var/lib/grafana"
subPath: {{ .Values.persistence.subPath }}
{{- range .Values.extraSecretMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
readOnly: {{ .readOnly }}
{{- end }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
{{- range .Values.image.pullSecrets }}
- name: {{ . }}
{{- end}}
{{- end }}
containers:
{{- if .Values.sidecar.dashboards.enabled }}
- name: {{ template "grafana.name" . }}-sc-dashboard
image: "{{ .Values.sidecar.image }}"
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
env:
- name: LABEL
value: "{{ .Values.sidecar.dashboards.label }}"
- name: FOLDER
value: "{{ .Values.sidecar.dashboards.folder }}"
resources:
{{ toYaml .Values.sidecar.resources | indent 12 }}
volumeMounts:
- name: sc-dashboard-volume
mountPath: {{ .Values.sidecar.dashboards.folder | quote }}
{{- end}}
{{- if .Values.sidecar.datasources.enabled }}
- name: {{ template "grafana.name" . }}-sc-datasources
image: "{{ .Values.sidecar.image }}"
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
env:
- name: LABEL
value: "{{ .Values.sidecar.datasources.label }}"
- name: FOLDER
value: "/etc/grafana/provisioning/datasources"
resources:
{{ toYaml .Values.sidecar.resources | indent 12 }}
volumeMounts:
- name: sc-datasources-volume
mountPath: "/etc/grafana/provisioning/datasources"
{{- end}}
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
volumeMounts:
- name: config
mountPath: "/etc/grafana/grafana.ini"
subPath: grafana.ini
- name: ldap
mountPath: "/etc/grafana/ldap.toml"
subPath: ldap.toml
{{- if .Values.dashboards }}
{{- range $provider, $dashboards := .Values.dashboards }}
{{- range $key, $value := $dashboards }}
{{- if hasKey $value "json" }}
- name: dashboards-{{ $provider }}
mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json"
subPath: "{{ $key }}.json"
{{- end }}
{{- end }}
{{- end }}
{{- end -}}
{{- if .Values.dashboardsConfigMaps }}
{{- range keys .Values.dashboardsConfigMaps }}
- name: dashboards-{{ . }}
mountPath: "/var/lib/grafana/dashboards/{{ . }}"
{{- end }}
{{- end }}
{{- if .Values.datasources }}
- name: config
mountPath: "/etc/grafana/provisioning/datasources/datasources.yaml"
subPath: datasources.yaml
{{- end }}
{{- if .Values.dashboardProviders }}
- name: config
mountPath: "/etc/grafana/provisioning/dashboards/dashboardproviders.yaml"
subPath: dashboardproviders.yaml
{{- end }}
{{- if .Values.sidecar.dashboards.enabled }}
- name: sc-dashboard-volume
mountPath: {{ .Values.sidecar.dashboards.folder | quote }}
- name: sc-dashboard-provider
mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml"
subPath: provider.yaml
{{- end}}
{{- if .Values.sidecar.datasources.enabled }}
- name: sc-datasources-volume
mountPath: "/etc/grafana/provisioning/datasources"
{{- end}}
- name: storage
mountPath: "/var/lib/grafana"
subPath: {{ .Values.persistence.subPath }}
{{- range .Values.extraSecretMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
readOnly: {{ .readOnly }}
{{- end }}
ports:
- name: service
containerPort: {{ .Values.service.port }}
protocol: TCP
- name: grafana
containerPort: 3000
protocol: TCP
env:
- name: GF_SECURITY_ADMIN_USER
valueFrom:
secretKeyRef:
name: {{ template "grafana.fullname" . }}
key: admin-user
- name: GF_SECURITY_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: {{ template "grafana.fullname" . }}
key: admin-password
{{- if .Values.plugins }}
- name: GF_INSTALL_PLUGINS
valueFrom:
configMapKeyRef:
name: {{ template "grafana.fullname" . }}
key: plugins
{{- end }}
{{- if .Values.smtp.existingSecret }}
- name: GF_SMTP_USER
valueFrom:
secretKeyRef:
name: {{ .Values.smtp.existingSecret }}
key: user
- name: GF_SMTP_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Values.smtp.existingSecret }}
key: password
{{- end }}
{{- range $key, $value := .Values.env }}
- name: "{{ $key }}"
value: "{{ $value }}"
{{- end }}
{{- if .Values.envFromSecret }}
envFrom:
- secretRef:
name: {{ .Values.envFromSecret }}
{{- end }}
livenessProbe:
{{ toYaml .Values.livenessProbe | indent 12 }}
readinessProbe:
{{ toYaml .Values.readinessProbe | indent 12 }}
resources:
{{ toYaml .Values.resources | indent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
volumes:
- name: config
configMap:
name: {{ template "grafana.fullname" . }}
{{- if .Values.dashboards }}
{{- range keys .Values.dashboards }}
- name: dashboards-{{ . }}
configMap:
name: {{ template "grafana.fullname" $ }}-dashboards-{{ . }}
{{- end }}
{{- end }}
{{- if .Values.dashboardsConfigMaps }}
{{- range $provider, $name := .Values.dashboardsConfigMaps }}
- name: dashboards-{{ $provider }}
configMap:
name: {{ $name }}
{{- end }}
{{- end }}
- name: ldap
secret:
{{- if .Values.ldap.existingSecret }}
secretName: {{ .Values.ldap.existingSecret }}
{{- else }}
secretName: {{ template "grafana.fullname" . }}
{{- end }}
items:
- key: ldap-toml
path: ldap.toml
- name: storage
{{- if .Values.persistence.enabled }}
persistentVolumeClaim:
claimName: {{ .Values.persistence.existingClaim | default (include "grafana.fullname" .) }}
{{- else }}
emptyDir: {}
{{- end -}}
{{- if .Values.sidecar.dashboards.enabled }}
- name: sc-dashboard-volume
emptyDir: {}
- name: sc-dashboard-provider
configMap:
name: {{ template "grafana.fullname" . }}-config-dashboards
{{- end }}
{{- if .Values.sidecar.datasources.enabled }}
- name: sc-datasources-volume
emptyDir: {}
{{- end -}}
{{- range .Values.extraSecretMounts }}
- name: {{ .name }}
secret:
secretName: {{ .secretName }}
defaultMode: {{ .defaultMode }}
{{- end }}
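
A template this dense is easier to review rendered than read raw; `helm template` expands it locally without installing anything, e.g. to check how the storage volume resolves:

```console
$ helm template ./grafana --set persistence.enabled=true | grep -A3 'name: storage'
```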


@@ -1,42 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "grafana.fullname" . -}}
{{- $servicePort := .Values.service.port -}}
{{- $ingressPath := .Values.ingress.path -}}
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
app: {{ template "grafana.name" . }}
chart: {{ template "grafana.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.ingress.labels }}
{{ toYaml .Values.ingress.labels | indent 4 }}
{{- end }}
{{- with .Values.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ . }}
http:
paths:
- path: {{ $ingressPath }}
backend:
serviceName: {{ $fullName }}
servicePort: {{ $servicePort }}
{{- end }}
{{- end }}


@@ -1,40 +0,0 @@
{{- if .Values.rbac.pspEnabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "grafana.fullname" . }}
labels:
app: {{ template "grafana.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
- ALL
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
- 'persistentVolumeClaim'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'RunAsAny'
readOnlyRootFilesystem: false
{{- end }}


@@ -1,24 +0,0 @@
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ template "grafana.fullname" . }}
labels:
app: {{ template "grafana.name" . }}
chart: {{ template "grafana.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- with .Values.persistence.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
accessModes:
{{- range .Values.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
storageClassName: {{ .Values.persistence.storageClassName }}
{{- end -}}


@@ -1,18 +0,0 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: {{ template "grafana.fullname" . }}
labels:
app: {{ template "grafana.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
{{- if .Values.rbac.pspEnabled }}
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: [{{ template "grafana.fullname" . }}]
{{- end }}
{{- end }}


@@ -1,18 +0,0 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: {{ template "grafana.fullname" . }}
labels:
app: {{ template "grafana.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "grafana.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "grafana.serviceAccountName" . }}
{{- end -}}


@@ -1,20 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ template "grafana.fullname" . }}
labels:
app: {{ template "grafana.name" . }}
chart: {{ template "grafana.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
type: Opaque
data:
admin-user: {{ .Values.adminUser | b64enc | quote }}
{{- if .Values.adminPassword }}
admin-password: {{ .Values.adminPassword | b64enc | quote }}
{{- else }}
admin-password: {{ randAlphaNum 40 | b64enc | quote }}
{{- end }}
{{- if not .Values.ldap.existingSecret }}
ldap-toml: {{ .Values.ldap.config | b64enc | quote }}
{{- end }}


@@ -1,49 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "grafana.fullname" . }}
labels:
app: {{ template "grafana.name" . }}
chart: {{ template "grafana.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
{{- with .Values.service.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
type: ClusterIP
{{- if .Values.service.clusterIP }}
clusterIP: {{ .Values.service.clusterIP }}
{{end}}
{{- else if eq .Values.service.type "LoadBalancer" }}
type: {{ .Values.service.type }}
{{- if .Values.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}
{{- if .Values.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
{{- end -}}
{{- else }}
type: {{ .Values.service.type }}
{{- end }}
{{- if .Values.service.externalIPs }}
externalIPs:
{{ toYaml .Values.service.externalIPs | indent 4 }}
{{- end }}
ports:
- name: service
port: {{ .Values.service.port }}
protocol: TCP
targetPort: 3000
{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
nodePort: {{.Values.service.nodePort}}
{{ end }}
selector:
app: {{ template "grafana.name" . }}
release: {{ .Release.Name }}


@@ -1,11 +0,0 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: {{ template "grafana.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "grafana.serviceAccountName" . }}
{{- end }}


@@ -1,276 +0,0 @@
rbac:
create: true
pspEnabled: true
serviceAccount:
create: true
name:
replicas: 1
deploymentStrategy: RollingUpdate
livenessProbe:
httpGet:
path: /api/health
port: 3000
readinessProbe:
httpGet:
path: /api/health
port: 3000
initialDelaySeconds: 60
timeoutSeconds: 30
failureThreshold: 10
periodSeconds: 10
image:
repository: grafana/grafana
tag: 5.2.4
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistrKeySecretName
securityContext:
runAsUser: 472
fsGroup: 472
downloadDashboardsImage:
repository: appropriate/curl
tag: latest
pullPolicy: IfNotPresent
## Pod Annotations
# podAnnotations: {}
## Deployment annotations
# annotations: {}
## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service).
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
type: ClusterIP
port: 80
annotations: {}
labels: {}
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
path: /
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
#
nodeSelector: {}
## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Affinity for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: false
# storageClassName: default
# accessModes:
# - ReadWriteOnce
# size: 10Gi
# annotations: {}
# subPath: ""
# existingClaim:
adminUser: admin
# adminPassword: strongpassword
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Extra environment variables that will be passed onto deployment pods
env: {}
## The name of a secret in the same kubernetes namespace which contain values to be added to the environment
## This can be useful for auth tokens, etc
envFromSecret: ""
## Additional grafana server secret mounts
# Defines additional mounts with secrets. Secrets must be manually created in the namespace.
extraSecretMounts: []
# - name: secret-files
# mountPath: /etc/secrets
# secretName: grafana-secret-files
# readOnly: true
## Pass the plugins you want installed as a list.
##
plugins: []
# - digrich-bubblechart-panel
# - grafana-clock-panel
## Configure grafana datasources
## ref: http://docs.grafana.org/administration/provisioning/#datasources
##
datasources: {}
# datasources.yaml:
# apiVersion: 1
# datasources:
# - name: Prometheus
# type: prometheus
# url: http://prometheus-prometheus-server
# access: proxy
# isDefault: true
## Configure grafana dashboard providers
## ref: http://docs.grafana.org/administration/provisioning/#dashboards
##
## `path` must be /var/lib/grafana/dashboards/<provider_name>
##
dashboardProviders: {}
# dashboardproviders.yaml:
# apiVersion: 1
# providers:
# - name: 'default'
# orgId: 1
# folder: ''
# type: file
# disableDeletion: false
# editable: true
# options:
# path: /var/lib/grafana/dashboards/default
## Configure grafana dashboard to import
## NOTE: To use dashboards you must also enable/configure dashboardProviders
## ref: https://grafana.com/dashboards
##
## dashboards per provider, use provider name as key.
##
dashboards: {}
# default:
# some-dashboard:
# json: |
# $RAW_JSON
# prometheus-stats:
# gnetId: 2
# revision: 2
# datasource: Prometheus
# local-dashboard:
# url: https://example.com/repository/test.json
## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value.
## A provider's dashboards must be defined either by external ConfigMaps or in values.yaml, but not in both.
## ConfigMap data example:
##
## data:
## example-dashboard.json: |
## RAW_JSON
##
dashboardsConfigMaps: {}
# default: ""
## Grafana's primary configuration
## NOTE: values in map will be converted to ini format
## ref: http://docs.grafana.org/installation/configuration/
##
grafana.ini:
paths:
data: /var/lib/grafana/data
logs: /var/log/grafana
plugins: /var/lib/grafana/plugins
provisioning: /etc/grafana/provisioning
analytics:
check_for_updates: true
log:
mode: console
grafana_net:
url: https://grafana.net
## LDAP Authentication can be enabled with the following values on grafana.ini
## NOTE: Grafana will fail to start if the value for ldap.toml is invalid
# auth.ldap:
# enabled: true
# allow_sign_up: true
# config_file: /etc/grafana/ldap.toml
## Grafana's LDAP configuration
## Templated by the template in _helpers.tpl
## NOTE: To enable LDAP, grafana.ini must be configured with auth.ldap.enabled
## ref: http://docs.grafana.org/installation/configuration/#auth-ldap
## ref: http://docs.grafana.org/installation/ldap/#configuration
ldap:
# `existingSecret` is a reference to an existing secret containing the ldap configuration
# for Grafana in a key `ldap-toml`.
existingSecret: ""
# `config` is the content of `ldap.toml` that will be stored in the created secret
config: ""
# config: |-
# verbose_logging = true
# [[servers]]
# host = "my-ldap-server"
# port = 636
# use_ssl = true
# start_tls = false
# ssl_skip_verify = false
# bind_dn = "uid=%s,ou=users,dc=myorg,dc=com"
## Grafana's SMTP configuration
## NOTE: To enable, grafana.ini must be configured with smtp.enabled
## ref: http://docs.grafana.org/installation/configuration/#smtp
smtp:
# `existingSecret` is a reference to an existing secret containing the smtp configuration
# for Grafana in keys `user` and `password`.
existingSecret: ""
## Sidecars that collect the configmaps with the specified label and store the included files in the respective folders
## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards
sidecar:
image: kiwigrid/k8s-sidecar:0.0.3
imagePullPolicy: IfNotPresent
resources:
# limits:
# cpu: 100m
# memory: 100Mi
# requests:
# cpu: 50m
# memory: 50Mi
dashboards:
enabled: false
# label that the configmaps with dashboards are marked with
label: grafana_dashboard
# folder in the pod that should hold the collected dashboards
folder: /tmp/dashboards
datasources:
enabled: false
# label that the configmaps with datasources are marked with
label: grafana_datasource


@@ -1,21 +0,0 @@
serverFiles:
alerts:
groups:
- name: k8s_alert_rules
rules:
# ALERT when container memory usage exceeds 90%
- alert: container_mem_over_90
expr: (sum(container_memory_working_set_bytes{image!="",name=~"^k8s_.*", pod_name!=""}) by (pod_name)) / (sum (container_spec_memory_limit_bytes{image!="",name=~"^k8s_.*", pod_name!=""}) by (pod_name)) > 0.9 and (sum(container_memory_working_set_bytes{image!="",name=~"^k8s_.*", pod_name!=""}) by (pod_name)) / (sum (container_spec_memory_limit_bytes{image!="",name=~"^k8s_.*", pod_name!=""}) by (pod_name)) < 2
for: 2m
annotations:
summary: "{{ $labels.pod_name }}'s memory usage alert"
description: "Memory Usage of Pod {{ $labels.pod_name }} on {{ $labels.kubernetes_io_hostname }} has exceeded 90% for more than 2 minutes."
# ALERT when node is down
- alert: node_down
expr: up == 0
for: 60s
annotations:
summary: "Node {{ $labels.kubernetes_io_hostname }} is down"
description: "Node {{ $labels.kubernetes_io_hostname }} is down"

View File

@ -1,20 +0,0 @@
alertmanager:
persistentVolume:
enabled: false
service:
type: NodePort
nodePort: 39001
server:
persistentVolume:
enabled: false
service:
type: NodePort
nodePort: 39000
pushgateway:
enabled: false
kubeStateMetrics:
image:
repository: mirrorgooglecontainers/kube-state-metrics

View File

@ -1,21 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

View File

@ -1,19 +0,0 @@
name: prometheus
version: 7.1.4
appVersion: 2.4.3
description: Prometheus is a monitoring system and time series database.
home: https://prometheus.io/
icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png
sources:
- https://github.com/prometheus/alertmanager
- https://github.com/prometheus/prometheus
- https://github.com/prometheus/pushgateway
- https://github.com/prometheus/node_exporter
- https://github.com/kubernetes/kube-state-metrics
maintainers:
- name: mgoodness
email: mgoodness@gmail.com
- name: gianrubio
email: gianrubio@gmail.com
engine: gotpl
tillerVersion: ">=2.8.0"

View File

@ -1,6 +0,0 @@
approvers:
- mgoodness
- gianrubio
reviewers:
- mgoodness
- gianrubio

View File

@ -1,349 +0,0 @@
# Prometheus
[Prometheus](https://prometheus.io/), a [Cloud Native Computing Foundation](https://cncf.io/) project, is a systems and service monitoring system. It collects metrics from configured targets at given intervals, evaluates rule expressions, displays the results, and can trigger alerts if some condition is observed to be true.
## TL;DR;
```console
$ helm install stable/prometheus
```
## Introduction
This chart bootstraps a [Prometheus](https://prometheus.io/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes 1.3+ with Beta APIs enabled
## Installing the Chart
To install the chart with the release name `my-release`:
```console
$ helm install --name my-release stable/prometheus
```
The command deploys Prometheus on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```console
$ helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Prometheus 2.x
Prometheus version 2.x has made changes to alertmanager, storage and recording rules. Check out the migration guide [here](https://prometheus.io/docs/prometheus/2.0/migration/).
Users of this chart will need to update their alerting rules to the new format before they can upgrade.
## Upgrading from previous chart versions
As of version 5.0, this chart uses Prometheus 2.1. This version of prometheus introduces a new data format and is not compatible with prometheus 1.x. It is recommended to install this as a new release, as updating existing releases will not work. See the [prometheus docs](https://prometheus.io/docs/prometheus/latest/migration/#storage) for instructions on retaining your old data.
### Example migration
Assume you have an existing release of the prometheus chart named `prometheus-old`. In order to update to prometheus 2.1 while keeping your old data, do the following:
1. Update the `prometheus-old` release. Disable scraping on every component besides the prometheus server, similar to the configuration below:
```
alertmanager:
enabled: false
alertmanagerFiles:
alertmanager.yml: ""
kubeStateMetrics:
enabled: false
nodeExporter:
enabled: false
pushgateway:
enabled: false
server:
extraArgs:
storage.local.retention: 720h
serverFiles:
alerts: ""
prometheus.yml: ""
rules: ""
```
1. Deploy a new release of the chart with version 5.0+ using prometheus 2.x. In the values.yaml set the scrape config as usual, and also add the `prometheus-old` instance as a remote-read target.
```
prometheus.yml:
...
remote_read:
- url: http://prometheus-old/api/v1/read
...
```
Old data will be available when you query the new prometheus instance.
## Configuration
The following table lists the configurable parameters of the Prometheus chart and their default values.
Parameter | Description | Default
--------- | ----------- | -------
`alertmanager.enabled` | If true, create alertmanager | `true`
`alertmanager.name` | alertmanager container name | `alertmanager`
`alertmanager.image.repository` | alertmanager container image repository | `prom/alertmanager`
`alertmanager.image.tag` | alertmanager container image tag | `v0.15.2`
`alertmanager.image.pullPolicy` | alertmanager container image pull policy | `IfNotPresent`
`alertmanager.prefixURL` | The prefix slug at which the server can be accessed | ``
`alertmanager.baseURL` | The external url at which the server can be accessed | `/`
`alertmanager.extraArgs` | Additional alertmanager container arguments | `{}`
`alertmanager.configMapOverrideName` | Prometheus alertmanager ConfigMap override where full-name is `{{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}}` and setting this value will prevent the default alertmanager ConfigMap from being generated | `""`
`alertmanager.ingress.enabled` | If true, alertmanager Ingress will be created | `false`
`alertmanager.ingress.annotations` | alertmanager Ingress annotations | `{}`
`alertmanager.ingress.extraLabels` | alertmanager Ingress additional labels | `{}`
`alertmanager.ingress.hosts` | alertmanager Ingress hostnames | `[]`
`alertmanager.ingress.tls` | alertmanager Ingress TLS configuration (YAML) | `[]`
`alertmanager.nodeSelector` | node labels for alertmanager pod assignment | `{}`
`alertmanager.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]`
`alertmanager.affinity` | pod affinity | `{}`
`alertmanager.schedulerName` | alertmanager alternate scheduler name | `nil`
`alertmanager.persistentVolume.enabled` | If true, alertmanager will create a Persistent Volume Claim | `true`
`alertmanager.persistentVolume.accessModes` | alertmanager data Persistent Volume access modes | `[ReadWriteOnce]`
`alertmanager.persistentVolume.annotations` | Annotations for alertmanager Persistent Volume Claim | `{}`
`alertmanager.persistentVolume.existingClaim` | alertmanager data Persistent Volume existing claim name | `""`
`alertmanager.persistentVolume.mountPath` | alertmanager data Persistent Volume mount root path | `/data`
`alertmanager.persistentVolume.size` | alertmanager data Persistent Volume size | `2Gi`
`alertmanager.persistentVolume.storageClass` | alertmanager data Persistent Volume Storage Class | `unset`
`alertmanager.persistentVolume.subPath` | Subdirectory of alertmanager data Persistent Volume to mount | `""`
`alertmanager.podAnnotations` | annotations to be added to alertmanager pods | `{}`
`alertmanager.replicaCount` | desired number of alertmanager pods | `1`
`alertmanager.priorityClassName` | alertmanager priorityClassName | `nil`
`alertmanager.resources` | alertmanager pod resource requests & limits | `{}`
`alertmanager.securityContext` | Custom [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for Alert Manager containers | `{}`
`alertmanager.service.annotations` | annotations for alertmanager service | `{}`
`alertmanager.service.clusterIP` | internal alertmanager cluster service IP | `""`
`alertmanager.service.externalIPs` | alertmanager service external IP addresses | `[]`
`alertmanager.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""`
`alertmanager.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]`
`alertmanager.service.servicePort` | alertmanager service port | `80`
`alertmanager.service.type` | type of alertmanager service to create | `ClusterIP`
`alertmanagerFiles.alertmanager.yml` | Prometheus alertmanager configuration | example configuration
`configmapReload.name` | configmap-reload container name | `configmap-reload`
`configmapReload.image.repository` | configmap-reload container image repository | `jimmidyson/configmap-reload`
`configmapReload.image.tag` | configmap-reload container image tag | `v0.2.2`
`configmapReload.image.pullPolicy` | configmap-reload container image pull policy | `IfNotPresent`
`configmapReload.extraArgs` | Additional configmap-reload container arguments | `{}`
`configmapReload.extraConfigmapMounts` | Additional configmap-reload configMap mounts | `[]`
`configmapReload.resources` | configmap-reload pod resource requests & limits | `{}`
`initChownData.enabled` | If false, don't reset data ownership at startup | `true`
`initChownData.name` | init-chown-data container name | `init-chown-data`
`initChownData.image.repository` | init-chown-data container image repository | `busybox`
`initChownData.image.tag` | init-chown-data container image tag | `latest`
`initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent`
`initChownData.resources` | init-chown-data pod resource requests & limits | `{}`
`kubeStateMetrics.enabled` | If true, create kube-state-metrics | `true`
`kubeStateMetrics.name` | kube-state-metrics container name | `kube-state-metrics`
`kubeStateMetrics.image.repository` | kube-state-metrics container image repository| `quay.io/coreos/kube-state-metrics`
`kubeStateMetrics.image.tag` | kube-state-metrics container image tag | `v1.4.0`
`kubeStateMetrics.image.pullPolicy` | kube-state-metrics container image pull policy | `IfNotPresent`
`kubeStateMetrics.args` | kube-state-metrics container arguments | `{}`
`kubeStateMetrics.nodeSelector` | node labels for kube-state-metrics pod assignment | `{}`
`kubeStateMetrics.podAnnotations` | annotations to be added to kube-state-metrics pods | `{}`
`kubeStateMetrics.deploymentAnnotations` | annotations to be added to kube-state-metrics deployment | `{}`
`kubeStateMetrics.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]`
`kubeStateMetrics.replicaCount` | desired number of kube-state-metrics pods | `1`
`kubeStateMetrics.priorityClassName` | kube-state-metrics priorityClassName | `nil`
`kubeStateMetrics.resources` | kube-state-metrics resource requests and limits (YAML) | `{}`
`kubeStateMetrics.securityContext` | Custom [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for kube-state-metrics containers | `{}`
`kubeStateMetrics.service.annotations` | annotations for kube-state-metrics service | `{prometheus.io/scrape: "true"}`
`kubeStateMetrics.service.clusterIP` | internal kube-state-metrics cluster service IP | `None`
`kubeStateMetrics.service.externalIPs` | kube-state-metrics service external IP addresses | `[]`
`kubeStateMetrics.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""`
`kubeStateMetrics.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]`
`kubeStateMetrics.service.servicePort` | kube-state-metrics service port | `80`
`kubeStateMetrics.service.type` | type of kube-state-metrics service to create | `ClusterIP`
`nodeExporter.enabled` | If true, create node-exporter | `true`
`nodeExporter.name` | node-exporter container name | `node-exporter`
`nodeExporter.image.repository` | node-exporter container image repository| `prom/node-exporter`
`nodeExporter.image.tag` | node-exporter container image tag | `v0.16.0`
`nodeExporter.image.pullPolicy` | node-exporter container image pull policy | `IfNotPresent`
`nodeExporter.extraArgs` | Additional node-exporter container arguments | `{}`
`nodeExporter.extraHostPathMounts` | Additional node-exporter hostPath mounts | `[]`
`nodeExporter.extraConfigmapMounts` | Additional node-exporter configMap mounts | `[]`
`nodeExporter.hostNetwork` | If true, node-exporter pods share the host network namespace | `true`
`nodeExporter.hostPID` | If true, node-exporter pods share the host PID namespace | `true`
`nodeExporter.nodeSelector` | node labels for node-exporter pod assignment | `{}`
`nodeExporter.podAnnotations` | annotations to be added to node-exporter pods | `{}`
`nodeExporter.pod.labels` | labels to be added to node-exporter pods | `{}`
`nodeExporter.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]`
`nodeExporter.priorityClassName` | node-exporter priorityClassName | `nil`
`nodeExporter.resources` | node-exporter resource requests and limits (YAML) | `{}`
`nodeExporter.securityContext` | securityContext for containers in pod | `{}`
`nodeExporter.service.annotations` | annotations for node-exporter service | `{prometheus.io/scrape: "true"}`
`nodeExporter.service.clusterIP` | internal node-exporter cluster service IP | `None`
`nodeExporter.service.externalIPs` | node-exporter service external IP addresses | `[]`
`nodeExporter.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""`
`nodeExporter.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]`
`nodeExporter.service.servicePort` | node-exporter service port | `9100`
`nodeExporter.service.type` | type of node-exporter service to create | `ClusterIP`
`pushgateway.enabled` | If true, create pushgateway | `true`
`pushgateway.name` | pushgateway container name | `pushgateway`
`pushgateway.image.repository` | pushgateway container image repository | `prom/pushgateway`
`pushgateway.image.tag` | pushgateway container image tag | `v0.5.2`
`pushgateway.image.pullPolicy` | pushgateway container image pull policy | `IfNotPresent`
`pushgateway.extraArgs` | Additional pushgateway container arguments | `{}`
`pushgateway.ingress.enabled` | If true, pushgateway Ingress will be created | `false`
`pushgateway.ingress.annotations` | pushgateway Ingress annotations | `{}`
`pushgateway.ingress.hosts` | pushgateway Ingress hostnames | `[]`
`pushgateway.ingress.tls` | pushgateway Ingress TLS configuration (YAML) | `[]`
`pushgateway.nodeSelector` | node labels for pushgateway pod assignment | `{}`
`pushgateway.podAnnotations` | annotations to be added to pushgateway pods | `{}`
`pushgateway.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]`
`pushgateway.replicaCount` | desired number of pushgateway pods | `1`
`pushgateway.priorityClassName` | pushgateway priorityClassName | `nil`
`pushgateway.resources` | pushgateway pod resource requests & limits | `{}`
`pushgateway.service.annotations` | annotations for pushgateway service | `{}`
`pushgateway.service.clusterIP` | internal pushgateway cluster service IP | `""`
`pushgateway.service.externalIPs` | pushgateway service external IP addresses | `[]`
`pushgateway.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""`
`pushgateway.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]`
`pushgateway.service.servicePort` | pushgateway service port | `9091`
`pushgateway.service.type` | type of pushgateway service to create | `ClusterIP`
`rbac.create` | If true, create & use RBAC resources | `true`
`server.name` | Prometheus server container name | `server`
`server.image.repository` | Prometheus server container image repository | `prom/prometheus`
`server.image.tag` | Prometheus server container image tag | `v2.4.3`
`server.image.pullPolicy` | Prometheus server container image pull policy | `IfNotPresent`
`server.enableAdminApi` | If true, Prometheus administrative HTTP API will be enabled. Please note that you should take care of administrative API access protection (ingress or some frontend Nginx with auth) before enabling it. | `false`
`server.global.scrape_interval` | How frequently to scrape targets by default | `1m`
`server.global.scrape_timeout` | How long until a scrape request times out | `10s`
`server.global.evaluation_interval` | How frequently to evaluate rules | `1m`
`server.extraArgs` | Additional Prometheus server container arguments | `{}`
`server.prefixURL` | The prefix slug at which the server can be accessed | ``
`server.baseURL` | The external url at which the server can be accessed | ``
`server.extraHostPathMounts` | Additional Prometheus server hostPath mounts | `[]`
`server.extraConfigmapMounts` | Additional Prometheus server configMap mounts | `[]`
`server.extraSecretMounts` | Additional Prometheus server Secret mounts | `[]`
`server.configMapOverrideName` | Prometheus server ConfigMap override where full-name is `{{.Release.Name}}-{{.Values.server.configMapOverrideName}}` and setting this value will prevent the default server ConfigMap from being generated | `""`
`server.ingress.enabled` | If true, Prometheus server Ingress will be created | `false`
`server.ingress.annotations` | Prometheus server Ingress annotations | `[]`
`server.ingress.extraLabels` | Prometheus server Ingress additional labels | `{}`
`server.ingress.hosts` | Prometheus server Ingress hostnames | `[]`
`server.ingress.tls` | Prometheus server Ingress TLS configuration (YAML) | `[]`
`server.nodeSelector` | node labels for Prometheus server pod assignment | `{}`
`server.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]`
`server.affinity` | pod affinity | `{}`
`server.priorityClassName` | Prometheus server priorityClassName | `nil`
`server.schedulerName` | Prometheus server alternate scheduler name | `nil`
`server.persistentVolume.enabled` | If true, Prometheus server will create a Persistent Volume Claim | `true`
`server.persistentVolume.accessModes` | Prometheus server data Persistent Volume access modes | `[ReadWriteOnce]`
`server.persistentVolume.annotations` | Prometheus server data Persistent Volume annotations | `{}`
`server.persistentVolume.existingClaim` | Prometheus server data Persistent Volume existing claim name | `""`
`server.persistentVolume.mountPath` | Prometheus server data Persistent Volume mount root path | `/data`
`server.persistentVolume.size` | Prometheus server data Persistent Volume size | `8Gi`
`server.persistentVolume.storageClass` | Prometheus server data Persistent Volume Storage Class | `unset`
`server.persistentVolume.subPath` | Subdirectory of Prometheus server data Persistent Volume to mount | `""`
`server.podAnnotations` | annotations to be added to Prometheus server pods | `{}`
`server.deploymentAnnotations` | annotations to be added to Prometheus server deployment | `{}`
`server.replicaCount` | desired number of Prometheus server pods | `1`
`server.resources` | Prometheus server resource requests and limits | `{}`
`server.securityContext` | Custom [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for server containers | `{}`
`server.service.annotations` | annotations for Prometheus server service | `{}`
`server.service.clusterIP` | internal Prometheus server cluster service IP | `""`
`server.service.externalIPs` | Prometheus server service external IP addresses | `[]`
`server.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""`
`server.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]`
`server.service.nodePort` | Port to be used as the service NodePort (ignored if `server.service.type` is not `NodePort`) | `0`
`server.service.servicePort` | Prometheus server service port | `80`
`server.service.type` | type of Prometheus server service to create | `ClusterIP`
`serviceAccounts.alertmanager.create` | If true, create the alertmanager service account | `true`
`serviceAccounts.alertmanager.name` | name of the alertmanager service account to use or create | `{{ prometheus.alertmanager.fullname }}`
`serviceAccounts.kubeStateMetrics.create` | If true, create the kubeStateMetrics service account | `true`
`serviceAccounts.kubeStateMetrics.name` | name of the kubeStateMetrics service account to use or create | `{{ prometheus.kubeStateMetrics.fullname }}`
`serviceAccounts.nodeExporter.create` | If true, create the nodeExporter service account | `true`
`serviceAccounts.nodeExporter.name` | name of the nodeExporter service account to use or create | `{{ prometheus.nodeExporter.fullname }}`
`serviceAccounts.pushgateway.create` | If true, create the pushgateway service account | `true`
`serviceAccounts.pushgateway.name` | name of the pushgateway service account to use or create | `{{ prometheus.pushgateway.fullname }}`
`serviceAccounts.server.create` | If true, create the server service account | `true`
`serviceAccounts.server.name` | name of the server service account to use or create | `{{ prometheus.server.fullname }}`
`server.terminationGracePeriodSeconds` | Prometheus server Pod termination grace period | `300`
`server.retention` | (optional) Prometheus data retention | `""`
`serverFiles.alerts` | Prometheus server alerts configuration | `{}`
`serverFiles.rules` | Prometheus server rules configuration | `{}`
`serverFiles.prometheus.yml` | Prometheus server scrape configuration | example configuration
`networkPolicy.enabled` | Enable NetworkPolicy | `false`
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
$ helm install stable/prometheus --name my-release \
--set server.terminationGracePeriodSeconds=360
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
```console
$ helm install stable/prometheus --name my-release -f values.yaml
```
> **Tip**: You can use the default [values.yaml](values.yaml)
### RBAC Configuration
Role and RoleBinding resources will be created automatically for the `server` and `kubeStateMetrics` services.
To manually set up RBAC, set the parameter `rbac.create=false` and specify the service account to be used for each service by setting `serviceAccounts.{{ component }}.create` to `false` and `serviceAccounts.{{ component }}.name` to the name of a pre-existing service account.
> **Tip**: You can refer to the default `*-clusterrole.yaml` and `*-clusterrolebinding.yaml` files in [templates](templates/) to customize your own.
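For example, a custom values file for the manual setup described above might look like the following minimal sketch (the pre-existing service account names are assumptions):
```yaml
rbac:
  create: false
serviceAccounts:
  server:
    create: false
    name: my-prometheus-server      # pre-existing service account (hypothetical)
  kubeStateMetrics:
    create: false
    name: my-kube-state-metrics     # pre-existing service account (hypothetical)
```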
### ConfigMap Files
AlertManager is configured through [alertmanager.yml](https://prometheus.io/docs/alerting/configuration/). This file (and any others listed in `alertmanagerFiles`) will be mounted into the `alertmanager` pod.
Prometheus is configured through [prometheus.yml](https://prometheus.io/docs/operating/configuration/). This file (and any others listed in `serverFiles`) will be mounted into the `server` pod.
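As a minimal sketch, overriding the Alertmanager configuration through `alertmanagerFiles` in a custom values.yaml could look like this (the receiver name and webhook URL are assumptions):
```yaml
alertmanagerFiles:
  alertmanager.yml:
    route:
      receiver: default-webhook
    receivers:
      - name: default-webhook
        webhook_configs:
          # hypothetical in-cluster alert sink
          - url: http://alert-sink.monitor.svc:8080/hooks
```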
### Ingress TLS
If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [kube-lego](https://github.com/jetstack/kube-lego)), please refer to the documentation for that mechanism.
To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret in the namespace:
```console
kubectl create secret tls prometheus-server-tls --cert=path/to/tls.cert --key=path/to/tls.key
```
Include the secret's name, along with the desired hostnames, in the alertmanager/server Ingress TLS section of your custom `values.yaml` file:
```yaml
server:
ingress:
## If true, Prometheus server Ingress will be created
##
enabled: true
## Prometheus server Ingress hostnames
## Must be provided if Ingress is enabled
##
hosts:
- prometheus.domain.com
## Prometheus server Ingress TLS configuration
## Secrets must be manually created in the namespace
##
tls:
- secretName: prometheus-server-tls
hosts:
- prometheus.domain.com
```
### NetworkPolicy
Enabling Network Policy for Prometheus will secure connections to Alert Manager
and Kube State Metrics by only accepting connections from Prometheus Server.
All inbound connections to Prometheus Server are still allowed.
To enable network policy for Prometheus, install a networking plugin that
implements the Kubernetes NetworkPolicy spec, and set `networkPolicy.enabled` to true.
If NetworkPolicy is enabled for Prometheus' scrape targets, you may also need
to manually create a NetworkPolicy that allows it.
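For example, enabling the chart's policies only takes one value (a sketch):
```yaml
networkPolicy:
  enabled: true
```
If a scrape target also sits behind NetworkPolicy, a target-side policy along these lines would admit the Prometheus server's pods (all names and labels here are assumptions for illustration):
```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-prometheus-scrape
spec:
  podSelector:
    matchLabels:
      app: my-app                # hypothetical scrape target
  ingress:
    - from:
        - namespaceSelector: {}  # any namespace; tighten as needed
          podSelector:
            matchLabels:
              component: server  # matches the chart's server pods
      ports:
        - port: 8080             # hypothetical metrics port
```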

View File

@ -1,100 +0,0 @@
The Prometheus server can be accessed via port {{ .Values.server.service.servicePort }} on the following DNS name from within your cluster:
{{ template "prometheus.server.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
{{ if .Values.server.ingress.enabled -}}
From outside the cluster, the server URL(s) are:
{{- range .Values.server.ingress.hosts }}
http://{{ . }}
{{- end }}
{{- else }}
Get the Prometheus server URL by running these commands in the same shell:
{{- if contains "NodePort" .Values.server.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.server.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.server.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.server.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.server.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.server.service.servicePort }}
{{- else if contains "ClusterIP" .Values.server.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.server.name }}" -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9090
{{- end }}
{{- end }}
{{- if .Values.server.persistentVolume.enabled }}
{{- else }}
#################################################################################
###### WARNING: Persistence is disabled!!! You will lose your data when #####
###### the Server pod is terminated. #####
#################################################################################
{{- end }}
{{ if .Values.alertmanager.enabled }}
The Prometheus alertmanager can be accessed via port {{ .Values.alertmanager.service.servicePort }} on the following DNS name from within your cluster:
{{ template "prometheus.alertmanager.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
{{ if .Values.alertmanager.ingress.enabled -}}
From outside the cluster, the alertmanager URL(s) are:
{{- range .Values.alertmanager.ingress.hosts }}
http://{{ . }}
{{- end }}
{{- else }}
Get the Alertmanager URL by running these commands in the same shell:
{{- if contains "NodePort" .Values.alertmanager.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.alertmanager.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.alertmanager.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.alertmanager.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.alertmanager.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.alertmanager.service.servicePort }}
{{- else if contains "ClusterIP" .Values.alertmanager.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.alertmanager.name }}" -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9093
{{- end }}
{{- end }}
{{- if .Values.alertmanager.persistentVolume.enabled }}
{{- else }}
#################################################################################
###### WARNING: Persistence is disabled!!! You will lose your data when #####
###### the AlertManager pod is terminated. #####
#################################################################################
{{- end }}
{{- end }}
{{ if .Values.pushgateway.enabled }}
The Prometheus PushGateway can be accessed via port {{ .Values.pushgateway.service.servicePort }} on the following DNS name from within your cluster:
{{ template "prometheus.pushgateway.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
{{ if .Values.pushgateway.ingress.enabled -}}
From outside the cluster, the pushgateway URL(s) are:
{{- range .Values.pushgateway.ingress.hosts }}
http://{{ . }}
{{- end }}
{{- else }}
Get the PushGateway URL by running these commands in the same shell:
{{- if contains "NodePort" .Values.pushgateway.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.pushgateway.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.pushgateway.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.pushgateway.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.pushgateway.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.pushgateway.service.servicePort }}
{{- else if contains "ClusterIP" .Values.pushgateway.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.pushgateway.name }}" -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9091
{{- end }}
{{- end }}
{{- end }}
For more information on running Prometheus, visit:
https://prometheus.io/

View File

@ -1,176 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "prometheus.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "prometheus.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create a fully qualified alertmanager name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "prometheus.alertmanager.fullname" -}}
{{- if .Values.alertmanager.fullnameOverride -}}
{{- .Values.alertmanager.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.alertmanager.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s-%s" .Release.Name $name .Values.alertmanager.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create a fully qualified kube-state-metrics name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "prometheus.kubeStateMetrics.fullname" -}}
{{- if .Values.kubeStateMetrics.fullnameOverride -}}
{{- .Values.kubeStateMetrics.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.kubeStateMetrics.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s-%s" .Release.Name $name .Values.kubeStateMetrics.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create a fully qualified node-exporter name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "prometheus.nodeExporter.fullname" -}}
{{- if .Values.nodeExporter.fullnameOverride -}}
{{- .Values.nodeExporter.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.nodeExporter.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s-%s" .Release.Name $name .Values.nodeExporter.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create a fully qualified Prometheus server name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "prometheus.server.fullname" -}}
{{- if .Values.server.fullnameOverride -}}
{{- .Values.server.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.server.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s-%s" .Release.Name $name .Values.server.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create a fully qualified pushgateway name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "prometheus.pushgateway.fullname" -}}
{{- if .Values.pushgateway.fullnameOverride -}}
{{- .Values.pushgateway.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.pushgateway.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s-%s" .Release.Name $name .Values.pushgateway.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for networkpolicy.
*/}}
{{- define "prometheus.networkPolicy.apiVersion" -}}
{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}}
{{- print "extensions/v1beta1" -}}
{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}}
{{- print "networking.k8s.io/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to use for the alertmanager component
*/}}
{{- define "prometheus.serviceAccountName.alertmanager" -}}
{{- if .Values.serviceAccounts.alertmanager.create -}}
{{ default (include "prometheus.alertmanager.fullname" .) .Values.serviceAccounts.alertmanager.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.alertmanager.name }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to use for the kubeStateMetrics component
*/}}
{{- define "prometheus.serviceAccountName.kubeStateMetrics" -}}
{{- if .Values.serviceAccounts.kubeStateMetrics.create -}}
{{ default (include "prometheus.kubeStateMetrics.fullname" .) .Values.serviceAccounts.kubeStateMetrics.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.kubeStateMetrics.name }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to use for the nodeExporter component
*/}}
{{- define "prometheus.serviceAccountName.nodeExporter" -}}
{{- if .Values.serviceAccounts.nodeExporter.create -}}
{{ default (include "prometheus.nodeExporter.fullname" .) .Values.serviceAccounts.nodeExporter.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.nodeExporter.name }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to use for the pushgateway component
*/}}
{{- define "prometheus.serviceAccountName.pushgateway" -}}
{{- if .Values.serviceAccounts.pushgateway.create -}}
{{ default (include "prometheus.pushgateway.fullname" .) .Values.serviceAccounts.pushgateway.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.pushgateway.name }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to use for the server component
*/}}
{{- define "prometheus.serviceAccountName.server" -}}
{{- if .Values.serviceAccounts.server.create -}}
{{ default (include "prometheus.server.fullname" .) .Values.serviceAccounts.server.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.server.name }}
{{- end -}}
{{- end -}}

View File

@ -1,18 +0,0 @@
{{- if and .Values.alertmanager.enabled (empty .Values.alertmanager.configMapOverrideName) -}}
apiVersion: v1
kind: ConfigMap
metadata:
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.alertmanager.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "prometheus.alertmanager.fullname" . }}
data:
{{- $root := . -}}
{{- range $key, $value := .Values.alertmanagerFiles }}
{{ $key }}: |
{{ toYaml $value | default "{}" | indent 4 }}
{{- end -}}
{{- end -}}

View File

@ -1,115 +0,0 @@
{{- if .Values.alertmanager.enabled -}}
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.alertmanager.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "prometheus.alertmanager.fullname" . }}
spec:
replicas: {{ .Values.alertmanager.replicaCount }}
{{- if .Values.server.strategy }}
strategy:
{{ toYaml .Values.server.strategy | indent 4 }}
{{- end }}
template:
metadata:
{{- if .Values.alertmanager.podAnnotations }}
annotations:
{{ toYaml .Values.alertmanager.podAnnotations | indent 8 }}
{{- end }}
labels:
app: {{ template "prometheus.name" . }}
component: "{{ .Values.alertmanager.name }}"
release: {{ .Release.Name }}
spec:
{{- if .Values.alertmanager.affinity }}
affinity:
{{ toYaml .Values.alertmanager.affinity | indent 8 }}
{{- end }}
{{- if .Values.alertmanager.schedulerName }}
schedulerName: "{{ .Values.alertmanager.schedulerName }}"
{{- end }}
serviceAccountName: {{ template "prometheus.serviceAccountName.alertmanager" . }}
{{- if .Values.alertmanager.priorityClassName }}
priorityClassName: "{{ .Values.alertmanager.priorityClassName }}"
{{- end }}
containers:
- name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }}
image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}"
imagePullPolicy: "{{ .Values.alertmanager.image.pullPolicy }}"
env:
{{- range $key, $value := .Values.alertmanager.extraEnv }}
- name: {{ $key }}
value: {{ $value }}
{{- end }}
args:
- --config.file=/etc/config/alertmanager.yml
- --storage.path={{ .Values.alertmanager.persistentVolume.mountPath }}
{{- range $key, $value := .Values.alertmanager.extraArgs }}
- --{{ $key }}={{ $value }}
{{- end }}
{{- if .Values.alertmanager.baseURL }}
- --web.external-url={{ .Values.alertmanager.baseURL }}
{{- end }}
ports:
- containerPort: 9093
readinessProbe:
httpGet:
path: {{ .Values.alertmanager.prefixURL }}/#/status
port: 9093
initialDelaySeconds: 30
timeoutSeconds: 30
resources:
{{ toYaml .Values.alertmanager.resources | indent 12 }}
volumeMounts:
- name: config-volume
mountPath: /etc/config
- name: storage-volume
mountPath: "{{ .Values.alertmanager.persistentVolume.mountPath }}"
subPath: "{{ .Values.alertmanager.persistentVolume.subPath }}"
- name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }}-{{ .Values.configmapReload.name }}
image: "{{ .Values.configmapReload.image.repository }}:{{ .Values.configmapReload.image.tag }}"
imagePullPolicy: "{{ .Values.configmapReload.image.pullPolicy }}"
args:
- --volume-dir=/etc/config
- --webhook-url=http://localhost:9093{{ .Values.alertmanager.prefixURL }}/-/reload
resources:
{{ toYaml .Values.configmapReload.resources | indent 12 }}
volumeMounts:
- name: config-volume
mountPath: /etc/config
readOnly: true
{{- if .Values.alertmanager.nodeSelector }}
nodeSelector:
{{ toYaml .Values.alertmanager.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.alertmanager.securityContext }}
securityContext:
{{ toYaml .Values.alertmanager.securityContext | indent 8 }}
{{- end }}
{{- if .Values.alertmanager.tolerations }}
tolerations:
{{ toYaml .Values.alertmanager.tolerations | indent 8 }}
{{- end }}
volumes:
- name: config-volume
configMap:
name: {{ if .Values.alertmanager.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.alertmanager.configMapOverrideName }}{{- else }}{{ template "prometheus.alertmanager.fullname" . }}{{- end }}
- name: storage-volume
{{- if .Values.alertmanager.persistentVolume.enabled }}
persistentVolumeClaim:
claimName: {{ if .Values.alertmanager.persistentVolume.existingClaim }}{{ .Values.alertmanager.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.alertmanager.fullname" . }}{{- end }}
{{- else }}
emptyDir: {}
{{- end -}}
{{- end }}

View File

@ -1,38 +0,0 @@
{{- if and .Values.alertmanager.enabled .Values.alertmanager.ingress.enabled -}}
{{- $releaseName := .Release.Name -}}
{{- $serviceName := include "prometheus.alertmanager.fullname" . }}
{{- $servicePort := .Values.alertmanager.service.servicePort -}}
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
{{- if .Values.alertmanager.ingress.annotations }}
annotations:
{{ toYaml .Values.alertmanager.ingress.annotations | indent 4 }}
{{- end }}
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.alertmanager.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
{{- range $key, $value := .Values.alertmanager.ingress.extraLabels }}
{{ $key }}: {{ $value }}
{{- end }}
name: {{ template "prometheus.alertmanager.fullname" . }}
spec:
rules:
{{- range .Values.alertmanager.ingress.hosts }}
{{- $url := splitList "/" . }}
- host: {{ first $url }}
http:
paths:
- path: /{{ rest $url | join "/" }}
backend:
serviceName: {{ $serviceName }}
servicePort: {{ $servicePort }}
{{- end -}}
{{- if .Values.alertmanager.ingress.tls }}
tls:
{{ toYaml .Values.alertmanager.ingress.tls | indent 4 }}
{{- end -}}
{{- end -}}

View File

@ -1,26 +0,0 @@
{{- if .Values.networkPolicy.enabled }}
apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }}
kind: NetworkPolicy
metadata:
name: {{ template "prometheus.alertmanager.fullname" . }}
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.alertmanager.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
podSelector:
matchLabels:
app: {{ template "prometheus.name" . }}
component: "{{ .Values.alertmanager.name }}"
release: {{ .Release.Name }}
ingress:
- from:
- podSelector:
matchLabels:
release: {{ .Release.Name }}
component: "{{ .Values.server.name }}"
- ports:
- port: 9093
{{- end }}

View File

@ -1,31 +0,0 @@
{{- if and .Values.alertmanager.enabled .Values.alertmanager.persistentVolume.enabled -}}
{{- if not .Values.alertmanager.persistentVolume.existingClaim -}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
{{- if .Values.alertmanager.persistentVolume.annotations }}
annotations:
{{ toYaml .Values.alertmanager.persistentVolume.annotations | indent 4 }}
{{- end }}
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.alertmanager.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "prometheus.alertmanager.fullname" . }}
spec:
accessModes:
{{ toYaml .Values.alertmanager.persistentVolume.accessModes | indent 4 }}
{{- if .Values.alertmanager.persistentVolume.storageClass }}
{{- if (eq "-" .Values.alertmanager.persistentVolume.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.alertmanager.persistentVolume.storageClass }}"
{{- end }}
{{- end }}
resources:
requests:
storage: "{{ .Values.alertmanager.persistentVolume.size }}"
{{- end -}}
{{- end -}}

View File

@ -1,55 +0,0 @@
{{- if .Values.alertmanager.enabled -}}
apiVersion: v1
kind: Service
metadata:
{{- if .Values.alertmanager.service.annotations }}
annotations:
{{ toYaml .Values.alertmanager.service.annotations | indent 4 }}
{{- end }}
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.alertmanager.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
{{- if .Values.alertmanager.service.labels }}
{{ toYaml .Values.alertmanager.service.labels | indent 4 }}
{{- end }}
name: {{ template "prometheus.alertmanager.fullname" . }}
spec:
{{- if .Values.alertmanager.service.clusterIP }}
clusterIP: {{ .Values.alertmanager.service.clusterIP }}
{{- end }}
{{- if .Values.alertmanager.service.externalIPs }}
externalIPs:
{{ toYaml .Values.alertmanager.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.alertmanager.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.alertmanager.service.loadBalancerIP }}
{{- end }}
{{- if .Values.alertmanager.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{- range $cidr := .Values.alertmanager.service.loadBalancerSourceRanges }}
- {{ $cidr }}
{{- end }}
{{- end }}
ports:
- name: http
port: {{ .Values.alertmanager.service.servicePort }}
protocol: TCP
targetPort: 9093
{{- if .Values.alertmanager.service.nodePort }}
nodePort: {{ .Values.alertmanager.service.nodePort }}
{{- end }}
{{- if .Values.alertmanager.service.enableMeshPeer }}
- name: meshpeer
port: 6783
protocol: TCP
targetPort: 6783
{{- end }}
selector:
app: {{ template "prometheus.name" . }}
component: "{{ .Values.alertmanager.name }}"
release: {{ .Release.Name }}
type: "{{ .Values.alertmanager.service.type }}"
{{- end }}

View File

@ -1,12 +0,0 @@
{{- if .Values.serviceAccounts.alertmanager.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.alertmanager.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "prometheus.serviceAccountName.alertmanager" . }}
{{- end }}

View File

@ -1,64 +0,0 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.kubeStateMetrics.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "prometheus.kubeStateMetrics.fullname" . }}
rules:
- apiGroups:
- ""
resources:
- namespaces
- nodes
- persistentvolumeclaims
- pods
- services
- resourcequotas
- replicationcontrollers
- limitranges
- persistentvolumes
- endpoints
- secrets
- configmaps
verbs:
- list
- watch
- apiGroups:
- extensions
resources:
- daemonsets
- deployments
- replicasets
verbs:
- list
- watch
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- get
- list
- watch
- apiGroups:
- batch
resources:
- cronjobs
- jobs
verbs:
- list
- watch
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs:
- list
- watch
{{- end }}

View File

@ -1,20 +0,0 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.kubeStateMetrics.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "prometheus.kubeStateMetrics.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "prometheus.serviceAccountName.kubeStateMetrics" . }}
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "prometheus.kubeStateMetrics.fullname" . }}
{{- end }}

View File

@ -1,67 +0,0 @@
{{- if .Values.kubeStateMetrics.enabled -}}
apiVersion: apps/v1
kind: Deployment
metadata:
{{- if .Values.kubeStateMetrics.deploymentAnnotations }}
annotations:
{{ toYaml .Values.kubeStateMetrics.deploymentAnnotations | indent 4 }}
{{- end }}
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.kubeStateMetrics.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "prometheus.kubeStateMetrics.fullname" . }}
spec:
replicas: {{ .Values.kubeStateMetrics.replicaCount }}
template:
metadata:
{{- if .Values.kubeStateMetrics.podAnnotations }}
annotations:
{{ toYaml .Values.kubeStateMetrics.podAnnotations | indent 8 }}
{{- end }}
labels:
app: {{ template "prometheus.name" . }}
component: "{{ .Values.kubeStateMetrics.name }}"
release: {{ .Release.Name }}
{{- if .Values.kubeStateMetrics.pod.labels }}
{{ toYaml .Values.kubeStateMetrics.pod.labels | indent 8 }}
{{- end }}
spec:
serviceAccountName: {{ template "prometheus.serviceAccountName.kubeStateMetrics" . }}
{{- if .Values.kubeStateMetrics.priorityClassName }}
priorityClassName: "{{ .Values.kubeStateMetrics.priorityClassName }}"
{{- end }}
containers:
- name: {{ template "prometheus.name" . }}-{{ .Values.kubeStateMetrics.name }}
image: "{{ .Values.kubeStateMetrics.image.repository }}:{{ .Values.kubeStateMetrics.image.tag }}"
imagePullPolicy: "{{ .Values.kubeStateMetrics.image.pullPolicy }}"
{{- if .Values.kubeStateMetrics.args }}
args:
{{- range $key, $value := .Values.kubeStateMetrics.args }}
- --{{ $key }}={{ $value }}
{{- end }}
{{- end }}
ports:
- name: metrics
containerPort: 8080
resources:
{{ toYaml .Values.kubeStateMetrics.resources | indent 12 }}
{{- if .Values.kubeStateMetrics.nodeSelector }}
nodeSelector:
{{ toYaml .Values.kubeStateMetrics.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.kubeStateMetrics.securityContext }}
securityContext:
{{ toYaml .Values.kubeStateMetrics.securityContext | indent 8 }}
{{- end }}
{{- if .Values.kubeStateMetrics.tolerations }}
tolerations:
{{ toYaml .Values.kubeStateMetrics.tolerations | indent 8 }}
{{- end }}
{{- if .Values.kubeStateMetrics.affinity }}
affinity:
{{ toYaml .Values.kubeStateMetrics.affinity | indent 8 }}
{{- end }}
{{- end }}

View File

@ -1,26 +0,0 @@
{{- if .Values.networkPolicy.enabled }}
apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }}
kind: NetworkPolicy
metadata:
name: {{ template "prometheus.kubeStateMetrics.fullname" . }}
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.kubeStateMetrics.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
podSelector:
matchLabels:
app: {{ template "prometheus.name" . }}
component: "{{ .Values.kubeStateMetrics.name }}"
release: {{ .Release.Name }}
ingress:
- from:
- podSelector:
matchLabels:
release: {{ .Release.Name }}
component: "{{ .Values.server.name }}"
- ports:
- port: 8080
{{- end }}

View File

@ -1,12 +0,0 @@
{{- if .Values.serviceAccounts.kubeStateMetrics.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.kubeStateMetrics.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "prometheus.serviceAccountName.kubeStateMetrics" . }}
{{- end }}

View File

@ -1,46 +0,0 @@
{{- if .Values.kubeStateMetrics.enabled -}}
apiVersion: v1
kind: Service
metadata:
{{- if .Values.kubeStateMetrics.service.annotations }}
annotations:
{{ toYaml .Values.kubeStateMetrics.service.annotations | indent 4 }}
{{- end }}
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.kubeStateMetrics.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
{{- if .Values.kubeStateMetrics.service.labels }}
{{ toYaml .Values.kubeStateMetrics.service.labels | indent 4 }}
{{- end }}
name: {{ template "prometheus.kubeStateMetrics.fullname" . }}
spec:
{{- if .Values.kubeStateMetrics.service.clusterIP }}
clusterIP: {{ .Values.kubeStateMetrics.service.clusterIP }}
{{- end }}
{{- if .Values.kubeStateMetrics.service.externalIPs }}
externalIPs:
{{ toYaml .Values.kubeStateMetrics.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.kubeStateMetrics.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.kubeStateMetrics.service.loadBalancerIP }}
{{- end }}
{{- if .Values.kubeStateMetrics.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{- range $cidr := .Values.kubeStateMetrics.service.loadBalancerSourceRanges }}
- {{ $cidr }}
{{- end }}
{{- end }}
ports:
- name: http
port: {{ .Values.kubeStateMetrics.service.servicePort }}
protocol: TCP
targetPort: 8080
selector:
app: {{ template "prometheus.name" . }}
component: "{{ .Values.kubeStateMetrics.name }}"
release: {{ .Release.Name }}
type: "{{ .Values.kubeStateMetrics.service.type }}"
{{- end }}

View File

@ -1,112 +0,0 @@
{{- if .Values.nodeExporter.enabled -}}
apiVersion: apps/v1
kind: DaemonSet
metadata:
{{- if .Values.nodeExporter.deploymentAnnotations }}
annotations:
{{ toYaml .Values.nodeExporter.deploymentAnnotations | indent 4 }}
{{- end }}
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.nodeExporter.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "prometheus.nodeExporter.fullname" . }}
spec:
{{- if .Values.nodeExporter.updateStrategy }}
updateStrategy:
{{ toYaml .Values.nodeExporter.updateStrategy | indent 4 }}
{{- end }}
template:
metadata:
{{- if .Values.nodeExporter.podAnnotations }}
annotations:
{{ toYaml .Values.nodeExporter.podAnnotations | indent 8 }}
{{- end }}
labels:
app: {{ template "prometheus.name" . }}
component: "{{ .Values.nodeExporter.name }}"
release: {{ .Release.Name }}
{{- if .Values.nodeExporter.pod.labels }}
{{ toYaml .Values.nodeExporter.pod.labels | indent 8 }}
{{- end }}
spec:
serviceAccountName: {{ template "prometheus.serviceAccountName.nodeExporter" . }}
{{- if .Values.nodeExporter.priorityClassName }}
priorityClassName: "{{ .Values.nodeExporter.priorityClassName }}"
{{- end }}
containers:
- name: {{ template "prometheus.name" . }}-{{ .Values.nodeExporter.name }}
image: "{{ .Values.nodeExporter.image.repository }}:{{ .Values.nodeExporter.image.tag }}"
imagePullPolicy: "{{ .Values.nodeExporter.image.pullPolicy }}"
args:
- --path.procfs=/host/proc
- --path.sysfs=/host/sys
{{- range $key, $value := .Values.nodeExporter.extraArgs }}
{{- if $value }}
- --{{ $key }}={{ $value }}
{{- else }}
- --{{ $key }}
{{- end }}
{{- end }}
ports:
- name: metrics
containerPort: 9100
hostPort: {{ .Values.nodeExporter.service.hostPort }}
resources:
{{ toYaml .Values.nodeExporter.resources | indent 12 }}
volumeMounts:
- name: proc
mountPath: /host/proc
readOnly: true
- name: sys
mountPath: /host/sys
readOnly: true
{{- range .Values.nodeExporter.extraHostPathMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
readOnly: {{ .readOnly }}
{{- end }}
{{- range .Values.nodeExporter.extraConfigmapMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
readOnly: {{ .readOnly }}
{{- end }}
{{- if .Values.nodeExporter.hostNetwork }}
hostNetwork: true
{{- end }}
{{- if .Values.nodeExporter.hostPID }}
hostPID: true
{{- end }}
{{- if .Values.nodeExporter.tolerations }}
tolerations:
{{ toYaml .Values.nodeExporter.tolerations | indent 8 }}
{{- end }}
{{- if .Values.nodeExporter.nodeSelector }}
nodeSelector:
{{ toYaml .Values.nodeExporter.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.nodeExporter.securityContext }}
securityContext:
{{ toYaml .Values.nodeExporter.securityContext | indent 8 }}
{{- end }}
volumes:
- name: proc
hostPath:
path: /proc
- name: sys
hostPath:
path: /sys
{{- range .Values.nodeExporter.extraHostPathMounts }}
- name: {{ .name }}
hostPath:
path: {{ .hostPath }}
{{- end }}
{{- range .Values.nodeExporter.extraConfigmapMounts }}
- name: {{ .name }}
configMap:
name: {{ .configMap }}
{{- end }}
{{- end -}}

View File

@@ -1,46 +0,0 @@
{{- if .Values.nodeExporter.enabled -}}
apiVersion: v1
kind: Service
metadata:
{{- if .Values.nodeExporter.service.annotations }}
annotations:
{{ toYaml .Values.nodeExporter.service.annotations | indent 4 }}
{{- end }}
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.nodeExporter.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
{{- if .Values.nodeExporter.service.labels }}
{{ toYaml .Values.nodeExporter.service.labels | indent 4 }}
{{- end }}
name: {{ template "prometheus.nodeExporter.fullname" . }}
spec:
{{- if .Values.nodeExporter.service.clusterIP }}
clusterIP: {{ .Values.nodeExporter.service.clusterIP }}
{{- end }}
{{- if .Values.nodeExporter.service.externalIPs }}
externalIPs:
{{ toYaml .Values.nodeExporter.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.nodeExporter.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.nodeExporter.service.loadBalancerIP }}
{{- end }}
{{- if .Values.nodeExporter.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{- range $cidr := .Values.nodeExporter.service.loadBalancerSourceRanges }}
- {{ $cidr }}
{{- end }}
{{- end }}
ports:
- name: metrics
port: {{ .Values.nodeExporter.service.servicePort }}
protocol: TCP
targetPort: 9100
selector:
app: {{ template "prometheus.name" . }}
component: "{{ .Values.nodeExporter.name }}"
release: {{ .Release.Name }}
type: "{{ .Values.nodeExporter.service.type }}"
{{- end -}}

View File

@@ -1,12 +0,0 @@
{{- if .Values.serviceAccounts.nodeExporter.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.nodeExporter.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "prometheus.serviceAccountName.nodeExporter" . }}
{{- end }}

View File

@@ -1,67 +0,0 @@
{{- if .Values.pushgateway.enabled -}}
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.pushgateway.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "prometheus.pushgateway.fullname" . }}
spec:
replicas: {{ .Values.pushgateway.replicaCount }}
template:
metadata:
{{- if .Values.pushgateway.podAnnotations }}
annotations:
{{ toYaml .Values.pushgateway.podAnnotations | indent 8 }}
{{- end }}
labels:
app: {{ template "prometheus.name" . }}
component: "{{ .Values.pushgateway.name }}"
release: {{ .Release.Name }}
spec:
serviceAccountName: {{ template "prometheus.serviceAccountName.pushgateway" . }}
{{- if .Values.pushgateway.priorityClassName }}
priorityClassName: "{{ .Values.pushgateway.priorityClassName }}"
{{- end }}
containers:
- name: {{ template "prometheus.name" . }}-{{ .Values.pushgateway.name }}
image: "{{ .Values.pushgateway.image.repository }}:{{ .Values.pushgateway.image.tag }}"
imagePullPolicy: "{{ .Values.pushgateway.image.pullPolicy }}"
args:
{{- range $key, $value := .Values.pushgateway.extraArgs }}
- --{{ $key }}={{ $value }}
{{- end }}
ports:
- containerPort: 9091
readinessProbe:
httpGet:
{{- if (index .Values "pushgateway" "extraArgs" "web.route-prefix") }}
path: /{{ index .Values "pushgateway" "extraArgs" "web.route-prefix" }}/#/status
{{- else }}
path: /#/status
{{- end }}
port: 9091
initialDelaySeconds: 10
timeoutSeconds: 10
resources:
{{ toYaml .Values.pushgateway.resources | indent 12 }}
{{- if .Values.pushgateway.nodeSelector }}
nodeSelector:
{{ toYaml .Values.pushgateway.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.pushgateway.securityContext }}
securityContext:
{{ toYaml .Values.pushgateway.securityContext | indent 8 }}
{{- end }}
{{- if .Values.pushgateway.tolerations }}
tolerations:
{{ toYaml .Values.pushgateway.tolerations | indent 8 }}
{{- end }}
{{- if .Values.pushgateway.affinity }}
affinity:
{{ toYaml .Values.pushgateway.affinity | indent 8 }}
{{- end }}
{{- end }}

View File

@@ -1,35 +0,0 @@
{{- if and .Values.pushgateway.enabled .Values.pushgateway.ingress.enabled -}}
{{- $releaseName := .Release.Name -}}
{{- $serviceName := include "prometheus.pushgateway.fullname" . }}
{{- $servicePort := .Values.pushgateway.service.servicePort -}}
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
{{- if .Values.pushgateway.ingress.annotations }}
annotations:
{{ toYaml .Values.pushgateway.ingress.annotations | indent 4}}
{{- end }}
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.pushgateway.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "prometheus.pushgateway.fullname" . }}
spec:
rules:
{{- range .Values.pushgateway.ingress.hosts }}
{{- $url := splitList "/" . }}
- host: {{ first $url }}
http:
paths:
- path: /{{ rest $url | join "/" }}
backend:
serviceName: {{ $serviceName }}
servicePort: {{ $servicePort }}
{{- end -}}
{{- if .Values.pushgateway.ingress.tls }}
tls:
{{ toYaml .Values.pushgateway.ingress.tls | indent 4 }}
{{- end -}}
{{- end -}}

View File

@@ -1,46 +0,0 @@
{{- if .Values.pushgateway.enabled -}}
apiVersion: v1
kind: Service
metadata:
{{- if .Values.pushgateway.service.annotations }}
annotations:
{{ toYaml .Values.pushgateway.service.annotations | indent 4}}
{{- end }}
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.pushgateway.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
{{- if .Values.pushgateway.service.labels }}
{{ toYaml .Values.pushgateway.service.labels | indent 4}}
{{- end }}
name: {{ template "prometheus.pushgateway.fullname" . }}
spec:
{{- if .Values.pushgateway.service.clusterIP }}
clusterIP: {{ .Values.pushgateway.service.clusterIP }}
{{- end }}
{{- if .Values.pushgateway.service.externalIPs }}
externalIPs:
{{ toYaml .Values.pushgateway.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.pushgateway.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.pushgateway.service.loadBalancerIP }}
{{- end }}
{{- if .Values.pushgateway.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{- range $cidr := .Values.pushgateway.service.loadBalancerSourceRanges }}
- {{ $cidr }}
{{- end }}
{{- end }}
ports:
- name: http
port: {{ .Values.pushgateway.service.servicePort }}
protocol: TCP
targetPort: 9091
selector:
app: {{ template "prometheus.name" . }}
component: "{{ .Values.pushgateway.name }}"
release: {{ .Release.Name }}
type: "{{ .Values.pushgateway.service.type }}"
{{- end }}

View File

@@ -1,12 +0,0 @@
{{- if .Values.serviceAccounts.pushgateway.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.pushgateway.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "prometheus.serviceAccountName.pushgateway" . }}
{{- end }}

View File

@@ -1,45 +0,0 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.server.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "prometheus.server.fullname" . }}
rules:
- apiGroups:
- ""
resources:
- nodes
- nodes/proxy
- services
- endpoints
- pods
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- apiGroups:
- "extensions"
resources:
- ingresses/status
- ingresses
verbs:
- get
- list
- watch
- nonResourceURLs:
- "/metrics"
verbs:
- get
{{- end }}

View File

@@ -1,20 +0,0 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.server.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "prometheus.server.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "prometheus.serviceAccountName.server" . }}
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "prometheus.server.fullname" . }}
{{- end }}

View File

@@ -1,49 +0,0 @@
{{- if (empty .Values.server.configMapOverrideName) -}}
apiVersion: v1
kind: ConfigMap
metadata:
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.server.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "prometheus.server.fullname" . }}
data:
{{- $root := . -}}
{{- range $key, $value := .Values.serverFiles }}
{{ $key }}: |
{{- if eq $key "prometheus.yml" }}
global:
{{ $root.Values.server.global | toYaml | indent 6 }}
{{- end }}
{{ toYaml $value | default "{}" | indent 4 }}
{{- if eq $key "prometheus.yml" -}}
{{- if $root.Values.alertmanager.enabled }}
alerting:
alertmanagers:
- kubernetes_sd_configs:
- role: pod
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
{{- if $root.Values.alertmanager.prefixURL }}
path_prefix: {{ $root.Values.alertmanager.prefixURL }}
{{- end }}
relabel_configs:
- source_labels: [__meta_kubernetes_namespace]
regex: {{ $root.Release.Namespace }}
action: keep
- source_labels: [__meta_kubernetes_pod_label_app]
regex: {{ template "prometheus.name" $root }}
action: keep
- source_labels: [__meta_kubernetes_pod_label_component]
regex: alertmanager
action: keep
- source_labels: [__meta_kubernetes_pod_container_port_number]
regex:
action: drop
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@@ -1,189 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
{{- if .Values.server.deploymentAnnotations }}
annotations:
{{ toYaml .Values.server.deploymentAnnotations | indent 4 }}
{{- end }}
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.server.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "prometheus.server.fullname" . }}
spec:
replicas: {{ .Values.server.replicaCount }}
{{- if .Values.server.strategy }}
strategy:
{{ toYaml .Values.server.strategy | indent 4 }}
{{- end }}
template:
metadata:
{{- if .Values.server.podAnnotations }}
annotations:
{{ toYaml .Values.server.podAnnotations | indent 8 }}
{{- end }}
labels:
app: {{ template "prometheus.name" . }}
component: "{{ .Values.server.name }}"
release: {{ .Release.Name }}
spec:
{{- if .Values.server.affinity }}
affinity:
{{ toYaml .Values.server.affinity | indent 8 }}
{{- end }}
{{- if .Values.server.priorityClassName }}
priorityClassName: "{{ .Values.server.priorityClassName }}"
{{- end }}
{{- if .Values.server.schedulerName }}
schedulerName: "{{ .Values.server.schedulerName }}"
{{- end }}
serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }}
{{- if .Values.initChownData.enabled }}
initContainers:
- name: "{{ .Values.initChownData.name }}"
image: "{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}"
imagePullPolicy: "{{ .Values.initChownData.image.pullPolicy }}"
resources:
{{ toYaml .Values.initChownData.resources | indent 12 }}
# 65534 is the nobody user that prometheus uses.
command: ["chown", "-R", "65534:65534", "{{ .Values.server.persistentVolume.mountPath }}"]
volumeMounts:
- name: storage-volume
mountPath: {{ .Values.server.persistentVolume.mountPath }}
subPath: "{{ .Values.server.persistentVolume.subPath }}"
{{- end }}
containers:
- name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.name }}
image: "{{ .Values.configmapReload.image.repository }}:{{ .Values.configmapReload.image.tag }}"
imagePullPolicy: "{{ .Values.configmapReload.image.pullPolicy }}"
args:
- --volume-dir=/etc/config
- --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload
{{- range $key, $value := .Values.configmapReload.extraArgs }}
- --{{ $key }}={{ $value }}
{{- end }}
resources:
{{ toYaml .Values.configmapReload.resources | indent 12 }}
volumeMounts:
- name: config-volume
mountPath: /etc/config
readOnly: true
{{- range .Values.configmapReload.extraConfigmapMounts }}
- name: {{ $.Values.configmapReload.name }}-{{ .name }}
mountPath: {{ .mountPath }}
readOnly: {{ .readOnly }}
{{- end }}
- name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}
image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag }}"
imagePullPolicy: "{{ .Values.server.image.pullPolicy }}"
args:
{{- if .Values.server.retention }}
- --storage.tsdb.retention={{ .Values.server.retention }}
{{- end }}
- --config.file=/etc/config/prometheus.yml
- --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }}
- --web.console.libraries=/etc/prometheus/console_libraries
- --web.console.templates=/etc/prometheus/consoles
- --web.enable-lifecycle
{{- range $key, $value := .Values.server.extraArgs }}
- --{{ $key }}={{ $value }}
{{- end }}
{{- if .Values.server.baseURL }}
- --web.external-url={{ .Values.server.baseURL }}
{{- end }}
{{- if .Values.server.enableAdminApi }}
- --web.enable-admin-api
{{- end }}
ports:
- containerPort: 9090
readinessProbe:
httpGet:
path: {{ .Values.server.prefixURL }}/-/ready
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30
livenessProbe:
httpGet:
path: {{ .Values.server.prefixURL }}/-/healthy
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30
resources:
{{ toYaml .Values.server.resources | indent 12 }}
volumeMounts:
- name: config-volume
mountPath: /etc/config
- name: storage-volume
mountPath: {{ .Values.server.persistentVolume.mountPath }}
subPath: "{{ .Values.server.persistentVolume.subPath }}"
{{- range .Values.server.extraHostPathMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
readOnly: {{ .readOnly }}
{{- end }}
{{- range .Values.server.extraConfigmapMounts }}
- name: {{ $.Values.server.name }}-{{ .name }}
mountPath: {{ .mountPath }}
readOnly: {{ .readOnly }}
{{- end }}
{{- range .Values.server.extraSecretMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
readOnly: {{ .readOnly }}
{{- end }}
{{- if .Values.server.nodeSelector }}
nodeSelector:
{{ toYaml .Values.server.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.server.securityContext }}
securityContext:
{{ toYaml .Values.server.securityContext | indent 8 }}
{{- end }}
{{- if .Values.server.tolerations }}
tolerations:
{{ toYaml .Values.server.tolerations | indent 8 }}
{{- end }}
{{- if .Values.server.affinity }}
affinity:
{{ toYaml .Values.server.affinity | indent 8 }}
{{- end }}
terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }}
volumes:
- name: config-volume
configMap:
name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }}
- name: storage-volume
{{- if .Values.server.persistentVolume.enabled }}
persistentVolumeClaim:
claimName: {{ if .Values.server.persistentVolume.existingClaim }}{{ .Values.server.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }}
{{- else }}
emptyDir: {}
{{- end -}}
{{- range .Values.server.extraHostPathMounts }}
- name: {{ .name }}
hostPath:
path: {{ .hostPath }}
{{- end }}
{{- range .Values.configmapReload.extraConfigmapMounts }}
- name: {{ $.Values.configmapReload.name }}-{{ .name }}
configMap:
name: {{ .configMap }}
{{- end }}
{{- range .Values.server.extraConfigmapMounts }}
- name: {{ $.Values.server.name }}-{{ .name }}
configMap:
name: {{ .configMap }}
{{- end }}
{{- range .Values.server.extraSecretMounts }}
- name: {{ .name }}
secret:
secretName: {{ .secretName }}
{{- end }}
{{- range .Values.configmapReload.extraConfigmapMounts }}
- name: {{ .name }}
configMap:
name: {{ .configMap }}
{{- end }}

View File

@@ -1,38 +0,0 @@
{{- if .Values.server.ingress.enabled -}}
{{- $releaseName := .Release.Name -}}
{{- $serviceName := include "prometheus.server.fullname" . }}
{{- $servicePort := .Values.server.service.servicePort -}}
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
{{- if .Values.server.ingress.annotations }}
annotations:
{{ toYaml .Values.server.ingress.annotations | indent 4 }}
{{- end }}
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.server.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
{{- range $key, $value := .Values.server.ingress.extraLabels }}
{{ $key }}: {{ $value }}
{{- end }}
name: {{ template "prometheus.server.fullname" . }}
spec:
rules:
{{- range .Values.server.ingress.hosts }}
{{- $url := splitList "/" . }}
- host: {{ first $url }}
http:
paths:
- path: /{{ rest $url | join "/" }}
backend:
serviceName: {{ $serviceName }}
servicePort: {{ $servicePort }}
{{- end -}}
{{- if .Values.server.ingress.tls }}
tls:
{{ toYaml .Values.server.ingress.tls | indent 4 }}
{{- end -}}
{{- end -}}

View File

@@ -1,21 +0,0 @@
{{- if .Values.networkPolicy.enabled }}
apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }}
kind: NetworkPolicy
metadata:
name: {{ template "prometheus.server.fullname" . }}
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.server.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
podSelector:
matchLabels:
app: {{ template "prometheus.name" . }}
component: "{{ .Values.server.name }}"
release: {{ .Release.Name }}
ingress:
- ports:
- port: 9090
{{- end }}

View File

@@ -1,31 +0,0 @@
{{- if .Values.server.persistentVolume.enabled -}}
{{- if not .Values.server.persistentVolume.existingClaim -}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
{{- if .Values.server.persistentVolume.annotations }}
annotations:
{{ toYaml .Values.server.persistentVolume.annotations | indent 4 }}
{{- end }}
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.server.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "prometheus.server.fullname" . }}
spec:
accessModes:
{{ toYaml .Values.server.persistentVolume.accessModes | indent 4 }}
{{- if .Values.server.persistentVolume.storageClass }}
{{- if (eq "-" .Values.server.persistentVolume.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.server.persistentVolume.storageClass }}"
{{- end }}
{{- end }}
resources:
requests:
storage: "{{ .Values.server.persistentVolume.size }}"
{{- end -}}
{{- end -}}

View File

@@ -1,47 +0,0 @@
apiVersion: v1
kind: Service
metadata:
{{- if .Values.server.service.annotations }}
annotations:
{{ toYaml .Values.server.service.annotations | indent 4 }}
{{- end }}
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.server.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
{{- if .Values.server.service.labels }}
{{ toYaml .Values.server.service.labels | indent 4 }}
{{- end }}
name: {{ template "prometheus.server.fullname" . }}
spec:
{{- if .Values.server.service.clusterIP }}
clusterIP: {{ .Values.server.service.clusterIP }}
{{- end }}
{{- if .Values.server.service.externalIPs }}
externalIPs:
{{ toYaml .Values.server.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.server.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.server.service.loadBalancerIP }}
{{- end }}
{{- if .Values.server.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{- range $cidr := .Values.server.service.loadBalancerSourceRanges }}
- {{ $cidr }}
{{- end }}
{{- end }}
ports:
- name: http
port: {{ .Values.server.service.servicePort }}
protocol: TCP
targetPort: 9090
{{- if .Values.server.service.nodePort }}
nodePort: {{ .Values.server.service.nodePort }}
{{- end }}
selector:
app: {{ template "prometheus.name" . }}
component: "{{ .Values.server.name }}"
release: {{ .Release.Name }}
type: "{{ .Values.server.service.type }}"

View File

@@ -1,12 +0,0 @@
{{- if .Values.serviceAccounts.server.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.server.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "prometheus.serviceAccountName.server" . }}
{{- end }}

File diff suppressed because it is too large

View File

@@ -13,9 +13,6 @@
- { role: kube-router, when: "CLUSTER_NETWORK == 'kube-router'" }
- { role: kube-ovn, when: "CLUSTER_NETWORK == 'kube-ovn'" }
tasks:
- name: Create directory /opt/kube/images
file: dest=/opt/kube/images state=directory
- name: Push offline image packages for cluster-addon
copy: src={{ item }} dest=/opt/kube/images/
with_fileglob:

Binary file not shown.

View File

@@ -0,0 +1,25 @@
- name: Try to push the offline coredns image (failures can be ignored)
copy: src={{ base_dir }}/down/{{ coredns_offline }} dest=/opt/kube/images/{{ coredns_offline }}
when: 'coredns_offline in download_info.stdout'
- name: Check whether the offline coredns image has been pushed
command: "ls /opt/kube/images"
register: image_info
- name: Load the offline coredns image (docker; failures can be ignored)
shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ coredns_offline }}"
when: 'coredns_offline in image_info.stdout and CONTAINER_RUNTIME == "docker"'
- name: Load the offline coredns image (containerd; failures can be ignored)
shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ coredns_offline }}"
when: 'coredns_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
- name: Prepare the DNS deployment manifest
template: src=dns/coredns.yaml.j2 dest={{ cluster_dir }}/yml/coredns.yaml
run_once: true
connection: local
- name: Create the coredns deployment
shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/coredns.yaml"
run_once: true
connection: local
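A quick smoke test once the coredns tasks have run (a minimal sketch, not part of the playbook; assumes kubectl on the deploy host already points at the new cluster):

# wait for the coredns rollout to finish, then resolve an in-cluster name
kubectl -n kube-system rollout status deployment/coredns --timeout=120s
kubectl run dns-test --rm -i --restart=Never --image=busybox:1.28 -- nslookup kubernetes.default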

View File

@@ -0,0 +1,29 @@
- name: Try to push the offline dashboard images (failures can be ignored)
copy: src={{ base_dir }}/down/{{ item }} dest=/opt/kube/images/{{ item }}
when: 'item in download_info.stdout'
with_items:
- "{{ dashboard_offline }}"
- "{{ metricsscraper_offline }}"
- name: Check whether the offline dashboard images have been pushed
command: "ls /opt/kube/images"
register: image_info
- name: Load the offline dashboard images (docker)
shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ item }}"
with_items:
- "{{ dashboard_offline }}"
- "{{ metricsscraper_offline }}"
when: "item in image_info.stdout and CONTAINER_RUNTIME == 'docker'"
- name: Load the offline dashboard images (containerd)
shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ item }}"
with_items:
- "{{ dashboard_offline }}"
- "{{ metricsscraper_offline }}"
when: "item in image_info.stdout and CONTAINER_RUNTIME == 'containerd'"
- name: Create the dashboard deployment
shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/roles/cluster-addon/templates/dashboard"
run_once: true
connection: local
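To confirm the dashboard came up after these tasks (illustrative only; the k8s-app label and the service name below are the upstream dashboard defaults, and kube-system matches the manifests applied above):

kubectl -n kube-system get pods -l k8s-app=kubernetes-dashboard
kubectl -n kube-system get svc kubernetes-dashboard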

View File

@@ -1,49 +1,37 @@
# https://github.com/traefik/traefik-helm-chart
- block:
- block:
- name: Try to push the offline traefik image (failures can be ignored)
copy: src={{ base_dir }}/down/{{ traefik_offline }} dest=/opt/kube/images/{{ traefik_offline }}
when: 'traefik_offline in download_info.stdout'
- name: Check whether the offline traefik image has been pushed
command: "ls /opt/kube/images"
register: image_info
- name: Load the offline traefik image (docker; failures can be ignored)
shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ traefik_offline }}"
when: 'traefik_offline in image_info.stdout and CONTAINER_RUNTIME == "docker"'
- name: Create customized values for the traefik chart
template: src=traefik/values.yaml.j2 dest={{ cluster_dir }}/yml/traefik-values.yaml
- name: Load the offline traefik image (containerd; failures can be ignored)
shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ traefik_offline }}"
when: 'traefik_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
- name: Create the traefik deployment
shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/ingress/traefik/traefik-ingress.yaml"
connection: local
run_once: true
- name: Install the traefik chart {{ traefik_chart_ver }} with helm
shell: "{{ base_dir }}/bin/helm install -n kube-system traefik \
-f {{ cluster_dir }}/yml/traefik-values.yaml \
{{ base_dir }}/roles/cluster-addon/files/traefik-{{ traefik_chart_ver }}.tgz"
run_once: true
connection: local
when: 'ingress_backend == "traefik"'
ignore_errors: true
- block:
- block:
- name: Try to push the offline nginx-ingress image (failures can be ignored)
copy: src={{ base_dir }}/down/{{ nginx_ingress_offline }} dest=/opt/kube/images/{{ nginx_ingress_offline }}
when: 'nginx_ingress_offline in download_info.stdout'
- name: Check whether the offline nginx_ingress image has been pushed
command: "ls /opt/kube/images"
register: image_info
- name: Load the offline nginx_ingress image (docker; failures can be ignored)
shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ nginx_ingress_offline }}"
when: 'nginx_ingress_offline in image_info.stdout and CONTAINER_RUNTIME == "docker"'
- name: Load the offline nginx_ingress image (containerd; failures can be ignored)
shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ nginx_ingress_offline }}"
when: 'nginx_ingress_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
- name: Create the nginx_ingress deployment
shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/ingress/nginx-ingress/nginx-ingress.yaml"
connection: local
run_once: true
when: 'ingress_backend == "nginx-ingress"'
ignore_errors: true
#- block:
# - block:
# - name: Try to push the offline nginx-ingress image (failures can be ignored)
# copy: src={{ base_dir }}/down/{{ nginx_ingress_offline }} dest=/opt/kube/images/{{ nginx_ingress_offline }}
# when: 'nginx_ingress_offline in download_info.stdout'
#
# - name: Check whether the offline nginx_ingress image has been pushed
# command: "ls /opt/kube/images"
# register: image_info
#
# - name: Load the offline nginx_ingress image (docker; failures can be ignored)
# shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ nginx_ingress_offline }}"
# when: 'nginx_ingress_offline in image_info.stdout and CONTAINER_RUNTIME == "docker"'
#
# - name: Load the offline nginx_ingress image (containerd; failures can be ignored)
# shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ nginx_ingress_offline }}"
# when: 'nginx_ingress_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
#
# - name: Create the nginx_ingress deployment
# shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/ingress/nginx-ingress/nginx-ingress.yaml"
# connection: local
# run_once: true
# when: 'ingress_backend == "nginx-ingress"'
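Once the helm install above succeeds, the release can be inspected from the deploy host (a sketch assuming helm 3 and the traefik chart's default app.kubernetes.io/name label):

helm -n kube-system list
kubectl -n kube-system get pods -l app.kubernetes.io/name=traefik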

View File

@@ -1,11 +1,7 @@
- name: Create required directories on the nodes
file: path={{ item }} state=directory
with_items:
- /opt/kube/kube-system
- name: Get information on all created pods
command: "{{ bin_dir }}/kubectl get pod --all-namespaces"
command: "{{ base_dir }}/bin/kubectl get pod --all-namespaces"
register: pod_info
connection: local
run_once: true
- name: Get information on downloaded offline images
@@ -14,97 +10,17 @@
connection: local
run_once: true
- block:
- block:
- name: Try to push the offline coredns image (failures can be ignored)
copy: src={{ base_dir }}/down/{{ coredns_offline }} dest=/opt/kube/images/{{ coredns_offline }}
when: 'coredns_offline in download_info.stdout'
- name: Check whether the offline coredns image has been pushed
command: "ls /opt/kube/images"
register: image_info
- name: Load the offline coredns image (docker; failures can be ignored)
shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ coredns_offline }}"
when: 'coredns_offline in image_info.stdout and CONTAINER_RUNTIME == "docker"'
- name: Load the offline coredns image (containerd; failures can be ignored)
shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ coredns_offline }}"
when: 'coredns_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
- name: Prepare the DNS deployment manifest
template: src=coredns.yaml.j2 dest=/opt/kube/kube-system/coredns.yaml
- import_tasks: coredns.yml
when: '"coredns" not in pod_info.stdout and dns_install == "yes"'
- name: Create the coredns deployment
shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/coredns.yaml"
run_once: true
when:
- '"coredns" not in pod_info.stdout'
- 'dns_install == "yes"'
ignore_errors: true
- block:
- block:
- name: Try to push the offline metrics-server image (failures can be ignored)
copy: src={{ base_dir }}/down/{{ metricsserver_offline }} dest=/opt/kube/images/{{ metricsserver_offline }}
when: 'metricsserver_offline in download_info.stdout'
- name: Check whether the offline metrics-server image has been pushed
command: "ls /opt/kube/images"
register: image_info
- name: Load the offline metrics-server image (docker; failures can be ignored)
shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ metricsserver_offline }}"
when: 'metricsserver_offline in image_info.stdout and CONTAINER_RUNTIME == "docker"'
- name: Load the offline metrics-server image (containerd; failures can be ignored)
shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ metricsserver_offline }}"
when: 'metricsserver_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
- name: Create the metrics-server deployment
shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/metrics-server"
run_once: true
connection: local
- import_tasks: metrics-server.yml
when: '"metrics-server" not in pod_info.stdout and metricsserver_install == "yes"'
ignore_errors: true
# dashboard v2.x.x does not depend on heapster
- block:
- block:
- name: Try to push the offline dashboard images (failures can be ignored)
copy: src={{ base_dir }}/down/{{ item }} dest=/opt/kube/images/{{ item }}
when: 'item in download_info.stdout'
with_items:
- "{{ dashboard_offline }}"
- "{{ metricsscraper_offline }}"
- name: Check whether the offline dashboard images have been pushed
command: "ls /opt/kube/images"
register: image_info
- name: Load the offline dashboard images (docker)
shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ item }}"
with_items:
- "{{ dashboard_offline }}"
- "{{ metricsscraper_offline }}"
when: "item in image_info.stdout and CONTAINER_RUNTIME == 'docker'"
- name: Load the offline dashboard images (containerd)
shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ item }}"
with_items:
- "{{ dashboard_offline }}"
- "{{ metricsscraper_offline }}"
when: "item in image_info.stdout and CONTAINER_RUNTIME == 'containerd'"
- name: Create the dashboard deployment
shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/dashboard"
run_once: true
connection: local
- import_tasks: dashboard.yml
when: '"kubernetes-dashboard" not in pod_info.stdout and dashboard_install == "yes"'
ignore_errors: true
#- import_tasks: ingress.yml
# when: '"ingress-controller" not in pod_info.stdout and ingress_install == "yes"'
- import_tasks: ingress.yml
when: '"traefik" not in pod_info.stdout and ingress_install == "yes"'
- import_tasks: prometheus.yml
when: '"kube-prometheus-operator" not in pod_info.stdout and prom_install == "yes"'

View File

@@ -0,0 +1,20 @@
- name: Try to push the offline metrics-server image (failures can be ignored)
copy: src={{ base_dir }}/down/{{ metricsserver_offline }} dest=/opt/kube/images/{{ metricsserver_offline }}
when: 'metricsserver_offline in download_info.stdout'
- name: Check whether the offline metrics-server image has been pushed
command: "ls /opt/kube/images"
register: image_info
- name: Load the offline metrics-server image (docker; failures can be ignored)
shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ metricsserver_offline }}"
when: 'metricsserver_offline in image_info.stdout and CONTAINER_RUNTIME == "docker"'
- name: Load the offline metrics-server image (containerd; failures can be ignored)
shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ metricsserver_offline }}"
when: 'metricsserver_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
- name: Create the metrics-server deployment
shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/roles/cluster-addon/templates/metrics-server"
run_once: true
connection: local
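After the apply, node metrics take a minute or so to appear; a minimal check from the deploy host (assumes kubectl is configured for the cluster):

kubectl get apiservices | grep metrics.k8s.io
kubectl top nodes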

View File

@@ -1,3 +1,5 @@
# https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack
- block:
- name: Check whether the namespace {{ prom_namespace }} has been created
shell: "{{ base_dir }}/bin/kubectl get ns"

View File

@@ -0,0 +1,163 @@
# Configure the deployment
deployment:
enabled: true
# Can be either Deployment or DaemonSet
kind: Deployment
replicas: 1
# Activate Pilot integration
pilot:
enabled: false
token: ""
# Create an IngressRoute for the dashboard
ingressRoute:
dashboard:
enabled: true
# Configure providers
providers:
kubernetesCRD:
enabled: true
namespaces: []
# - "default"
kubernetesIngress:
enabled: true
namespaces: []
# - "default"
# IP used for Kubernetes Ingress endpoints
publishedService:
enabled: false
# Published Kubernetes Service to copy status from. Format: namespace/servicename
# By default, the service created by this Traefik release is used
# pathOverride: ""
# Add volumes to the traefik pod. The volume name will be passed to tpl.
# This can be used to mount a cert pair or a configmap that holds a config.toml file.
# After the volume has been mounted, add the configs into traefik by using the `additionalArguments` list below, eg:
# additionalArguments:
# - "--providers.file.filename=/config/dynamic.toml"
volumes: []
# - name: public-cert
# mountPath: "/certs"
# type: secret
# - name: xxx
# mountPath: "/config"
# type: configMap
# Additional volumeMounts to add to the Traefik container
additionalVolumeMounts: []
# For instance when using a logshipper for access logs
# - name: traefik-logs
# mountPath: /var/log/traefik
# https://docs.traefik.io/observability/logs/
logs:
# Traefik logs concern everything that happens to Traefik itself (startup, configuration, events, shutdown, and so on).
general:
# By default, the logs use a text format (common), but you can
# also ask for the json format in the format option
# format: json
# By default, the level is set to ERROR. Alternative logging levels are DEBUG, PANIC, FATAL, ERROR, WARN, and INFO.
level: ERROR
access:
# To enable access logs
enabled: false
# By default, logs are written using the Common Log Format (CLF).
# To write logs in JSON, use json in the format option.
# If the given format is unsupported, the default (CLF) is used instead.
# format: json
# To write the logs in an asynchronous fashion, specify a bufferingSize option.
# This option represents the number of log lines Traefik will keep in memory before writing
# them to the selected output. In some cases, this option can greatly help performances.
# bufferingSize: 100
# Filtering https://docs.traefik.io/observability/access-logs/#filtering
filters: {}
# statuscodes: "200,300-302"
# retryattempts: true
# minduration: 10ms
# Fields
# https://docs.traefik.io/observability/access-logs/#limiting-the-fieldsincluding-headers
fields:
general:
defaultmode: keep
names: {}
# Examples:
# ClientUsername: drop
headers:
defaultmode: drop
names: {}
# Examples:
# User-Agent: redact
# Authorization: drop
# Content-Type: keep
globalArguments:
- "--global.checknewversion"
# Configure ports
ports:
traefik:
port: 9000
expose: false
web:
port: 8000
expose: true
exposedPort: 80
protocol: TCP
nodePort: 32080
# Port Redirections
# Added in 2.2, you can make permanent redirects via entrypoints.
# https://docs.traefik.io/routing/entrypoints/#redirection
# redirectTo: websecure
websecure:
port: 8443
expose: true
exposedPort: 443
protocol: TCP
nodePort: 32443
# Set TLS at the entrypoint
# https://doc.traefik.io/traefik/routing/entrypoints/#tls
tls:
enabled: false
# this is the name of a TLSOption definition
options: ""
certResolver: ""
domains: []
# - main: example.com
# sans:
# - foo.example.com
# - bar.example.com
# Options for the main traefik service, where the entrypoints traffic comes from.
service:
enabled: true
type: NodePort
# If hostNetwork is true, runs traefik in the host network namespace
hostNetwork: false
rbac:
enabled: true
resources: {}
# requests:
# cpu: "100m"
# memory: "50Mi"
# limits:
# cpu: "300m"
# memory: "150Mi"
nodeSelector: {}
# Set the container security context
# To run the container with ports below 1024 this will need to be adjust to run as root
securityContext:
capabilities:
drop: [ALL]
readOnlyRootFilesystem: true
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
podSecurityContext:
fsGroup: 65532
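With the values above, the web entrypoint (container port 8000, exposedPort 80) is published on every node at NodePort 32080, and websecure at 32443. A hedged example request, where NODE_IP and the Host header app.example.com are placeholders for a real node address and a routed host rule:

curl -H 'Host: app.example.com' http://${NODE_IP}:32080/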

View File

@@ -21,6 +21,7 @@
- "{{ bin_dir }}"
- "{{ ca_dir }}"
- /root/.kube
- /opt/kube/images
- name: Write the $PATH environment variable
lineinfile: