update: prometheus chart 7.1.4, grafana chart 1.16.0

pull/341/head
gjmzj 2018-10-07 16:39:59 +08:00
parent aa869e17ff
commit deef038f92
56 changed files with 2913 additions and 2509 deletions

File diff suppressed because it is too large

@ -1,15 +1,15 @@
-appVersion: 5.1.2
+name: grafana
+version: 1.16.0
+appVersion: 5.2.4
+kubeVersion: "^1.8.0-0"
 description: The leading tool for querying and visualizing time series and metrics.
-engine: gotpl
 home: https://grafana.net
 icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png
-kubeVersion: ^1.8.0-0
-maintainers:
-- email: zanhsieh@gmail.com
-  name: Ming Hsieh
-- email: rluckie@cisco.com
-  name: rtluckie
-name: grafana
 sources:
-- https://github.com/grafana/grafana
-version: 1.10.0
+- https://github.com/grafana/grafana
+maintainers:
+- name: zanhsieh
+  email: zanhsieh@gmail.com
+- name: rtluckie
+  email: rluckie@cisco.com
+engine: gotpl


@ -29,43 +29,134 @@ The command removes all the Kubernetes components associated with the chart and
## Configuration
| Parameter | Description | Default |
|---------------------------------|-----------------------------------------------|---------------------------------------------------------|
| `replicas` | Number of nodes | `1` |
| `deploymentStrategy` | Deployment strategy | `RollingUpdate` |
| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }` |
| `readinessProbe`               | Readiness Probe settings                      | `{ "httpGet": { "path": "/api/health", "port": 3000 }, "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10, "periodSeconds": 10 }` |
| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "fsGroup": 472}` |
| `image.repository` | Image repository | `grafana/grafana` |
| `image.tag` | Image tag. (`Must be >= 5.0.0`) | `5.2.4` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `service.type` | Kubernetes service type | `ClusterIP` |
| `service.port` | Kubernetes port where service is exposed | `80` |
| `service.annotations` | Service annotations | `{}` |
| `service.labels` | Custom labels | `{}` |
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations | `{}` |
| `ingress.labels` | Custom labels | `{}` |
| `ingress.hosts` | Ingress accepted hostnames | `[]` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Toleration labels for pod assignment | `[]` |
| `affinity` | Affinity settings for pod assignment | `{}` |
| `persistence.enabled` | Use persistent volume to store data | `false` |
| `persistence.size` | Size of persistent volume claim | `10Gi` |
| `persistence.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.storageClassName` | Type of persistent volume claim | `nil` |
| `persistence.accessModes` | Persistence access modes | `[]` |
| `persistence.subPath` | Mount a sub dir of the persistent volume | `""` |
| `schedulerName` | Alternate scheduler name | `nil` |
| `env` | Extra environment variables passed to pods | `{}` |
| `envFromSecret`                | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment | `""` |
| `extraSecretMounts` | Additional grafana server secret mounts | `[]` |
| `plugins` | Plugins to be loaded along with Grafana | `[]` |
| `datasources` | Configure grafana datasources | `{}` |
| `dashboardProviders` | Configure grafana dashboard providers | `{}` |
| `dashboards` | Dashboards to import | `{}` |
| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` |
| `grafana.ini` | Grafana's primary configuration | `{}` |
| `ldap.existingSecret`          | The name of an existing secret containing the `ldap.toml` file; it must have the key `ldap-toml`. | `""` |
| `ldap.config`                  | Grafana's LDAP configuration | `""` |
| `annotations`                  | Deployment annotations | `{}` |
| `podAnnotations`               | Pod annotations | `{}` |
| `sidecar.dashboards.enabled`   | Enables the cluster-wide search for dashboards and adds/updates/deletes them in Grafana | `false` |
| `sidecar.dashboards.label`     | Label that ConfigMaps with dashboards must carry to be picked up | `grafana_dashboard` |
| `sidecar.datasources.enabled`  | Enables the cluster-wide search for datasources and adds/updates/deletes them in Grafana | `false` |
| `sidecar.datasources.label`    | Label that ConfigMaps with datasources must carry to be picked up | `grafana_datasource` |
| `smtp.existingSecret`          | The name of an existing secret containing the SMTP credentials; it must have the keys `user` and `password`. | `""` |
| Parameter | Description | Default |
|----------------------------|-------------------------------------|---------------------------------------------------------|
| `replicas` | Number of nodes | `1` |
| `deploymentStrategy` | Deployment strategy | `RollingUpdate` |
| `image.repository` | Image repository | `grafana/grafana` |
| `image.tag` | Image tag. (`Must be >= 5.0.0`) Possible values listed [here](https://hub.docker.com/r/grafana/grafana/tags/).| `5.0.4`|
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `service.type` | Kubernetes service type | `ClusterIP` |
| `service.port` | Kubernetes port where service is exposed| `9000` |
| `service.annotations` | Service annotations | `80` |
| `service.labels` | Custom labels | `{}`
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations | `{}` |
| `ingress.labels` | Custom labels | `{}`
| `ingress.hosts` | Ingress accepted hostnames | `[]` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Toleration labels for pod assignment | `[]` |
| `affinity` | Affinity settings for pod assignment | `{}` |
| `persistence.enabled` | Use persistent volume to store data | `false` |
| `persistence.size` | Size of persistent volume claim | `10Gi` |
| `persistence.existingClaim`| Use an existing PVC to persist data | `nil` |
| `persistence.storageClassName` | Type of persistent volume claim | `nil` |
| `persistence.accessModes` | Persistence access modes | `[]` |
| `persistence.subPath` | Mount a sub directory of the persistent volume if set | `""` |
| `schedulerName` | Alternate scheduler name | `nil` |
| `env` | Extra environment variables passed to pods | `{}` |
| `envFromSecret`            | The name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment | `""` |
| `extraSecretMounts` | Additional grafana server secret mounts | `[]` |
| `datasource` | Configure grafana datasources | `{}` |
| `dashboardProviders` | Configure grafana dashboard providers | `{}` |
| `dashboards` | Dashboards to import | `{}` |
| `grafana.ini` | Grafana's primary configuration | `{}` |
| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. | `""` |
| `ldap.config ` | Grafana's LDAP configuration | `""` |
| `annotations` | Deployment annotations | `{}` |
| `podAnnotations` | Pod annotations | `{}` |
| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials, this must have the keys `user` and `password`. | `""` |
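Parameters can be set with `--set` on the Helm command line or collected in a custom values file. A minimal sketch of such an override, using only parameters from the table above (the hostname is a placeholder):
```
replicas: 1
persistence:
  enabled: true
  size: 10Gi
ingress:
  enabled: true
  hosts:
    - grafana.example.com
plugins:
  - grafana-clock-panel
```
Pass the file to the release with `helm install -f <values-file>` or `helm upgrade -f <values-file>`.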
## Sidecar for dashboards
If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the Grafana pod. This container watches all ConfigMaps in the cluster and filters for the ones carrying the label defined in `sidecar.dashboards.label`. The files defined in those ConfigMaps are written to a folder and accessed by Grafana. Changes to the ConfigMaps are monitored, and the imported dashboards are deleted/updated accordingly. It is recommended to use one ConfigMap per dashboard, since removing a dashboard from a ConfigMap that contains several is currently not properly mirrored in Grafana.
Example dashboard config:
```
apiVersion: v1
kind: ConfigMap
metadata:
  name: sample-grafana-dashboard
  labels:
    grafana_dashboard: "1"
data:
  k8s-dashboard.json: |-
    [...]
```
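For the sidecar to actually pick this ConfigMap up, dashboard collection has to be switched on in the chart values — a minimal sketch, using the defaults shown later in `values.yaml`:
```
sidecar:
  dashboards:
    enabled: true
    # ConfigMaps carrying this label are collected
    label: grafana_dashboard
```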
## Sidecar for datasources
If the parameter `sidecar.datasources.enabled` is set, a sidecar container is deployed in the Grafana pod. This container watches all ConfigMaps in the cluster and filters for the ones carrying the label defined in `sidecar.datasources.label`. The files defined in those ConfigMaps are written to a folder and accessed by Grafana on startup. Using these YAML files, the datasources in Grafana can be modified.
Example datasource config adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file):
```
apiVersion: v1
kind: ConfigMap
metadata:
  name: sample-grafana-datasource
  labels:
    grafana_datasource: "1"
data:
  datasource.yaml: |-
    # config file version
    apiVersion: 1
    # list of datasources that should be deleted from the database
    deleteDatasources:
    - name: Graphite
      orgId: 1
    # list of datasources to insert/update depending on
    # what's available in the database
    datasources:
    # <string, required> name of the datasource. Required
    - name: Graphite
      # <string, required> datasource type. Required
      type: graphite
      # <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
      access: proxy
      # <int> org id. will default to orgId 1 if not specified
      orgId: 1
      # <string> url
      url: http://localhost:8080
      # <string> database password, if used
      password:
      # <string> database user, if used
      user:
      # <string> database name, if used
      database:
      # <bool> enable/disable basic auth
      basicAuth:
      # <string> basic auth username
      basicAuthUser:
      # <string> basic auth password
      basicAuthPassword:
      # <bool> enable/disable with credentials headers
      withCredentials:
      # <bool> mark as default datasource. Max one per org
      isDefault:
      # <map> fields that will be converted to json and stored in json_data
      jsonData:
        graphiteVersion: "1.1"
        tlsAuth: true
        tlsAuthWithCACert: true
      # <string> json object of data that will be encrypted.
      secureJsonData:
        tlsCACert: "..."
        tlsClientCert: "..."
        tlsClientKey: "..."
      version: 1
      # <bool> allow users to edit datasources from the UI.
      editable: false
```
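Likewise, the datasource sidecar only acts once it is enabled in the chart values — a minimal sketch:
```
sidecar:
  datasources:
    enabled: true
    # ConfigMaps carrying this label are collected
    label: grafana_datasource
```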


@ -0,0 +1,23 @@
{{- if .Values.rbac.create }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    app: {{ template "grafana.name" . }}
    chart: {{ template "grafana.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
{{- with .Values.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
  name: {{ template "grafana.fullname" . }}-clusterrole
{{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled }}
rules:
- apiGroups: [""] # "" indicates the core API group
  resources: ["configmaps"]
  verbs: ["get", "watch", "list"]
{{- else }}
rules: []
{{- end}}
{{- end}}


@ -0,0 +1,23 @@
{{- if .Values.rbac.create }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ template "grafana.fullname" . }}-clusterrolebinding
  labels:
    app: {{ template "grafana.name" . }}
    chart: {{ template "grafana.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
{{- with .Values.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
subjects:
- kind: ServiceAccount
  name: {{ template "grafana.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: {{ template "grafana.fullname" . }}-clusterrole
  apiGroup: rbac.authorization.k8s.io
{{- end}}


@ -0,0 +1,26 @@
{{- if .Values.sidecar.dashboards.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    app: {{ template "grafana.name" . }}
    chart: {{ template "grafana.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
{{- with .Values.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
  name: {{ template "grafana.fullname" . }}-config-dashboards
data:
  provider.yaml: |-
    apiVersion: 1
    providers:
    - name: 'default'
      orgId: 1
      folder: ''
      type: file
      disableDeletion: false
      options:
        path: {{ .Values.sidecar.dashboards.folder }}
{{- end}}


@ -8,8 +8,8 @@ metadata:
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
{{- with .Values.plugins }}
plugins: {{ . | quote }}
{{- if .Values.plugins }}
plugins: {{ join "," .Values.plugins }}
{{- end }}
grafana.ini: |
{{- range $key, $value := index .Values "grafana.ini" }}
@ -37,16 +37,25 @@ data:
download_dashboards.sh: |
#!/usr/bin/env sh
set -euf
mkdir -p /var/lib/grafana/dashboards
{{- range $key, $value := .Values.dashboards }}
{{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }}
{{- if .Values.dashboardProviders }}
{{- range $key, $value := .Values.dashboardProviders }}
{{- range $value.providers }}
mkdir -p {{ .options.path }}
{{- end }}
{{- end }}
{{- end }}
{{- range $provider, $dashboards := .Values.dashboards }}
{{- range $key, $value := $dashboards }}
{{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }}
curl -sk \
--connect-timeout 60 \
--max-time 60 \
-H "Accept: application/json" \
-H "Content-Type: application/json;charset=UTF-8" \
{{- if $value.url -}}{{ $value.url }}{{- else -}} https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download{{- end -}}{{ if $value.datasource }}| sed 's|\"datasource\":[^,]*|\"datasource\": \"{{ $value.datasource }}\"|g'{{ end }} \
> /var/lib/grafana/dashboards/{{ $key }}.json
> /var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json
{{- end }}
{{- end }}
{{- end }}
{{- end }}


@ -1,18 +1,22 @@
{{- if .Values.dashboards }}
{{- range $provider, $dashboards := .Values.dashboards }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "grafana.fullname" . }}-dashboards-json
name: {{ template "grafana.fullname" $ }}-dashboards-{{ $provider }}
labels:
app: {{ template "grafana.name" . }}
chart: {{ template "grafana.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
app: {{ template "grafana.name" $ }}
chart: {{ template "grafana.chart" $ }}
release: {{ $.Release.Name }}
heritage: {{ $.Release.Service }}
dashboard-provider: {{ $provider }}
data:
{{- if .Values.dashboards }}
{{- range $key, $value := .Values.dashboards }}
{{- range $key, $value := $dashboards }}
{{- if hasKey $value "json" }}
{{ $key }}.json: |
{{ $value.json | indent 4 }}
{{- end }}
{{- end }}
{{- end -}}
{{- end }}
{{- end }}


@ -19,6 +19,9 @@ spec:
release: {{ .Release.Name }}
strategy:
type: {{ .Values.deploymentStrategy }}
{{- if ne .Values.deploymentStrategy "RollingUpdate" }}
rollingUpdate: null
{{- end }}
template:
metadata:
labels:
@ -33,6 +36,10 @@ spec:
{{- if .Values.schedulerName }}
schedulerName: "{{ .Values.schedulerName }}"
{{- end }}
{{- if .Values.securityContext }}
securityContext:
{{ toYaml .Values.securityContext | indent 8 }}
{{- end }}
{{- if .Values.dashboards }}
initContainers:
- name: download-dashboards
@ -59,6 +66,36 @@ spec:
{{- end}}
{{- end }}
containers:
{{- if .Values.sidecar.dashboards.enabled }}
- name: {{ template "grafana.name" . }}-sc-dashboard
image: "{{ .Values.sidecar.image }}"
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
env:
- name: LABEL
value: "{{ .Values.sidecar.dashboards.label }}"
- name: FOLDER
value: "{{ .Values.sidecar.dashboards.folder }}"
resources:
{{ toYaml .Values.sidecar.resources | indent 12 }}
volumeMounts:
- name: sc-dashboard-volume
mountPath: {{ .Values.sidecar.dashboards.folder | quote }}
{{- end}}
{{- if .Values.sidecar.datasources.enabled }}
- name: {{ template "grafana.name" . }}-sc-datasources
image: "{{ .Values.sidecar.image }}"
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
env:
- name: LABEL
value: "{{ .Values.sidecar.datasources.label }}"
- name: FOLDER
value: "/etc/grafana/provisioning/datasources"
resources:
{{ toYaml .Values.sidecar.resources | indent 12 }}
volumeMounts:
- name: sc-datasources-volume
mountPath: "/etc/grafana/provisioning/datasources"
{{- end}}
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
@ -70,14 +107,22 @@ spec:
mountPath: "/etc/grafana/ldap.toml"
subPath: ldap.toml
{{- if .Values.dashboards }}
{{- range $key, $value := .Values.dashboards }}
{{ if hasKey $value "json" }}
- name: dashboards-json
mountPath: "/var/lib/grafana/dashboards/{{ $key }}.json"
subPath: {{ $key }}.json
{{- range $provider, $dashboards := .Values.dashboards }}
{{- range $key, $value := $dashboards }}
{{- if hasKey $value "json" }}
- name: dashboards-{{ $provider }}
mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json"
subPath: "{{ $key }}.json"
{{- end }}
{{- end }}
{{- end }}
{{- end -}}
{{- if .Values.dashboardsConfigMaps }}
{{- range keys .Values.dashboardsConfigMaps }}
- name: dashboards-{{ . }}
mountPath: "/var/lib/grafana/dashboards/{{ . }}"
{{- end }}
{{- end }}
{{- if .Values.datasources }}
- name: config
mountPath: "/etc/grafana/provisioning/datasources/datasources.yaml"
@ -88,9 +133,25 @@ spec:
mountPath: "/etc/grafana/provisioning/dashboards/dashboardproviders.yaml"
subPath: dashboardproviders.yaml
{{- end }}
{{- if .Values.sidecar.dashboards.enabled }}
- name: sc-dashboard-volume
mountPath: {{ .Values.sidecar.dashboards.folder | quote }}
- name: sc-dashboard-provider
mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml"
subPath: provider.yaml
{{- end}}
{{- if .Values.sidecar.datasources.enabled }}
- name: sc-datasources-volume
mountPath: "/etc/grafana/provisioning/datasources"
{{- end}}
- name: storage
mountPath: "/var/lib/grafana"
subPath: {{ .Values.persistence.subPath }}
{{- range .Values.extraSecretMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
readOnly: {{ .readOnly }}
{{- end }}
ports:
- name: service
containerPort: {{ .Values.service.port }}
@ -138,17 +199,9 @@ spec:
name: {{ .Values.envFromSecret }}
{{- end }}
livenessProbe:
httpGet:
path: /api/health
port: 3000
{{ toYaml .Values.livenessProbe | indent 12 }}
readinessProbe:
httpGet:
path: /api/health
port: 3000
initialDelaySeconds: 60
timeoutSeconds: 30
failureThreshold: 10
periodSeconds: 10
{{ toYaml .Values.readinessProbe | indent 12 }}
resources:
{{ toYaml .Values.resources | indent 12 }}
{{- with .Values.nodeSelector }}
@ -167,9 +220,20 @@ spec:
- name: config
configMap:
name: {{ template "grafana.fullname" . }}
- name: dashboards-json
{{- if .Values.dashboards }}
{{- range keys .Values.dashboards }}
- name: dashboards-{{ . }}
configMap:
name: {{ template "grafana.fullname" . }}-dashboards-json
name: {{ template "grafana.fullname" $ }}-dashboards-{{ . }}
{{- end }}
{{- end }}
{{- if .Values.dashboardsConfigMaps }}
{{- range $provider, $name := .Values.dashboardsConfigMaps }}
- name: dashboards-{{ $provider }}
configMap:
name: {{ $name }}
{{- end }}
{{- end }}
- name: ldap
secret:
{{- if .Values.ldap.existingSecret }}
@ -187,9 +251,20 @@ spec:
{{- else }}
emptyDir: {}
{{- end -}}
{{- if .Values.sidecar.dashboards.enabled }}
- name: sc-dashboard-volume
emptyDir: {}
- name: sc-dashboard-provider
configMap:
name: {{ template "grafana.fullname" . }}-config-dashboards
{{- end }}
{{- if .Values.sidecar.datasources.enabled }}
- name: sc-datasources-volume
emptyDir: {}
{{- end -}}
{{- range .Values.extraSecretMounts }}
- name: {{ .name }}
secret:
secretName: {{ .secretName }}
defaultMode: {{ .defaultMode }}
{{- end }}


@ -24,7 +24,7 @@ spec:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}


@ -29,18 +29,12 @@ spec:
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAsNonRoot'
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
rule: 'RunAsAny'
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
rule: 'RunAsAny'
readOnlyRootFilesystem: false
{{- end }}


@ -9,9 +9,23 @@ replicas: 1
deploymentStrategy: RollingUpdate
livenessProbe:
httpGet:
path: /api/health
port: 3000
readinessProbe:
httpGet:
path: /api/health
port: 3000
initialDelaySeconds: 60
timeoutSeconds: 30
failureThreshold: 10
periodSeconds: 10
image:
repository: grafana/grafana
tag: 5.1.2
tag: 5.2.4
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
@ -21,6 +35,10 @@ image:
# pullSecrets:
# - myRegistryKeySecretName
securityContext:
runAsUser: 472
fsGroup: 472
downloadDashboardsImage:
repository: appropriate/curl
tag: latest
@ -115,9 +133,11 @@ extraSecretMounts: []
# secretName: grafana-secret-files
# readOnly: true
# Pass the plugins you want installed as a comma separated list.
# plugins: "digrich-bubblechart-panel,grafana-clock-panel"
plugins: ""
## Pass the plugins you want installed as a list.
##
plugins: []
# - digrich-bubblechart-panel
# - grafana-clock-panel
## Configure grafana datasources
## ref: http://docs.grafana.org/administration/provisioning/#datasources
@ -135,6 +155,8 @@ datasources: {}
## Configure grafana dashboard providers
## ref: http://docs.grafana.org/administration/provisioning/#dashboards
##
## `path` must be /var/lib/grafana/dashboards/<provider_name>
##
dashboardProviders: {}
# dashboardproviders.yaml:
# apiVersion: 1
@ -146,22 +168,36 @@ dashboardProviders: {}
# disableDeletion: false
# editable: true
# options:
# path: /var/lib/grafana/dashboards
# path: /var/lib/grafana/dashboards/default
## Configure grafana dashboard to import
## NOTE: To use dashboards you must also enable/configure dashboardProviders
## ref: https://grafana.com/dashboards
##
## dashboards per provider, use provider name as key.
##
dashboards: {}
# some-dashboard:
# json: |
# $RAW_JSON
# prometheus-stats:
# gnetId: 2
# revision: 2
# datasource: Prometheus
# local-dashboard:
# url: https://example.com/repository/test.json
# default:
# some-dashboard:
# json: |
# $RAW_JSON
# prometheus-stats:
# gnetId: 2
# revision: 2
# datasource: Prometheus
# local-dashboard:
# url: https://example.com/repository/test.json
## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value.
## Dashboards for a provider must be defined either via external ConfigMaps or in values.yaml, not both.
## ConfigMap data example:
##
## data:
## example-dashboard.json: |
## RAW_JSON
##
dashboardsConfigMaps: {}
# default: ""
## Grafana's primary configuration
## NOTE: values in map will be converted to ini format
@ -172,6 +208,7 @@ grafana.ini:
data: /var/lib/grafana/data
logs: /var/log/grafana
plugins: /var/lib/grafana/plugins
provisioning: /etc/grafana/provisioning
analytics:
check_for_updates: true
log:
@ -214,3 +251,26 @@ smtp:
# `existingSecret` is a reference to an existing secret containing the smtp configuration
# for Grafana in keys `user` and `password`.
existingSecret: ""
## Sidecars that collect the ConfigMaps with the specified label and store the included files in the respective folders
## Requires at least Grafana 5 to work and can't be used together with the parameters dashboardProviders, datasources and dashboards
sidecar:
image: kiwigrid/k8s-sidecar:0.0.3
imagePullPolicy: IfNotPresent
resources:
# limits:
# cpu: 100m
# memory: 100Mi
# requests:
# cpu: 50m
# memory: 50Mi
dashboards:
enabled: false
# label that the configmaps with dashboards are marked with
label: grafana_dashboard
# folder in the pod that should hold the collected dashboards
folder: /tmp/dashboards
datasources:
enabled: false
# label that the configmaps with datasources are marked with
label: grafana_datasource


@ -1,18 +1,19 @@
-appVersion: 2.2.1
+name: prometheus
+version: 7.1.4
+appVersion: 2.4.3
 description: Prometheus is a monitoring system and time series database.
-engine: gotpl
 home: https://prometheus.io/
 icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png
-maintainers:
-- email: mgoodness@gmail.com
-  name: mgoodness
-- email: gianrubio@gmail.com
-  name: gianrubio
-name: prometheus
 sources:
-- https://github.com/prometheus/alertmanager
-- https://github.com/prometheus/prometheus
-- https://github.com/prometheus/pushgateway
-- https://github.com/prometheus/node_exporter
-- https://github.com/kubernetes/kube-state-metrics
-version: 6.7.0
+- https://github.com/prometheus/alertmanager
+- https://github.com/prometheus/prometheus
+- https://github.com/prometheus/pushgateway
+- https://github.com/prometheus/node_exporter
+- https://github.com/kubernetes/kube-state-metrics
+maintainers:
+- name: mgoodness
+  email: mgoodness@gmail.com
+- name: gianrubio
+  email: gianrubio@gmail.com
+engine: gotpl
+tillerVersion: ">=2.8.0"


@ -95,7 +95,7 @@ Parameter | Description | Default
`alertmanager.enabled` | If true, create alertmanager | `true`
`alertmanager.name` | alertmanager container name | `alertmanager`
`alertmanager.image.repository` | alertmanager container image repository | `prom/alertmanager`
`alertmanager.image.tag` | alertmanager container image tag | `v0.13.0`
`alertmanager.image.tag` | alertmanager container image tag | `v0.15.2`
`alertmanager.image.pullPolicy` | alertmanager container image pull policy | `IfNotPresent`
`alertmanager.prefixURL` | The prefix slug at which the server can be accessed | ``
`alertmanager.baseURL` | The external url at which the server can be accessed | `/`
@ -108,6 +108,7 @@ Parameter | Description | Default
`alertmanager.ingress.tls` | alertmanager Ingress TLS configuration (YAML) | `[]`
`alertmanager.nodeSelector` | node labels for alertmanager pod assignment | `{}`
`alertmanager.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]`
`alertmanager.affinity` | pod affinity | `{}`
`alertmanager.schedulerName` | alertmanager alternate scheduler name | `nil`
`alertmanager.persistentVolume.enabled` | If true, alertmanager will create a Persistent Volume Claim | `true`
`alertmanager.persistentVolume.accessModes` | alertmanager data Persistent Volume access modes | `[ReadWriteOnce]`
@ -119,6 +120,7 @@ Parameter | Description | Default
`alertmanager.persistentVolume.subPath` | Subdirectory of alertmanager data Persistent Volume to mount | `""`
`alertmanager.podAnnotations` | annotations to be added to alertmanager pods | `{}`
`alertmanager.replicaCount` | desired number of alertmanager pods | `1`
`alertmanager.priorityClassName` | alertmanager priorityClassName | `nil`
`alertmanager.resources` | alertmanager pod resource requests & limits | `{}`
`alertmanager.securityContext` | Custom [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for Alert Manager containers | `{}`
`alertmanager.service.annotations` | annotations for alertmanager service | `{}`
@ -131,7 +133,7 @@ Parameter | Description | Default
`alertmanagerFiles.alertmanager.yml` | Prometheus alertmanager configuration | example configuration
`configmapReload.name` | configmap-reload container name | `configmap-reload`
`configmapReload.image.repository` | configmap-reload container image repository | `jimmidyson/configmap-reload`
`configmapReload.image.tag` | configmap-reload container image tag | `v0.1`
`configmapReload.image.tag` | configmap-reload container image tag | `v0.2.2`
`configmapReload.image.pullPolicy` | configmap-reload container image pull policy | `IfNotPresent`
`configmapReload.extraArgs` | Additional configmap-reload container arguments | `{}`
`configmapReload.extraConfigmapMounts` | Additional configmap-reload configMap mounts | `[]`
@ -145,13 +147,15 @@ Parameter | Description | Default
`kubeStateMetrics.enabled` | If true, create kube-state-metrics | `true`
`kubeStateMetrics.name` | kube-state-metrics container name | `kube-state-metrics`
`kubeStateMetrics.image.repository` | kube-state-metrics container image repository| `quay.io/coreos/kube-state-metrics`
`kubeStateMetrics.image.tag` | kube-state-metrics container image tag | `v1.3.1`
`kubeStateMetrics.image.tag` | kube-state-metrics container image tag | `v1.4.0`
`kubeStateMetrics.image.pullPolicy` | kube-state-metrics container image pull policy | `IfNotPresent`
`kubeStateMetrics.args` | kube-state-metrics container arguments | `{}`
`kubeStateMetrics.nodeSelector` | node labels for kube-state-metrics pod assignment | `{}`
`kubeStateMetrics.podAnnotations` | annotations to be added to kube-state-metrics pods | `{}`
`kubeStateMetrics.deploymentAnnotations` | annotations to be added to kube-state-metrics deployment | `{}`
`kubeStateMetrics.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]`
`kubeStateMetrics.replicaCount` | desired number of kube-state-metrics pods | `1`
`kubeStateMetrics.priorityClassName` | kube-state-metrics priorityClassName | `nil`
`kubeStateMetrics.resources` | kube-state-metrics resource requests and limits (YAML) | `{}`
`kubeStateMetrics.securityContext` | Custom [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for kube-state-metrics containers | `{}`
`kubeStateMetrics.service.annotations` | annotations for kube-state-metrics service | `{prometheus.io/scrape: "true"}`
@ -164,15 +168,18 @@ Parameter | Description | Default
`nodeExporter.enabled` | If true, create node-exporter | `true`
`nodeExporter.name` | node-exporter container name | `node-exporter`
`nodeExporter.image.repository` | node-exporter container image repository| `prom/node-exporter`
`nodeExporter.image.tag` | node-exporter container image tag | `v0.15.2`
`nodeExporter.image.tag` | node-exporter container image tag | `v0.16.0`
`nodeExporter.image.pullPolicy` | node-exporter container image pull policy | `IfNotPresent`
`nodeExporter.extraArgs` | Additional node-exporter container arguments | `{}`
`nodeExporter.extraHostPathMounts` | Additional node-exporter hostPath mounts | `[]`
`nodeExporter.extraConfigmapMounts` | Additional node-exporter configMap mounts | `[]`
`nodeExporter.hostNetwork` | If true, node-exporter pods share the host network namespace | `true`
`nodeExporter.hostPID` | If true, node-exporter pods share the host PID namespace | `true`
`nodeExporter.nodeSelector` | node labels for node-exporter pod assignment | `{}`
`nodeExporter.podAnnotations` | annotations to be added to node-exporter pods | `{}`
`nodeExporter.pod.labels` | labels to be added to node-exporter pods | `{}`
`nodeExporter.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]`
`nodeExporter.priorityClassName` | node-exporter priorityClassName | `nil`
`nodeExporter.resources` | node-exporter resource requests and limits (YAML) | `{}`
`nodeExporter.securityContext` | securityContext for containers in pod | `{}`
`nodeExporter.service.annotations` | annotations for node-exporter service | `{prometheus.io/scrape: "true"}`
@ -185,7 +192,7 @@ Parameter | Description | Default
`pushgateway.enabled` | If true, create pushgateway | `true`
`pushgateway.name` | pushgateway container name | `pushgateway`
`pushgateway.image.repository` | pushgateway container image repository | `prom/pushgateway`
`pushgateway.image.tag` | pushgateway container image tag | `v0.4.0`
`pushgateway.image.tag` | pushgateway container image tag | `v0.5.2`
`pushgateway.image.pullPolicy` | pushgateway container image pull policy | `IfNotPresent`
`pushgateway.extraArgs` | Additional pushgateway container arguments | `{}`
`pushgateway.ingress.enabled` | If true, pushgateway Ingress will be created | `false`
@ -196,6 +203,7 @@ Parameter | Description | Default
`pushgateway.podAnnotations` | annotations to be added to pushgateway pods | `{}`
`pushgateway.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]`
`pushgateway.replicaCount` | desired number of pushgateway pods | `1`
`pushgateway.priorityClassName` | pushgateway priorityClassName | `nil`
`pushgateway.resources` | pushgateway pod resource requests & limits | `{}`
`pushgateway.service.annotations` | annotations for pushgateway service | `{}`
`pushgateway.service.clusterIP` | internal pushgateway cluster service IP | `""`
@ -207,9 +215,12 @@ Parameter | Description | Default
`rbac.create` | If true, create & use RBAC resources | `true`
`server.name` | Prometheus server container name | `server`
`server.image.repository` | Prometheus server container image repository | `prom/prometheus`
`server.image.tag` | Prometheus server container image tag | `v2.1.0`
`server.image.tag` | Prometheus server container image tag | `v2.4.3`
`server.image.pullPolicy` | Prometheus server container image pull policy | `IfNotPresent`
`server.enableAdminApi` | If true, the Prometheus administrative HTTP API will be enabled. Please note that you should take care of protecting access to the administrative API (an ingress or a frontend Nginx with auth) before enabling it. | `false`
`server.global.scrape_interval` | How frequently to scrape targets by default | `1m`
`server.global.scrape_timeout` | How long until a scrape request times out | `10s`
`server.global.evaluation_interval` | How frequently to evaluate rules | `1m`
`server.extraArgs` | Additional Prometheus server container arguments | `{}`
`server.prefixURL` | The prefix slug at which the server can be accessed | ``
`server.baseURL` | The external url at which the server can be accessed | ``
@ -224,6 +235,8 @@ Parameter | Description | Default
`server.ingress.tls` | Prometheus server Ingress TLS configuration (YAML) | `[]`
`server.nodeSelector` | node labels for Prometheus server pod assignment | `{}`
`server.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]`
`server.affinity` | pod affinity | `{}`
`server.priorityClassName` | Prometheus server priorityClassName | `nil`
`server.schedulerName` | Prometheus server alternate scheduler name | `nil`
`server.persistentVolume.enabled` | If true, Prometheus server will create a Persistent Volume Claim | `true`
`server.persistentVolume.accessModes` | Prometheus server data Persistent Volume access modes | `[ReadWriteOnce]`
@ -234,6 +247,7 @@ Parameter | Description | Default
`server.persistentVolume.storageClass` | Prometheus server data Persistent Volume Storage Class | `unset`
`server.persistentVolume.subPath` | Subdirectory of Prometheus server data Persistent Volume to mount | `""`
`server.podAnnotations` | annotations to be added to Prometheus server pods | `{}`
`server.deploymentAnnotations` | annotations to be added to Prometheus server deployment | `{}`
`server.replicaCount` | desired number of Prometheus server pods | `1`
`server.resources` | Prometheus server resource requests and limits | `{}`
`server.securityContext` | Custom [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for server containers | `{}`
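The new global scrape settings and the scheduling-related parameters above map directly onto the chart values. A minimal sketch of an override file, assuming the defaults listed later in `values.yaml` (the priority class name is a placeholder):
```
server:
  global:
    scrape_interval: 1m
    scrape_timeout: 10s
    evaluation_interval: 1m
  priorityClassName: high-priority
alertmanager:
  priorityClassName: high-priority
```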


@ -26,10 +26,17 @@ spec:
component: "{{ .Values.alertmanager.name }}"
release: {{ .Release.Name }}
spec:
{{- if .Values.alertmanager.affinity }}
affinity:
{{ toYaml .Values.alertmanager.affinity | indent 8 }}
{{- end }}
{{- if .Values.alertmanager.schedulerName }}
schedulerName: "{{ .Values.alertmanager.schedulerName }}"
{{- end }}
serviceAccountName: {{ template "prometheus.serviceAccountName.alertmanager" . }}
{{- if .Values.alertmanager.priorityClassName }}
priorityClassName: "{{ .Values.alertmanager.priorityClassName }}"
{{- end }}
containers:
- name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }}
image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}"


@ -2,6 +2,10 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
{{- if .Values.kubeStateMetrics.deploymentAnnotations }}
annotations:
{{ toYaml .Values.kubeStateMetrics.deploymentAnnotations | indent 4 }}
{{- end }}
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
@ -26,6 +30,9 @@ spec:
{{- end }}
spec:
serviceAccountName: {{ template "prometheus.serviceAccountName.kubeStateMetrics" . }}
{{- if .Values.kubeStateMetrics.priorityClassName }}
priorityClassName: "{{ .Values.kubeStateMetrics.priorityClassName }}"
{{- end }}
containers:
- name: {{ template "prometheus.name" . }}-{{ .Values.kubeStateMetrics.name }}
image: "{{ .Values.kubeStateMetrics.image.repository }}:{{ .Values.kubeStateMetrics.image.tag }}"


@ -2,6 +2,10 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
{{- if .Values.nodeExporter.deploymentAnnotations }}
annotations:
{{ toYaml .Values.nodeExporter.deploymentAnnotations | indent 4 }}
{{- end }}
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
@ -29,6 +33,9 @@ spec:
{{- end }}
spec:
serviceAccountName: {{ template "prometheus.serviceAccountName.nodeExporter" . }}
{{- if .Values.nodeExporter.priorityClassName }}
priorityClassName: "{{ .Values.nodeExporter.priorityClassName }}"
{{- end }}
containers:
- name: {{ template "prometheus.name" . }}-{{ .Values.nodeExporter.name }}
image: "{{ .Values.nodeExporter.image.repository }}:{{ .Values.nodeExporter.image.tag }}"
@ -66,8 +73,12 @@ spec:
mountPath: {{ .mountPath }}
readOnly: {{ .readOnly }}
{{- end }}
{{- if .Values.nodeExporter.hostNetwork }}
hostNetwork: true
{{- end }}
{{- if .Values.nodeExporter.hostPID }}
hostPID: true
{{- end }}
{{- if .Values.nodeExporter.tolerations }}
tolerations:
{{ toYaml .Values.nodeExporter.tolerations | indent 8 }}


@ -23,6 +23,9 @@ spec:
release: {{ .Release.Name }}
spec:
serviceAccountName: {{ template "prometheus.serviceAccountName.pushgateway" . }}
{{- if .Values.pushgateway.priorityClassName }}
priorityClassName: "{{ .Values.pushgateway.priorityClassName }}"
{{- end }}
containers:
- name: {{ template "prometheus.name" . }}-{{ .Values.pushgateway.name }}
image: "{{ .Values.pushgateway.image.repository }}:{{ .Values.pushgateway.image.tag }}"
@ -35,7 +38,11 @@ spec:
- containerPort: 9091
readinessProbe:
httpGet:
{{- if (index .Values "pushgateway" "extraArgs" "web.route-prefix") }}
path: /{{ index .Values "pushgateway" "extraArgs" "web.route-prefix" }}/#/status
{{- else }}
path: /#/status
{{- end }}
port: 9091
initialDelaySeconds: 10
timeoutSeconds: 10


@ -13,6 +13,10 @@ data:
{{- $root := . -}}
{{- range $key, $value := .Values.serverFiles }}
{{ $key }}: |
{{- if eq $key "prometheus.yml" }}
global:
{{ $root.Values.server.global | toYaml | indent 6 }}
{{- end }}
{{ toYaml $value | default "{}" | indent 4 }}
{{- if eq $key "prometheus.yml" -}}
{{- if $root.Values.alertmanager.enabled }}
@ -31,7 +35,7 @@ data:
regex: {{ $root.Release.Namespace }}
action: keep
- source_labels: [__meta_kubernetes_pod_label_app]
regex: prometheus
regex: {{ template "prometheus.name" $root }}
action: keep
- source_labels: [__meta_kubernetes_pod_label_component]
regex: alertmanager


@ -1,6 +1,10 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
{{- if .Values.server.deploymentAnnotations }}
annotations:
{{ toYaml .Values.server.deploymentAnnotations | indent 4 }}
{{- end }}
labels:
app: {{ template "prometheus.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
@ -25,6 +29,13 @@ spec:
component: "{{ .Values.server.name }}"
release: {{ .Release.Name }}
spec:
{{- if .Values.server.affinity }}
affinity:
{{ toYaml .Values.server.affinity | indent 8 }}
{{- end }}
{{- if .Values.server.priorityClassName }}
priorityClassName: "{{ .Values.server.priorityClassName }}"
{{- end }}
{{- if .Values.server.schedulerName }}
schedulerName: "{{ .Values.server.schedulerName }}"
{{- end }}
@ -49,7 +60,7 @@ spec:
imagePullPolicy: "{{ .Values.configmapReload.image.pullPolicy }}"
args:
- --volume-dir=/etc/config
- --webhook-url=http://localhost:9090{{ .Values.server.prefixURL }}/-/reload
- --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload
{{- range $key, $value := .Values.configmapReload.extraArgs }}
- --{{ $key }}={{ $value }}
{{- end }}


@ -33,9 +33,13 @@ alertmanager:
##
image:
repository: prom/alertmanager
tag: v0.14.0
tag: v0.15.2
pullPolicy: IfNotPresent
## alertmanager priorityClassName
##
priorityClassName: ""
## Additional alertmanager container arguments
##
extraArgs: {}
@ -108,6 +112,10 @@ alertmanager:
##
nodeSelector: {}
## Pod affinity
##
affinity: {}
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
@ -210,7 +218,7 @@ configmapReload:
##
image:
repository: jimmidyson/configmap-reload
tag: v0.1
tag: v0.2.2
pullPolicy: IfNotPresent
## Additional configmap-reload container arguments
@ -266,9 +274,13 @@ kubeStateMetrics:
##
image:
repository: quay.io/coreos/kube-state-metrics
tag: v1.3.1
tag: v1.4.0
pullPolicy: IfNotPresent
## kube-state-metrics priorityClassName
##
priorityClassName: ""
## kube-state-metrics container arguments
##
args: {}
@ -335,6 +347,14 @@ nodeExporter:
##
enabled: true
## If true, node-exporter pods share the host network namespace
##
hostNetwork: true
## If true, node-exporter pods share the host PID namespace
##
hostPID: true
## node-exporter container name
##
name: node-exporter
@ -346,6 +366,10 @@ nodeExporter:
tag: v0.15.2
pullPolicy: IfNotPresent
## node-exporter priorityClassName
##
priorityClassName: ""
## Custom Update Strategy
##
updateStrategy:
@ -437,9 +461,13 @@ server:
##
image:
repository: prom/prometheus
tag: v2.2.1
tag: v2.4.3
pullPolicy: IfNotPresent
## prometheus server priorityClassName
##
priorityClassName: ""
## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug
## so that the various internal URLs are still accessible as they are in the default case.
## (Optional)
@ -453,6 +481,17 @@ server:
## series. This is disabled by default.
enableAdminApi: false
global:
## How frequently to scrape targets by default
##
scrape_interval: 1m
## How long until a scrape request times out
##
scrape_timeout: 10s
## How frequently to evaluate rules
##
evaluation_interval: 1m
## Additional Prometheus server container arguments
##
extraArgs: {}
@ -533,6 +572,10 @@ server:
##
nodeSelector: {}
## Pod affinity
##
affinity: {}
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
@ -640,9 +683,13 @@ pushgateway:
##
image:
repository: prom/pushgateway
tag: v0.4.0
tag: v0.5.2
pullPolicy: IfNotPresent
## pushgateway priorityClassName
##
priorityClassName: ""
## Additional pushgateway container arguments
##
extraArgs: {}
@ -728,7 +775,7 @@ pushgateway:
##
alertmanagerFiles:
alertmanager.yml:
global:
global: {}
# slack_api_url: ''
receivers:
@ -750,17 +797,6 @@ serverFiles:
rules: {}
prometheus.yml:
global:
## How frequently to scrape targets by default
##
scrape_interval: 1m
## How long until a scrape request times out
##
scrape_timeout: 10s
## How frequently to evaluate rules
##
evaluation_interval: 1m
rule_files:
- /etc/config/rules
- /etc/config/alerts