commit 1dda89dbe3
@@ -2,9 +2,6 @@
# RBAC required. see docs/getting-started.md for access details.
dashboard_enabled: true

# Monitoring apps for k8s
efk_enabled: false

# Helm deployment
helm_enabled: false
@@ -169,15 +169,6 @@ kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-aut
kubednsautoscaler_image_tag: "{{ kubednsautoscaler_version }}"
test_image_repo: busybox
test_image_tag: latest
elasticsearch_version: "v5.6.4"
elasticsearch_image_repo: "k8s.gcr.io/elasticsearch"
elasticsearch_image_tag: "{{ elasticsearch_version }}"
fluentd_version: "v2.0.4"
fluentd_image_repo: "k8s.gcr.io/fluentd-elasticsearch"
fluentd_image_tag: "{{ fluentd_version }}"
kibana_version: "5.6.4"
kibana_image_repo: "docker.elastic.co/kibana/kibana"
kibana_image_tag: "{{ kibana_version }}"
helm_version: "v2.9.1"
helm_image_repo: "lachlanevenson/k8s-helm"
helm_image_tag: "{{ helm_version }}"
@@ -479,33 +470,6 @@ downloads:
tag: "{{ test_image_tag }}"
sha256: "{{ testbox_digest_checksum|default(None) }}"

elasticsearch:
enabled: "{{ efk_enabled }}"
container: true
repo: "{{ elasticsearch_image_repo }}"
tag: "{{ elasticsearch_image_tag }}"
sha256: "{{ elasticsearch_digest_checksum|default(None) }}"
groups:
- kube-node

fluentd:
enabled: "{{ efk_enabled }}"
container: true
repo: "{{ fluentd_image_repo }}"
tag: "{{ fluentd_image_tag }}"
sha256: "{{ fluentd_digest_checksum|default(None) }}"
groups:
- kube-node

kibana:
enabled: "{{ efk_enabled }}"
container: true
repo: "{{ kibana_image_repo }}"
tag: "{{ kibana_image_tag }}"
sha256: "{{ kibana_digest_checksum|default(None) }}"
groups:
- kube-node

helm:
enabled: "{{ helm_enabled }}"
container: true
@@ -1,6 +0,0 @@
---
elasticsearch_cpu_limit: 1000m
elasticsearch_mem_limit: 0M
elasticsearch_cpu_requests: 100m
elasticsearch_mem_requests: 0M
elasticsearch_service_port: 9200
@@ -1,5 +0,0 @@
---
# TODO: bradbeam add in curator
# https://github.com/Skillshare/kubernetes-efk/blob/master/configs/elasticsearch.yml#L94
# - role: download
# file: "{{ downloads.curator }}"
@@ -1,38 +0,0 @@
---
- name: "ElasticSearch | Write efk manifests (RBAC)"
template:
src: "{{ item }}"
dest: "{{ kube_config_dir }}/{{ item }}"
with_items:
- "efk-sa.yml"
- "efk-clusterrolebinding.yml"
run_once: true

- name: "ElasticSearch | Create Serviceaccount and Clusterrolebinding (RBAC)"
command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n kube-system"
with_items:
- "efk-sa.yml"
- "efk-clusterrolebinding.yml"
run_once: true

- name: "ElasticSearch | Write ES deployment"
template:
src: elasticsearch-deployment.yml.j2
dest: "{{ kube_config_dir }}/elasticsearch-deployment.yaml"
register: es_deployment_manifest

- name: "ElasticSearch | Create ES deployment"
command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-deployment.yaml -n kube-system"
run_once: true
when: es_deployment_manifest.changed

- name: "ElasticSearch | Write ES service "
template:
src: elasticsearch-service.yml.j2
dest: "{{ kube_config_dir }}/elasticsearch-service.yaml"
register: es_service_manifest

- name: "ElasticSearch | Create ES service"
command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n kube-system"
run_once: true
when: es_service_manifest.changed
@@ -1,17 +0,0 @@
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: efk
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: efk
namespace: kube-system
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
@@ -1,9 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: efk
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
@@ -1,62 +0,0 @@
---
# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.10.2/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: elasticsearch-logging
namespace: kube-system
labels:
k8s-app: elasticsearch-logging
version: "{{ elasticsearch_image_tag }}"
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
serviceName: elasticsearch-logging
replicas: 2
selector:
matchLabels:
k8s-app: elasticsearch-logging
version: "{{ elasticsearch_image_tag }}"
template:
metadata:
labels:
k8s-app: elasticsearch-logging
version: "{{ elasticsearch_image_tag }}"
kubernetes.io/cluster-service: "true"
spec:
containers:
- image: "{{ elasticsearch_image_repo }}:{{ elasticsearch_image_tag }}"
name: elasticsearch-logging
resources:
# need more cpu upon initialization, therefore burstable class
limits:
cpu: {{ elasticsearch_cpu_limit }}
{% if elasticsearch_mem_limit is defined and elasticsearch_mem_limit != "0M" %}
memory: "{{ elasticsearch_mem_limit }}"
{% endif %}
requests:
cpu: {{ elasticsearch_cpu_requests }}
{% if elasticsearch_mem_requests is defined and elasticsearch_mem_requests != "0M" %}
memory: "{{ elasticsearch_mem_requests }}"
{% endif %}
ports:
- containerPort: 9200
name: db
protocol: TCP
- containerPort: 9300
name: transport
protocol: TCP
volumeMounts:
- name: es-persistent-storage
mountPath: /data
volumes:
- name: es-persistent-storage
emptyDir: {}
serviceAccountName: efk
initContainers:
- image: alpine:3.6
command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
name: elasticsearch-logging-init
securityContext:
privileged: true
@@ -1,18 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: elasticsearch-logging
namespace: "kube-system"
labels:
k8s-app: elasticsearch-logging
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Elasticsearch"
spec:
ports:
- port: {{ elasticsearch_service_port }}
protocol: TCP
targetPort: db
selector:
k8s-app: elasticsearch-logging
@@ -1,7 +0,0 @@
---
fluentd_cpu_limit: 0m
fluentd_mem_limit: 500Mi
fluentd_cpu_requests: 100m
fluentd_mem_requests: 200Mi
fluentd_config_dir: /etc/fluent/config.d
# fluentd_config_file: fluentd.conf
@@ -1,22 +0,0 @@
---
- name: "Fluentd | copy config file"
template:
src: fluentd-config.yml.j2
dest: "{{ kube_config_dir }}/fluentd-config.yaml"
register: fluentd_config

- name: "Fluentd | create configMap"
command: "{{bin_dir}}/kubectl apply -f {{ kube_config_dir }}/fluentd-config.yaml"
run_once: true
when: fluentd_config.changed

- name: "Fluentd | Write fluentd daemonset"
template:
src: fluentd-ds.yml.j2
dest: "{{ kube_config_dir }}/fluentd-ds.yaml"
register: fluentd_ds_manifest

- name: "Fluentd | Create fluentd daemonset"
command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n kube-system"
run_once: true
when: fluentd_ds_manifest.changed
@@ -1,441 +0,0 @@
---
# https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.10/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: fluentd-config
namespace: "kube-system"
labels:
addonmanager.kubernetes.io/mode: Reconcile
data:
system.conf: |-
<system>
root_dir /tmp/fluentd-buffers/
</system>

containers.input.conf: |-
# This configuration file for Fluentd / td-agent is used
# to watch changes to Docker log files. The kubelet creates symlinks that
# capture the pod name, namespace, container name & Docker container ID
# to the docker logs for pods in the /var/log/containers directory on the host.
# If running this fluentd configuration in a Docker container, the /var/log
# directory should be mounted in the container.
#
# These logs are then submitted to Elasticsearch which assumes the
# installation of the fluent-plugin-elasticsearch & the
# fluent-plugin-kubernetes_metadata_filter plugins.
# See https://github.com/uken/fluent-plugin-elasticsearch &
# https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
# more information about the plugins.
#
# Example
# =======
# A line in the Docker log file might look like this JSON:
#
# {"log":"2014/09/25 21:15:03 Got request with path wombat\n",
# "stream":"stderr",
# "time":"2014-09-25T21:15:03.499185026Z"}
#
# The time_format specification below makes sure we properly
# parse the time format produced by Docker. This will be
# submitted to Elasticsearch and should appear like:
# $ curl 'http://elasticsearch-logging:9200/_search?pretty'
# ...
# {
# "_index" : "logstash-2014.09.25",
# "_type" : "fluentd",
# "_id" : "VBrbor2QTuGpsQyTCdfzqA",
# "_score" : 1.0,
# "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n",
# "stream":"stderr","tag":"docker.container.all",
# "@timestamp":"2014-09-25T22:45:50+00:00"}
# },
# ...
#
# The Kubernetes fluentd plugin is used to write the Kubernetes metadata to the log
# record & add labels to the log record if properly configured. This enables users
# to filter & search logs on any metadata.
# For example a Docker container's logs might be in the directory:
#
# /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
#
# and in the file:
#
# 997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
#
# where 997599971ee6... is the Docker ID of the running container.
# The Kubernetes kubelet makes a symbolic link to this file on the host machine
# in the /var/log/containers directory which includes the pod name and the Kubernetes
# container name:
#
# synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
# ->
# /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
#
# The /var/log directory on the host is mapped to the /var/log directory in the container
# running this instance of Fluentd and we end up collecting the file:
#
# /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
#
# This results in the tag:
#
# var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
#
# The Kubernetes fluentd plugin is used to extract the namespace, pod name & container name
# which are added to the log message as a kubernetes field object & the Docker container ID
# is also added under the docker field object.
# The final tag is:
#
# kubernetes.var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
#
# And the final log record look like:
#
# {
# "log":"2014/09/25 21:15:03 Got request with path wombat\n",
# "stream":"stderr",
# "time":"2014-09-25T21:15:03.499185026Z",
# "kubernetes": {
# "namespace": "default",
# "pod_name": "synthetic-logger-0.25lps-pod",
# "container_name": "synth-lgr"
# },
# "docker": {
# "container_id": "997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b"
# }
# }
#
# This makes it easier for users to search for logs by pod name or by
# the name of the Kubernetes container regardless of how many times the
# Kubernetes pod has been restarted (resulting in a several Docker container IDs).

# Json Log Example:
# {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
# CRI Log Example:
# 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
<source>
@id fluentd-containers.log
@type tail
path /var/log/containers/*.log
pos_file /var/log/es-containers.log.pos
time_format %Y-%m-%dT%H:%M:%S.%NZ
tag raw.kubernetes.*
read_from_head true
<parse>
@type multi_format
<pattern>
format json
time_key time
time_format %Y-%m-%dT%H:%M:%S.%NZ
</pattern>
<pattern>
format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
time_format %Y-%m-%dT%H:%M:%S.%N%:z
</pattern>
</parse>
</source>

# Detect exceptions in the log output and forward them as one log entry.
<match raw.kubernetes.**>
@id raw.kubernetes
@type detect_exceptions
remove_tag_prefix raw
message log
stream stream
multiline_flush_interval 5
max_bytes 500000
max_lines 1000
</match>

system.input.conf: |-
# Example:
# 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
<source>
@id minion
@type tail
format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
time_format %Y-%m-%d %H:%M:%S
path /var/log/salt/minion
pos_file /var/log/salt.pos
tag salt
</source>

# Example:
# Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
<source>
@id startupscript.log
@type tail
format syslog
path /var/log/startupscript.log
pos_file /var/log/es-startupscript.log.pos
tag startupscript
</source>

# Examples:
# time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
# time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
# TODO(random-liu): Remove this after cri container runtime rolls out.
<source>
@id docker.log
@type tail
format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=($<status_code>\d+))?/
path /var/log/docker.log
pos_file /var/log/es-docker.log.pos
tag docker
</source>

# Example:
# 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
<source>
@id etcd.log
@type tail
# Not parsing this, because it doesn't have anything particularly useful to
# parse out of it (like severities).
format none
path /var/log/etcd.log
pos_file /var/log/es-etcd.log.pos
tag etcd
</source>

# Multi-line parsing is required for all the kube logs because very large log
# statements, such as those that include entire object bodies, get split into
# multiple lines by glog.

# Example:
# I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
<source>
@id kubelet.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kubelet.log
pos_file /var/log/es-kubelet.log.pos
tag kubelet
</source>

# Example:
# I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
<source>
@id kube-proxy.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-proxy.log
pos_file /var/log/es-kube-proxy.log.pos
tag kube-proxy
</source>

# Example:
# I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
<source>
@id kube-apiserver.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-apiserver.log
pos_file /var/log/es-kube-apiserver.log.pos
tag kube-apiserver
</source>

# Example:
# I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
<source>
@id kube-controller-manager.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-controller-manager.log
pos_file /var/log/es-kube-controller-manager.log.pos
tag kube-controller-manager
</source>

# Example:
# W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
<source>
@id kube-scheduler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-scheduler.log
pos_file /var/log/es-kube-scheduler.log.pos
tag kube-scheduler
</source>

# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
@id rescheduler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/rescheduler.log
pos_file /var/log/es-rescheduler.log.pos
tag rescheduler
</source>

# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
@id glbc.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/glbc.log
pos_file /var/log/es-glbc.log.pos
tag glbc
</source>

# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
@id cluster-autoscaler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/cluster-autoscaler.log
pos_file /var/log/es-cluster-autoscaler.log.pos
tag cluster-autoscaler
</source>

# Logs from systemd-journal for interesting services.
# TODO(random-liu): Remove this after cri container runtime rolls out.
<source>
@id journald-docker
@type systemd
filters [{ "_SYSTEMD_UNIT": "docker.service" }]
<storage>
@type local
persistent true
</storage>
read_from_head true
tag docker
</source>

# <source>
# @id journald-container-runtime
# @type systemd
# filters [{ "_SYSTEMD_UNIT": "{% raw %}{{ container_runtime }} {% endraw %}.service" }]
# <storage>
# @type local
# persistent true
# </storage>
# read_from_head true
# tag container-runtime
# </source>

<source>
@id journald-kubelet
@type systemd
filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
<storage>
@type local
persistent true
</storage>
read_from_head true
tag kubelet
</source>

<source>
@id journald-node-problem-detector
@type systemd
filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
<storage>
@type local
persistent true
</storage>
read_from_head true
tag node-problem-detector
</source>

forward.input.conf: |-
# Takes the messages sent over TCP
<source>
@type forward
</source>

monitoring.conf: |-
# Prometheus Exporter Plugin
# input plugin that exports metrics
<source>
@type prometheus
</source>

<source>
@type monitor_agent
</source>

# input plugin that collects metrics from MonitorAgent
<source>
@type prometheus_monitor
<labels>
host ${hostname}
</labels>
</source>

# input plugin that collects metrics for output plugin
<source>
@type prometheus_output_monitor
<labels>
host ${hostname}
</labels>
</source>

# input plugin that collects metrics for in_tail plugin
<source>
@type prometheus_tail_monitor
<labels>
host ${hostname}
</labels>
</source>

output.conf: |-
# Enriches records with Kubernetes metadata
<filter kubernetes.**>
@type kubernetes_metadata
</filter>

<match **>
@id elasticsearch
@type elasticsearch
@log_level info
include_tag_key true
host elasticsearch-logging
port 9200
logstash_format true
<buffer>
@type file
path /var/log/fluentd-buffers/kubernetes.system.buffer
flush_mode interval
retry_type exponential_backoff
flush_thread_count 2
flush_interval 5s
retry_forever
retry_max_interval 30
chunk_limit_size 2M
queue_limit_length 8
overflow_action block
</buffer>
</match>
@@ -1,68 +0,0 @@
---
# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.10.2/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: "fluentd-es-{{ fluentd_version }}"
namespace: "kube-system"
labels:
k8s-app: fluentd-es
version: "{{ fluentd_version }}"
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: fluentd-es
version: "{{ fluentd_version }}"
template:
metadata:
labels:
k8s-app: fluentd-es
kubernetes.io/cluster-service: "true"
version: "{{ fluentd_version }}"
# This annotation ensures that fluentd does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that this does not guarantee admission on the nodes (#40573).
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-node-critical
serviceAccountName: efk
# When having win nodes in cluster without this patch, this pod cloud try to be created in windows
nodeSelector:
beta.kubernetes.io/os: linux
containers:
- name: fluentd-es
image: "{{ fluentd_image_repo }}:{{ fluentd_image_tag }}"
env:
- name: FLUENTD_ARGS
value: "--no-supervisor -q"
resources:
limits:
{% if fluentd_cpu_limit is defined and fluentd_cpu_limit != "0m" %}
cpu: {{ fluentd_cpu_limit }}
{% endif %}
memory: {{ fluentd_mem_limit }}
requests:
cpu: {{ fluentd_cpu_requests }}
memory: {{ fluentd_mem_requests }}
volumeMounts:
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: "{{ docker_daemon_graph }}/containers"
readOnly: true
- name: config-volume
mountPath: "{{ fluentd_config_dir }}"
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: {{ docker_daemon_graph }}/containers
- name: config-volume
configMap:
name: fluentd-config
@@ -1,7 +0,0 @@
---
kibana_cpu_limit: 100m
kibana_mem_limit: 0M
kibana_cpu_requests: 100m
kibana_mem_requests: 0M
kibana_service_port: 5601
kibana_base_url: "/api/v1/namespaces/kube-system/services/kibana-logging/proxy"
@@ -1,34 +0,0 @@
---
- name: "Kibana | Write Kibana deployment"
template:
src: kibana-deployment.yml.j2
dest: "{{ kube_config_dir }}/kibana-deployment.yaml"
register: kibana_deployment_manifest

- name: "Kibana | Create Kibana deployment"
kube:
filename: "{{kube_config_dir}}/kibana-deployment.yaml"
kubectl: "{{bin_dir}}/kubectl"
name: "kibana-logging"
namespace: "kube-system"
resource: "deployment"
state: "latest"
with_items: "{{ kibana_deployment_manifest.changed }}"
run_once: true

- name: "Kibana | Write Kibana service "
template:
src: kibana-service.yml.j2
dest: "{{ kube_config_dir }}/kibana-service.yaml"
register: kibana_service_manifest

- name: "Kibana | Create Kibana service"
kube:
filename: "{{kube_config_dir}}/kibana-service.yaml"
kubectl: "{{bin_dir}}/kubectl"
name: "kibana-logging"
namespace: "kube-system"
resource: "svc"
state: "latest"
with_items: "{{ kibana_service_manifest.changed }}"
run_once: true
@@ -1,49 +0,0 @@
---
# https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.10/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: kibana-logging
namespace: "kube-system"
labels:
k8s-app: kibana-logging
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
matchLabels:
k8s-app: kibana-logging
template:
metadata:
labels:
k8s-app: kibana-logging
spec:
containers:
- name: kibana-logging
image: "{{ kibana_image_repo }}:{{ kibana_image_tag }}"
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: {{ kibana_cpu_limit }}
{% if kibana_mem_limit is defined and kibana_mem_limit != "0M" %}
memory: "{{ kibana_mem_limit }}"
{% endif %}
requests:
cpu: {{ kibana_cpu_requests }}
{% if kibana_mem_requests is defined and kibana_mem_requests != "0M" %}
memory: "{{ kibana_mem_requests }}"
{% endif %}
env:
- name: "ELASTICSEARCH_URL"
value: "http://elasticsearch-logging:{{ elasticsearch_service_port }}"
- name: "SERVER_BASEPATH"
value: "{{ kibana_base_url }}"
- name: XPACK_MONITORING_ENABLED
value: "false"
- name: XPACK_SECURITY_ENABLED
value: "false"
ports:
- containerPort: 5601
name: ui
protocol: TCP
serviceAccountName: efk
@@ -1,18 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: kibana-logging
namespace: "kube-system"
labels:
k8s-app: kibana-logging
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Kibana"
spec:
ports:
- port: {{ kibana_service_port }}
protocol: TCP
targetPort: ui
selector:
k8s-app: kibana-logging
@@ -1,5 +0,0 @@
---
dependencies:
- role: kubernetes-apps/efk/elasticsearch
- role: kubernetes-apps/efk/fluentd
- role: kubernetes-apps/efk/kibana
@@ -6,14 +6,6 @@ dependencies:
tags:
- apps

- role: kubernetes-apps/efk
when:
- efk_enabled
- inventory_hostname == groups['kube-master'][0]
tags:
- apps
- efk

- role: kubernetes-apps/helm
when:
- helm_enabled
@@ -34,7 +34,7 @@
{{ bin_dir }}/kubectl get secrets --all-namespaces
-o 'jsonpath={range .items[*]}{"\n"}{.metadata.namespace}{" "}{.metadata.name}{" "}{.type}{end}'
| grep kubernetes.io/service-account-token
| egrep 'default-token|kube-proxy|kube-dns|dnsmasq|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|efk|tiller|local-volume-provisioner'
| egrep 'default-token|kube-proxy|kube-dns|dnsmasq|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|tiller|local-volume-provisioner'
register: tokens_to_delete
when: needs_rotation
@@ -39,7 +39,6 @@
- { name: download_run_once, value: "{{ download_run_once }}" }
- { name: deploy_netchecker, value: "{{ deploy_netchecker }}" }
- { name: download_always_pull, value: "{{ download_always_pull }}" }
- { name: efk_enabled, value: "{{ efk_enabled }}" }
- { name: helm_enabled, value: "{{ helm_enabled }}" }
- { name: openstack_lbaas_enabled, value: "{{ openstack_lbaas_enabled }}" }
ignore_errors: "{{ ignore_assert_errors }}"
@@ -238,7 +238,6 @@ k8s_image_pull_policy: IfNotPresent
dashboard_enabled: true

# Addons which can be enabled
efk_enabled: false
helm_enabled: false
registry_enabled: false
enable_network_policy: true
@@ -7,7 +7,6 @@ mode: ha
# Deployment settings
kube_network_plugin: flannel
helm_enabled: true
efk_enabled: true
kubernetes_audit: true
etcd_events_cluster_setup: true
local_volume_provisioner_enabled: true