🔧 Add ELK configuration
parent a8fc56f7da
commit ade84ffcda

@@ -0,0 +1,177 @@
###################### Filebeat Configuration Example #########################

# This file is an example configuration file highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html

# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.

#=========================== Filebeat prospectors =============================

filebeat.prospectors:

# Each - is a prospector. Most options can be set at the prospector level, so
# you can use different prospectors for various configurations.
# Below are the prospector specific configurations.

- type: log

  # Change to true to enable this prospector configuration.
  enabled: true

  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    #- /var/log/*.log
    #- c:\programdata\elasticsearch\logs\*
    - /home/xyz/log/*.log

  # Exclude lines. A list of regular expressions to match. It drops the lines that are
  # matching any regular expression from the list.
  #exclude_lines: ['^DBG']

  # Include lines. A list of regular expressions to match. It exports the lines that are
  # matching any regular expression from the list.
  #include_lines: ['^ERR', '^WARN']

  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
  # are matching any regular expression from the list. By default, no files are dropped.
  #exclude_files: ['.gz$']

  # Optional additional fields. These fields can be freely picked
  # to add additional information to the crawled log files for filtering
  #fields:
  #  level: debug
  #  review: 1

  ### Multiline options

  # Multiline can be used for log messages spanning multiple lines. This is common
  # for Java stack traces or C-line continuation

  # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
  #multiline.pattern: ^\[

  # Defines if the pattern set under pattern should be negated or not. Default is false.
  #multiline.negate: false

  # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
  # that was (not) matched before or after or as long as a pattern is not matched based on negate.
  # Note: After is the equivalent to previous and before is the equivalent to next in Logstash
  #multiline.match: after
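
  # For example (an illustrative setting, not enabled here): to fold a Java
  # stack trace into the event for its first line, match lines starting with
  # "[" and append every non-matching continuation line to the previous match:
  #multiline.pattern: '^\['
  #multiline.negate: true
  #multiline.match: after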

#============================= Filebeat modules ===============================

filebeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml

  # Set to true to enable config reloading
  reload.enabled: true

  # Period on which files under path should be checked for changes
  #reload.period: 10s

#==================== Elasticsearch template setting ==========================

setup.template.settings:
  index.number_of_shards: 3
  #index.codec: best_compression
  #_source.enabled: false

#================================ General =====================================

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
name: 127.0.0.1

# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output.
fields:
  profile: development

#============================== Dashboards =====================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here, or by using the `-setup` CLI flag or the `setup` command.
setup.dashboards.enabled: true

# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:

#============================== Kibana =====================================

# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:

  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  host: "192.168.28.11:5601"

#============================= Elastic Cloud ==================================

# These settings simplify using filebeat with the Elastic Cloud (https://cloud.elastic.co/).

# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:

# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:

#================================ Outputs =====================================

# Configure what output to use when sending the data collected by the beat.

#-------------------------- Elasticsearch output ------------------------------
#output.elasticsearch:
  # Array of hosts to connect to.
  #hosts: ["192.168.28.11:9200"]

  # Optional protocol and basic auth credentials.
  #protocol: "http"
  #username: "elastic"
  #password: "changeme"
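
# Note: Filebeat supports only one enabled output at a time, which is why the
# Elasticsearch output above stays commented out while the Logstash output
# below is active.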

#----------------------------- Logstash output --------------------------------
output.logstash:
  # The Logstash hosts
  hosts: ["192.168.28.32:5044"]

  # Optional SSL. By default is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

#================================ Logging =====================================

# Sets log level. The default log level is info.
# Available log levels are: critical, error, warning, info, debug
#logging.level: debug

# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publish", "service".
logging.selectors: ["*"]

@@ -0,0 +1,57 @@
<?xml version="1.0" encoding="UTF-8" ?>

<!-- logback has five effective levels: TRACE, DEBUG, INFO, WARN, and ERROR, in ascending order of priority -->
<configuration scan="true" scanPeriod="60 seconds" debug="false">

    <property name="FILE_NAME" value="javatool"/>

    <!-- Print log records to the console -->
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>%d{HH:mm:ss.SSS} [%thread] [%-5p] %c{36}.%M - %m%n</pattern>
        </encoder>
    </appender>

    <!-- RollingFileAppender begin -->
    <appender name="ALL" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- Roll over both by time (daily) and by file size (30MB);
             SizeAndTimeBasedRollingPolicy covers both cases in a single policy,
             with the %i index distinguishing same-day size-based rollovers -->
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${user.dir}/logs/${FILE_NAME}-all.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <maxFileSize>30MB</maxFileSize>
            <maxHistory>30</maxHistory>
        </rollingPolicy>

        <encoder>
            <pattern>%d{HH:mm:ss.SSS} [%thread] [%-5p] %c{36}.%M - %m%n</pattern>
        </encoder>
    </appender>
    <!-- RollingFileAppender end -->
<appender name="ELK-TCP" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
|
||||
<!--
|
||||
destination 是 logstash 服务的 host:port,
|
||||
相当于和 logstash 建立了管道,将日志数据定向传输到 logstash
|
||||
-->
|
||||
<destination>192.168.28.32:9251</destination>
|
||||
<encoder charset="UTF-8" class="net.logstash.logback.encoder.LogstashEncoder">
|
||||
<customFields>{"appname":"javatool"}</customFields>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<!-- RollingFileAppender end -->
|
||||
|
||||
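
    <!-- LogstashTcpSocketAppender and LogstashEncoder come from the
         logstash-logback-encoder library (Maven group net.logstash.logback),
         which must be on the application classpath; no version is pinned
         anywhere in this commit -->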

    <!-- logger begin -->
    <!-- This project's own logging, printed per level -->
    <logger name="cn.xyz" level="TRACE">
        <appender-ref ref="ELK-TCP"/>
        <appender-ref ref="ALL"/>
    </logger>

    <root level="TRACE">
        <appender-ref ref="STDOUT"/>
    </root>
    <!-- logger end -->

</configuration>
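
For reference, a minimal sketch of application code that would exercise this configuration (the class name and messages are illustrative, and it assumes slf4j-api and logstash-logback-encoder on the classpath). Any logger created under the cn.xyz package matches the <logger name="cn.xyz"> rule above, so its events reach both the ALL rolling file and the ELK-TCP appender:

package cn.xyz;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ElkLoggingDemo {
    private static final Logger log = LoggerFactory.getLogger(ElkLoggingDemo.class);

    public static void main(String[] args) {
        // shipped to Logstash as a JSON event carrying appname=javatool
        log.info("service started");
        try {
            throw new IllegalStateException("boom");
        } catch (IllegalStateException e) {
            // LogstashEncoder serializes the stack trace into the same JSON
            // event, so no multiline handling is needed downstream
            log.error("request failed", e);
        }
    }
}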

@@ -0,0 +1,12 @@
input {
  tcp {
    port => 9251
    codec => json_lines
    mode => server
    tags => ["javaapp"]
  }
}
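
# Note: the filebeat configuration above ships to port 5044, which this
# pipeline does not listen on; receiving it would need a separate
# beats { port => 5044 } input, presumably defined in another pipeline file.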

output {
  elasticsearch { hosts => ["localhost:9200"] }
  stdout { codec => rubydebug }
}