ELK + Filebeat + Kafka + ZooKeeper Setup Guide

OS: CentOS 6.5

JDK: 1.8

Elasticsearch 6.0.0
Logstash 6.0.0
Kibana 6.0.0
ZooKeeper 3.5.3-beta
Kafka 1.0.0 (kafka_2.12-1.0.0, Scala 2.12 build)
Filebeat 6.0.0

wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-6.0.0-linux-x86_64.tar.gz
wget http://mirrors.hust.edu.cn/apache/kafka/1.0.0/kafka_2.12-1.0.0.tgz
wget http://mirror.bit.edu.cn/apache/zookeeper/zookeeper-3.5.3-beta/zookeeper-3.5.3-beta.tar.gz
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.0.0.tar.gz
wget https://artifacts.elastic.co/downloads/kibana/kibana-6.0.0-linux-x86_64.tar.gz
wget https://artifacts.elastic.co/downloads/logstash/logstash-6.0.0.tar.gz
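After downloading, unpack each tarball; the rest of this guide assumes you work from the extracted directories:

tar -xzf elasticsearch-6.0.0.tar.gz
tar -xzf logstash-6.0.0.tar.gz
tar -xzf kibana-6.0.0-linux-x86_64.tar.gz
tar -xzf zookeeper-3.5.3-beta.tar.gz
tar -xzf kafka_2.12-1.0.0.tgz
tar -xzf filebeat-6.0.0-linux-x86_64.tar.gz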

Workflow

Filebeat collects the logs and ships them to Kafka; Logstash consumes from Kafka and outputs to Elasticsearch; finally Kibana presents the data in a web UI.
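Concretely, the data path for this guide is:

/home/test/backup/mysql-*.log -> Filebeat -> Kafka topic "guo" -> Logstash -> Elasticsearch index mysql-YYYY.MM.dd -> Kibana (port 5601)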

Filebeat node configuration file

[root@centos199 filebeat-6.0.0-linux-x86_64]# cat filebeat.yml
filebeat.prospectors:
- input_type: log
  paths:
    - /home/test/backup/mysql-*.log
  # document_type was removed in Filebeat 6.0; carry the label in a custom field instead
  fields:
    document_type: mysql
  tail_files: true
  multiline.pattern: '^\[[0-9]{4}-[0-9]{2}-[0-9]{2}'
  multiline.negate: true
  multiline.match: after

output.kafka:
  hosts: ["192.168.1.99:9092"]
  topic: guo
  partition.round_robin:
    reachable_only: false
  required_acks: 1
  compression: gzip
  max_message_bytes: 1000000
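With these multiline settings, any line that does not begin with a [YYYY-MM-DD timestamp is appended to the preceding line that does, so a statement and its continuation lines ship as one event. A hypothetical excerpt:

[2017-11-20 10:15:32] ERROR query failed
    at some.stack.Frame
[2017-11-20 10:15:33] INFO next statement

Here the first two lines become a single event.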

Start Filebeat: nohup ./filebeat -c filebeat.yml &
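If nothing shows up downstream, first make sure the configuration parses cleanly (the test subcommand is available in Filebeat 6.x):

./filebeat test config -c filebeat.yml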

Kafka configuration file

[root@centos199 config]# cat server.properties |grep -E -v "^#|^$"
broker.id=0   (for a second Kafka broker, just set a different id)
port = 9092
host.name = 192.168.1.99
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/tmp/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=192.168.1.99:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0

Start Kafka (ZooKeeper, configured in the next section, must already be running): ./bin/kafka-server-start.sh -daemon config/server.properties

Create a test topic: ./bin/kafka-topics.sh --create --zookeeper 192.168.1.99:2181 --replication-factor 1 --partitions 2 --topic ecplogs
Consume test messages: ./bin/kafka-console-consumer.sh --zookeeper 192.168.1.99:2181 --topic ecplogs --from-beginning
Produce test messages: ./bin/kafka-console-producer.sh --broker-list 192.168.1.99:9092 --topic ecplogs
(ecplogs is just a throwaway topic for this smoke test; the actual pipeline uses the topic guo.)
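To see which topics exist (guo will be auto-created once Filebeat starts publishing, assuming topic auto-creation is left at its default):

./bin/kafka-topics.sh --list --zookeeper 192.168.1.99:2181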

ZooKeeper configuration file

[root@centos199 conf]# cat zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/home/test/zookeeper
clientPort=2181
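Make sure the dataDir exists before the first start (creating it up front also avoids permission surprises):

mkdir -p /home/test/zookeeper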

Start ZooKeeper: ./bin/zkServer.sh start
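Confirm it is running:

./bin/zkServer.sh status

For this single-node setup it should report standalone mode.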

Logstash configuration file

[root@centos199 config]# cat logstash.conf
input {
  kafka {
    bootstrap_servers => "192.168.1.99:9092"
    topics => ["guo"]
    codec => "json"
  }
}
output {
  elasticsearch {
    hosts => ["192.168.1.99:9200"]
    index => "mysql-%{+YYYY.MM.dd}"
    template_overwrite => true
  }
}
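Before starting Logstash, you can syntax-check the pipeline:

./bin/logstash -f config/logstash.conf --config.test_and_exit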

Start Logstash: ./bin/logstash -f config/logstash.conf

Elasticsearch configuration file

[root@centos199 config]# cat elasticsearch.yml |grep -E -v "^#|^$"
path.data: /home/test/elk/elastic/data
path.logs: /home/test/elk/elastic/logs
bootstrap.memory_lock: false
bootstrap.system_call_filter: false
network.host: 192.168.1.99
http.port: 9200
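Elasticsearch 6.x enforces kernel limits at startup; on a stock CentOS 6.5 machine you will typically need, as root:

sysctl -w vm.max_map_count=262144
ulimit -n 65536    # or set a permanent nofile limit for the elk user in /etc/security/limits.conf

(bootstrap.system_call_filter: false is set above because the CentOS 6 kernel lacks the seccomp support Elasticsearch probes for.)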

Start Elasticsearch as a non-root user (it refuses to run as root):

[elk@centos199 elasticsearch-6.0.0]$ ./bin/elasticsearch -d
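Once it is up, verify with a quick curl, and later check that the mysql-* index is being created:

curl http://192.168.1.99:9200/
curl http://192.168.1.99:9200/_cat/indices?v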

Kibana configuration file

[root@centos199 config]# cat kibana.yml |grep -E -v "^#|^$"
server.port: 5601
server.host: "192.168.1.99"
elasticsearch.url: "http://192.168.1.99:9200"

Start Kibana: nohup ./bin/kibana &
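Then open http://192.168.1.99:5601 in a browser and define an index pattern of mysql-* (Management > Index Patterns) to browse the incoming logs.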
