通过docker stack部署elfk。elfk最新版本:7.5.1
swarm集群(一个节点):
manager 192.168.30.128
# Create the build-context directory layout for all four ELFK components.
# (-p makes this idempotent and creates missing parents.)
mkdir -p /home/elfk/{elasticsearch,logstash,kibana,filebeat}/config
mkdir -p /home/elfk/logstash/pipeline
cd /home/elfk
tree .
.
├── docker-stack.yml
├── elasticsearch
│   ├── config
│   │   └── elasticsearch.yml
│   └── Dockerfile
├── filebeat
│   ├── config
│   │   └── filebeat.yml
│   └── Dockerfile
├── kibana
│   ├── config
│   │   └── kibana.yml
│   └── Dockerfile
└── logstash
    ├── config
    │   └── logstash.yml
    ├── Dockerfile
    └── pipeline
        └── logstash.conf

9 directories, 10 files
elasticsearch
Dockerfile
vim /home/elfk/elasticsearch/Dockerfile
# Build arg so the ELK version can be overridden at build time.
ARG ELK_VERSION=7.5.1

# https://github.com/elastic/elasticsearch-docker
# FROM docker.elastic.co/elasticsearch/elasticsearch:${ELK_VERSION}
FROM elasticsearch:${ELK_VERSION}

# Add your elasticsearch plugins setup here
# Example: RUN elasticsearch-plugin install analysis-icu
elasticsearch.yml
vim /home/elfk/elasticsearch/config/elasticsearch.yml
---
## Default Elasticsearch configuration from Elasticsearch base image.
## https://github.com/elastic/elasticsearch/blob/master/distribution/docker/src/docker/config/elasticsearch.yml
#
cluster.name: "docker-cluster"
network.host: 0.0.0.0

## X-Pack settings
## see https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-xpack.html
#
# 'trial' is the 30-day trial license; change to 'basic' for the free tier
xpack.license.self_generated.type: trial
xpack.security.enabled: true
xpack.monitoring.collection.enabled: true

http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-headers: Authorization,X-Requested-With,Content-Length,Content-Type
# Build the local Elasticsearch image used by the stack file.
cd /home/elfk/elasticsearch
docker build -t elfk_elasticsearch:latest .
logstash
Dockerfile
vim /home/elfk/logstash/Dockerfile
# Build arg so the ELK version can be overridden at build time.
ARG ELK_VERSION=7.5.1

# https://github.com/elastic/logstash-docker
# FROM docker.elastic.co/logstash/logstash:${ELK_VERSION}
FROM logstash:${ELK_VERSION}

# Add your logstash plugins setup here
# Example: RUN logstash-plugin install logstash-filter-json
RUN logstash-plugin install logstash-filter-multiline \
    && logstash-plugin install logstash-output-zabbix
logstash.yml
vim /home/elfk/logstash/config/logstash.yml
---
## Default Logstash configuration from Logstash base image.
## https://github.com/elastic/logstash/blob/master/docker/data/logstash/config/logstash-full.yml
#
http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ]

## X-Pack security credentials
#
xpack.monitoring.enabled: true
xpack.monitoring.elasticsearch.username: elastic
xpack.monitoring.elasticsearch.password: changeme
xpack.monitoring.collection.interval: 10s
pipeline
vim /home/elfk/logstash/pipeline/logstash.conf
# Pipeline: receive events over TCP 5000 and index them into Elasticsearch.
input {
  tcp {
    port => 5000
  }
}

#input {
#  beats {
#    port => 5044
#  }
#}

output {
  elasticsearch {
    hosts => "elasticsearch:9200"
    user => "elastic"
    password => "changeme"
  }
}
# Build the local Logstash image used by the stack file.
cd /home/elfk/logstash
docker build -t elfk_logstash:latest .
kibana
Dockerfile
vim /home/elfk/kibana/Dockerfile
# Build arg so the ELK version can be overridden at build time.
ARG ELK_VERSION=7.5.1

# https://github.com/elastic/kibana-docker
# FROM docker.elastic.co/kibana/kibana:${ELK_VERSION}
FROM kibana:${ELK_VERSION}

# Add your kibana plugins setup here
# Example: RUN kibana-plugin install
kibana.yml
vim /home/elfk/kibana/config/kibana.yml
---
## Default Kibana configuration from Kibana base image.
## https://github.com/elastic/kibana/blob/master/src/dev/build/tasks/os_packages/docker_generator/templates/kibana_yml.template.js
#
server.name: kibana
server.host: "0"
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
xpack.monitoring.ui.container.elasticsearch.enabled: true

## X-Pack security credentials
#
elasticsearch.username: elastic
elasticsearch.password: changeme
# Build the local Kibana image used by the stack file.
cd /home/elfk/kibana
docker build -t elfk_kibana:latest .
docker-stack.yml
vim /home/elfk/docker-stack.yml
version: '3.7'

services:

  elasticsearch:
    image: elfk_elasticsearch:latest
    ports:
      - "9200:9200"
      - "9300:9300"
    configs:
      - source: elastic_config
        target: /usr/share/elasticsearch/config/elasticsearch.yml
    volumes:
      - type: volume
        source: elasticsearch
        target: /usr/share/elasticsearch/data
    environment:
      ES_JAVA_OPTS: "-Xmx256m -Xms256m"
      ELASTIC_PASSWORD: changeme
      discovery.type: single-node
    networks:
      - elk
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.role == manager

  logstash:
    image: elfk_logstash:latest
    ports:
      - "5000:5000"
      - "9600:9600"
    configs:
      - source: logstash_config
        target: /usr/share/logstash/config/logstash.yml
      - source: logstash_pipeline
        target: /usr/share/logstash/pipeline/logstash.conf
    environment:
      LS_JAVA_OPTS: "-Xmx256m -Xms256m"
    networks:
      - elk
    deploy:
      mode: replicated
      replicas: 1

  kibana:
    image: elfk_kibana:latest
    ports:
      - "5601:5601"
    configs:
      - source: kibana_config
        target: /usr/share/kibana/config/kibana.yml
    networks:
      - elk
    deploy:
      mode: replicated
      replicas: 1

  elastichd:
    image: containerize/elastichd
    ports:
      - "9800:9800"
    networks:
      - elk
    deploy:
      mode: replicated
      replicas: 1

configs:
  elastic_config:
    file: ./elasticsearch/config/elasticsearch.yml
  logstash_config:
    file: ./logstash/config/logstash.yml
  logstash_pipeline:
    file: ./logstash/pipeline/logstash.conf
  kibana_config:
    file: ./kibana/config/kibana.yml

volumes:
  elasticsearch:
    driver: local
    driver_opts:
      type: none
      o: bind
      # NOTE(review): this host directory must already exist before deploy,
      # otherwise the bind-backed volume creation fails — confirm it is created.
      device: /home/elfk/elasticsearch/data

networks:
  elk:
    driver: overlay
# Elasticsearch requires vm.max_map_count to be raised; persist the setting
# once in /etc/sysctl.conf, then apply it and deploy the stack.
if ! grep -q 'vm.max_map_count' /etc/sysctl.conf; then
  echo 'vm.max_map_count=655360' >> /etc/sysctl.conf
fi
sysctl -p

cd /home/elfk
docker stack deploy elfk --compose-file docker-stack.yml
docker service ls
ID                  NAME                 MODE                REPLICAS            IMAGE                           PORTS
mt8b42mup3xw        elfk_elastichd       replicated          1/1                 containerize/elastichd:latest   *:9800->9800/tcp
p1oyya3oj5zv        elfk_elasticsearch   replicated          1/1                 elfk_elasticsearch:latest       *:9200->9200/tcp, *:9300->9300/tcp
qltg67gyh38o        elfk_kibana          replicated          1/1                 elfk_kibana:latest              *:5601->5601/tcp
902xoqx7vyqk        elfk_logstash        replicated          1/1                 elfk_logstash:latest            *:5040->5040/tcp, *:9600->9600/tcp

netstat -lntp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      16001/sshd
tcp        0      0 127.0.0.1:25            0.0.0.0:*               LISTEN      6158/master
tcp6       0      0 :::9200                 :::*                    LISTEN      15120/dockerd
tcp6       0      0 :::5000                 :::*                    LISTEN      15120/dockerd
tcp6       0      0 :::9300                 :::*                    LISTEN      15120/dockerd
tcp6       0      0 :::22                   :::*                    LISTEN      16001/sshd
tcp6       0      0 ::1:25                  :::*                    LISTEN      6158/master
tcp6       0      0 :::9600                 :::*                    LISTEN      15120/dockerd
tcp6       0      0 :::5601                 :::*                    LISTEN      15120/dockerd
tcp6       0      0 :::9800                 :::*                    LISTEN      15120/dockerd
tcp6       0      0 :::2377                 :::*                    LISTEN      15120/dockerd
tcp6       0      0 :::7946                 :::*                    LISTEN      15120/dockerd
kibana页面,账号:elastic,密码:changeme
docker stack部署完成,这里省略日志收集测试过程,有兴趣可自行配置logstash,自定义日志处理,并结合zabbix做错误日志告警。
另外,elastichd是类似elasticsearch-head的可视化工具,用来查看elasticsearch。
无密码连接es:http://host:port;带密码连接es:http://user:password@host:port,如http://elastic:changeme@ip:9200。
filebeat
Dockerfile
vim /home/elfk/filebeat/Dockerfile
# Build arg so the ELK version can be overridden at build time.
ARG ELK_VERSION=7.5.1

# https://github.com/elastic/beats-docker
# FROM docker.elastic.co/beats/filebeat:${ELK_VERSION}
FROM elastic/filebeat:${ELK_VERSION}

COPY config/filebeat.yml /usr/share/filebeat/filebeat.yml
filebeat.yml
vim /home/elfk/filebeat/config/filebeat.yml
---
filebeat.inputs:
- type: log
  enabled: false
  paths:
    - /var/log/*.log

filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false

processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~

output.elasticsearch:
  hosts: ['elasticsearch:9200']
  username: elastic
  password: changeme

#output.logstash:
#  hosts: ["localhost:5044"]
#  enabled: true
#  worker: 1
#  compression_level: 3
# Build the local Filebeat image for the log-shipping hosts.
cd /home/elfk/filebeat
docker build -t elfk_filebeat:latest .
filebeat容器在需要收集日志的机器上运行,将日志传输至elasticsearch或logstash。
# Run Filebeat on each host whose logs should be shipped.
# NOTE: '-e -strict.perms=false' are Filebeat CLI flags (log to stderr; do not
# enforce config-file permission checks), so they must come AFTER the image
# name — placed before it, docker would try to parse them as an env-var option.
docker run -d \
  --name=filebeat \
  --user=root \
  -v /var/lib/docker/containers:/var/lib/docker/containers:ro \
  -v /var/run/docker.sock:/var/run/docker.sock:ro \
  -v /home/logs/:/home/logs/:rw \
  elfk_filebeat:latest \
  -e -strict.perms=false
对于要收集的日志,映射时一定要给rw权限,否则即使宿主机上产生新日志,filebeat容器内也不会同步产生新日志。