Deploying ELK with docker-compose

Date: 2023-03-09 05:22:30

This post is based on

https://www.cnblogs.com/lirunzhou/p/10550675.html

and builds on it by converting the ELK stack to docker-compose.yml form.

A few things to keep in mind when using docker-compose:

1. Don't treat a Docker container as a data container; always keep data outside the container via volumes.

2. Don't share your docker-compose files with others; they contain your server details.

3. Stick to docker-compose commands for operations; don't mix manual docker commands and docker-compose on the same services.

4. Write a small script to automatically back up the data that Docker maps out (a minimal sketch follows this list).

5. Don't put all services into a single container.
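
For point 4, a minimal sketch of such a backup script, assuming the Elasticsearch data directory is mapped to ./data as in the compose file further down; the /backup/elk target path is made up for illustration:

#!/bin/sh
# archive the docker-mapped data directory, one tarball per day
BACKUP_DIR=/backup/elk                 # hypothetical target path, adjust as needed
mkdir -p "$BACKUP_DIR"
tar czf "$BACKUP_DIR/es-data-$(date +%F).tar.gz" ./data
# keep two weeks of archives
find "$BACKUP_DIR" -name 'es-data-*.tar.gz' -mtime +14 -delete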

Environment:

Management node: 10.191.51.44

Data nodes: 10.191.51.45/46/47

The files:

es docker-compose.yml

version: '3'  # assumed; the value was blank in the source
services:
  elasticsearch:
    container_name: ES
    environment:
      - ES_JAVA_OPTS=-Xms4G -Xmx4G
    image: 10.191.51.5/elk/elasticsearch:6.5.
    volumes:
      - ./config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - ./data:/usr/share/elasticsearch/data
    ports:
      - "9200:9200"
      - "9300:9300"

Management node elasticsearch.yml

cluster.name: elasticsearch-cluster
node.name: es-node1
network.bind_host: 0.0.0.0
network.publish_host: 10.191.51.44
http.port: 9200          # ports were blank in the source; 9200/9300 match the compose port mappings
transport.tcp.port: 9300
http.cors.enabled: true
http.cors.allow-origin: "*"
node.master: false       # this node only coordinates and ingests; it holds no data and cannot be elected master
node.data: false
node.ingest: true
discovery.zen.ping.unicast.hosts: ["10.191.51.44:9300","10.191.51.45:9300","10.191.51.46:9300","10.191.51.47:9300"]
discovery.zen.minimum_master_nodes: 2   # blank in the source; 2 is the quorum of the three master-eligible data nodes
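
Once all four nodes are started, the cluster can be checked through this coordinating node:

# one line per node, with each node's master/data/ingest roles
curl "http://10.191.51.44:9200/_cat/nodes?v"
# status should be green once all shards are allocated
curl "http://10.191.51.44:9200/_cluster/health?pretty"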

Data node elasticsearch.yml (shown for 10.191.51.45)

cluster.name: elasticsearch-cluster
node.name: es-node2
network.bind_host: 0.0.0.0
network.publish_host: 10.191.51.45
http.port: 9200          # ports were blank in the source; 9200/9300 match the compose port mappings
transport.tcp.port: 9300
http.cors.enabled: true
http.cors.allow-origin: "*"
node.master: true
node.data: true
discovery.zen.ping.unicast.hosts: ["10.191.51.44:9300","10.191.51.45:9300","10.191.51.46:9300","10.191.51.47:9300"]
discovery.zen.minimum_master_nodes: 2   # blank in the source; 2 is the quorum of the three master-eligible data nodes
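
The configs for 10.191.51.46 and 10.191.51.47 would presumably differ only in these two lines (es-node3/es-node4 are my guess, extending the naming scheme above):

node.name: es-node3                  # es-node4 on 10.191.51.47
network.publish_host: 10.191.51.46   # 10.191.51.47 on the last node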

kafka docker-compose.yml (the environment block must be filled in per broker: each needs a unique KAFKA_BROKER_ID, and KAFKA_ADVERTISED_LISTENERS must carry that node's own IP)

version: '3'  # assumed; the value was blank in the source
services:
  kafka:
    container_name: kafka0
    environment:
      - KAFKA_BROKER_ID=                            # blank in the source; must be unique per broker
      - KAFKA_ZOOKEEPER_CONNECT=10.191.51.44:2181   # port was blank; 2181 is ZooKeeper's default
      - KAFKA_DEFAULT_REPLICATION_FACTOR=           # blank in the source
      - KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092
      - KAFKA_ADVERTISED_HOST_NAME=kafka1
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://10.191.51.45:9092
      - KAFKA_DELETE_TOPIC_ENABLE=true
    image: 10.191.51.5/elk/wurstmeister/kafka:2.1.
    ports:
      - "9092:9092"

logstash docker-compose.yml

version: '3'  # assumed; the value was blank in the source
services:
  logstash:
    container_name: logstash
    image: 10.191.51.5/elk/logstash:6.5.
    volumes:
      - ./config/:/usr/share/logstash/config/
      - ./pipeline/:/usr/share/logstash/pipeline/
    ports:
      - "5044:5044"
      - "9600:9600"

pipeline/logstash.conf

input {
  kafka {
    bootstrap_servers => "10.191.51.45:9092,10.191.51.46:9092,10.191.51.47:9092"
    client_id => "logstash-garnet"
    group_id => "logstash-garnet"
    consumer_threads => 1    # blank in the source; 1 is the plugin default
    decorate_events => true
    topics => ["garnet_garnetAll_log"]
    type => "garnet_all_log"
  }
}

filter {
  if [type] == "garnet_all_log" {
    # replace any embedded @timestamp key with sampling_time, presumably so it
    # does not clash with Logstash's own @timestamp when the JSON is parsed
    mutate {
      gsub => ["message", "@timestamp", "sampling_time"]
    }
    json {
      source => "message"
    }
    grok {
      match => {
        "message" => [ "%{TIMESTAMP_ISO8601:log_time}\s+\[(?<thread_name>[a-zA-Z0-9\-\s]*)\]\s+%{LOGLEVEL:log_level}\s+\[(?<class_name>[a-zA-Z0-9.]*)\]\s+%{GREEDYDATA:msg_info}parameters=%{GREEDYDATA:msg_json_info}",
                       "%{TIMESTAMP_ISO8601:log_time}\s+\[(?<thread_name>[a-zA-Z0-9\-\s]*)\]\s+%{LOGLEVEL:log_level}\s+\[(?<class_name>[a-zA-Z0-9.]*)\]\s+%{GREEDYDATA:msg_info}" ]
      }
    }
    # if a trailing parameters=... JSON blob was captured, expand it into fields
    if [msg_json_info] {
      json {
        source => "msg_json_info"
      }
    }
  }
}

output {
  if [type] == "garnet_all_log" {
    elasticsearch {
      hosts => ["10.191.51.45:9200","10.191.51.46:9200","10.191.51.47:9200"]
      index => "garnet_all-%{+YYYY.MM.dd}"
    }
  }
}
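
To make the grok patterns concrete, here is a hypothetical log line that the first (parameters-bearing) pattern would match:

2023-03-09 05:22:30.123 [http-nio-8080-exec-1] INFO [com.garnet.OrderService] create order ok parameters={"orderId": "42"}

This yields log_time, thread_name, log_level, class_name, msg_info ("create order ok ") and msg_json_info ({"orderId": "42"}); the json filter on msg_json_info then lifts orderId into its own field. Lines without a parameters= suffix fall through to the second, shorter pattern.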

config/pipelines.yml

- pipeline.id: pipeline_1
  pipeline.batch.size: 125   # blank in the source; 125 is the Logstash default
  pipeline.batch.delay: 50   # blank in the source; 50 ms is the Logstash default
  path.config: /usr/share/logstash/pipeline

config/logstash.yml

log.level: warn
xpack.license.self_generated.type: basic
xpack.monitoring.enabled: true
xpack.monitoring.elasticsearch.url: "http://10.191.51.44:9200"

Changes to config/jvm.options:

-Xms4g
-Xmx4g
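
As a final smoke test, push a line into the Kafka topic and confirm it lands in the daily index; again assuming the wurstmeister image layout:

docker exec -it kafka0 /opt/kafka/bin/kafka-console-producer.sh \
  --broker-list 10.191.51.45:9092 --topic garnet_garnetAll_log
# type a log line, then after a few seconds:
curl "http://10.191.51.45:9200/garnet_all-*/_count?pretty"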