# Create a bridge network for the containers
docker network create --driver bridge --subnet 192.168.0.0/16 --gateway 192.168.0.1 esnet
# Pull the image
docker pull elasticsearch:7.2.0
# Run a single-node ES instance with a capped JVM heap
docker run -d --name elasticsearch -p 9200:9200 -p 9310:9300 -e "discovery.type=single-node" -e ES_JAVA_OPTS="-Xms100m -Xmx200m" --net esnet elasticsearch:7.2.0
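# A quick sanity check once the container is up (ports per the -p mapping above;
# give ES a few seconds to start):
curl http://localhost:9200
curl http://localhost:9200/_cat/health?v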
# Create the network (skip if the esnet network from the single-node setup already exists)
docker network create --driver bridge --subnet 192.168.0.0/16 --gateway 192.168.0.1 esnet
# Pull the image
docker pull elasticsearch:7.2.0
# Prepare the host directories to be mounted
mkdir /Users/yupeng/elasticsearch
mkdir /Users/yupeng/elasticsearch/{es1,es2,es3}
mkdir /Users/yupeng/elasticsearch/es1/{config,data}
mkdir /Users/yupeng/elasticsearch/es2/{config,data}
mkdir /Users/yupeng/elasticsearch/es3/{config,data}
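# Note: on Linux hosts the ES container runs as uid 1000; if startup fails with
# permission errors on the data mount, loosening permissions is one workaround
# (acceptable for a local test setup, not for production):
chmod -R 777 /Users/yupeng/elasticsearch/es{1,2,3}/data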
vim /Users/yupeng/elasticsearch/es1/config/elasticsearch.yml
# Master-eligible node node1: elasticsearch.yml
cluster.name: "elasticsearch"
node.name: node-001
node.master: true
node.data: true
network.bind_host: 0.0.0.0
network.publish_host: 192.168.2.103
http.port: 9210
transport.tcp.port: 9310
discovery.seed_hosts: ["192.168.2.103:9310","192.168.2.103:9311","192.168.2.103:9312"]
cluster.initial_master_nodes: ["192.168.2.103:9310"]
discovery.zen.minimum_master_nodes: 1
http.cors.enabled: true
http.cors.allow-origin: "*"
vim /Users/yupeng/elasticsearch/es2/config/elasticsearch.yml
# Data node node2: elasticsearch.yml
cluster.name: "elasticsearch"
node.name: node-002
node.master: true
node.data: true
network.bind_host: 0.0.0.0
network.publish_host: 192.168.2.103
http.port: 9211
transport.tcp.port: 9311
discovery.seed_hosts: ["192.168.2.103:9310","192.168.2.103:9311","192.168.2.103:9312"]
cluster.initial_master_nodes: ["192.168.2.103:9310"]
discovery.zen.minimum_master_nodes: 1
http.cors.enabled: true
http.cors.allow-origin: "*"
vim /Users/yupeng/elasticsearch/es3/config/elasticsearch.yml
# Data node node3: elasticsearch.yml
cluster.name: "elasticsearch"
node.name: node-003
node.master: true
node.data: true
network.bind_host: 0.0.0.0
network.publish_host: 192.168.2.103
http.port: 9212
transport.tcp.port: 9312
discovery.seed_hosts: ["192.168.2.103:9310","192.168.2.103:9311","192.168.2.103:9312"]
cluster.initial_master_nodes: ["192.168.2.103:9310"]
discovery.zen.minimum_master_nodes: 1
http.cors.enabled: true
http.cors.allow-origin: "*"
# Key parameters explained:
# Cluster name. Defaults to "elasticsearch"; changing it is recommended, because in older
# multicast-discovery versions nodes on the same network segment with the same cluster name
# would auto-join one cluster, which in production can tangle up data and operations.
cluster.name: es_test_cluster
# Node name. Must be unique within a cluster; used to tell nodes apart.
node.name: node-a
# Whether the node is eligible to be elected master (true/false). Note this only grants
# eligibility: the actual master is chosen by election, and if the current master goes
# down a new one is elected from the remaining master-eligible nodes.
node.master: true
# Whether the node stores data and handles data-related operations (true/false).
node.data: true
# The address the node binds to; 0.0.0.0 exposes it on all interfaces.
network.bind_host: 0.0.0.0
# The address this node publishes for other nodes to reach it, i.e. the host machine's IP.
network.publish_host: 192.168.2.103
# The node's HTTP service port. In Docker it can be remapped to a different host port
# with -p host-port:9200.
http.port: 9200
# The transport port used for node-to-node communication within the cluster; defaults to 9300.
transport.tcp.port: 9300
# The initial list of hosts to contact for discovery, so nodes can find the cluster.
# Entries may include a port (default 9300), e.g. ["192.168.1.101","192.168.1.102"].
# In 7.x this setting is named discovery.seed_hosts; the zen name below is the pre-7.x form.
discovery.zen.ping.unicast.hosts: ["192.168.2.103:9300","192.168.2.103:9301","192.168.2.103:9302"]
# Minimum number of master-eligible nodes, to guard against split-brain; best set to
# (total master-eligible nodes / 2 + 1). Ignored in 7.x, where cluster coordination
# handles this automatically.
discovery.zen.minimum_master_nodes: 2
# Needed only when bootstrapping a brand-new cluster, to elect the first master.
cluster.initial_master_nodes: ["node-a", "node-b", "node-c"]
# If you use the head plugin, enable CORS so head can reach ES.
http.cors.enabled: true
http.cors.allow-origin: "*"
docker run -d --name es-node1 --net esnet -p 9210:9210 -p 9310:9310 -v /Users/yupeng/elasticsearch/es1/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml -v /Users/yupeng/elasticsearch/es1/data/:/usr/share/elasticsearch/data -e ES_JAVA_OPTS="-Xms256m -Xmx256m" elasticsearch:7.2.0
docker run -d --name es-node2 --net esnet -p 9211:9211 -p 9311:9311 -v /Users/yupeng/elasticsearch/es2/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml -v /Users/yupeng/elasticsearch/es2/data/:/usr/share/elasticsearch/data -e ES_JAVA_OPTS="-Xms256m -Xmx256m" elasticsearch:7.2.0
docker run -d --name es-node3 --net esnet -p 9212:9212 -p 9312:9312 -v /Users/yupeng/elasticsearch/es3/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml -v /Users/yupeng/elasticsearch/es3/data/:/usr/share/elasticsearch/data -e ES_JAVA_OPTS="-Xms256m -Xmx256m" elasticsearch:7.2.0
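# Once all three containers are up, each node's HTTP port should report the same
# three-node cluster (a minimal check; ports per the configs above):
curl http://localhost:9210/_cat/nodes?v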
# Start, stop, or remove the three node containers as needed
docker start es-node1
docker start es-node2
docker start es-node3
docker stop es-node1
docker stop es-node2
docker stop es-node3
docker rm -f es-node1
docker rm -f es-node2
docker rm -f es-node3
#author BAndTree
# Example docker-compose.yml for an ELK 6.8.5 stack
version: "3" # compose file format version
services:
  elasticsearch: # service name (not the container name)
    image: elasticsearch:6.8.5
    ports:
      - "9200:9200" # port mapping, same as docker run -d -p 9200:9200
    #restart: "always" # restart policy; keeps the service running, recommended in production
    container_name: elasticsearch-1
    environment:
      # Cap the JVM heap; this matters — without it ES may fail to start
      ES_JAVA_OPTS: "-Xms1g -Xmx1g"
  kibana: # service name (not the container name)
    image: kibana:6.8.5
    ports:
      - "5601:5601"
    #restart: "always"
    container_name: kibana
    # mounted files
    volumes:
      - /Users/XXX/docker/docker-compose/conf1/kibana.yml:/etc/kibana/kibana.yml
    links:
      - elasticsearch:es01 # link to the elasticsearch service under the alias es01
  redis:
    image: redis:4.0.13
    container_name: redis
    ports:
      - "6378:6379"
  logstash: # service name (not the container name)
    image: logstash:6.8.5
    #restart: "always"
    ports:
      - "5044:5044"
      - "9600:9600"
    container_name: logstash
    # mounted files: 1. custom pipeline config 2. logstash startup config 3. the path the file input reads from
    volumes:
      - /Users/XXX/docker/docker/docker-compose/conf1/logstash.conf:/usr/share/logstash/config/logstash.conf
      - /Users/XXX/docker/docker/docker-compose/conf1/logstash.yml:/usr/share/logstash/config/logstash.yml
    links:
      - elasticsearch:es01
      - redis:redis
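# To try the stack above, run from the directory holding this docker-compose.yml
# (service names and ports as defined above):
docker-compose up -d
docker-compose ps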
# Create a new docker-compose.yml file
cd /Users/yupeng/elasticsearch
vim /Users/yupeng/elasticsearch/docker-compose.yml
version: "3.7"
services:
  es1:
    image: elasticsearch:7.2.0
    container_name: es-node1
    environment:
      - ES_JAVA_OPTS=-Xms256m -Xmx256m
    ports: # mappings match http.port/transport.tcp.port in the mounted configs
      - "9210:9210"
      - "9310:9310"
    volumes:
      - /Users/yupeng/elasticsearch/es1/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - /Users/yupeng/elasticsearch/es1/data/:/usr/share/elasticsearch/data
    networks:
      - esnet
  es2:
    image: elasticsearch:7.2.0
    container_name: es-node2
    environment:
      - ES_JAVA_OPTS=-Xms256m -Xmx256m
    ports:
      - "9211:9211"
      - "9311:9311"
    volumes:
      - /Users/yupeng/elasticsearch/es2/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - /Users/yupeng/elasticsearch/es2/data/:/usr/share/elasticsearch/data
    networks:
      - esnet
  es3:
    image: elasticsearch:7.2.0
    container_name: es-node3
    environment:
      - ES_JAVA_OPTS=-Xms256m -Xmx256m
    ports:
      - "9212:9212"
      - "9312:9312"
    volumes:
      - /Users/yupeng/elasticsearch/es3/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - /Users/yupeng/elasticsearch/es3/data/:/usr/share/elasticsearch/data
    networks:
      - esnet
networks:
  esnet:
    driver: bridge
# cd into the directory containing docker-compose.yml and bring the stack up
cd /Users/yupeng/elasticsearch
docker-compose up -d
# A status of green means the cluster came up correctly
curl 192.168.2.103:9210/_cluster/health
#{"cluster_name":"elk","status":"green","timed_out":false,"number_of_nodes":3,"number_of_data_nodes":2,"active_primary_shards":9,"active_shards":18,"relocating_shards":0,"initializing_shards":0,"unassigned_shards":0,"delayed_unassigned_shards":0,"number_of_pending_tasks":0,"number_of_in_flight_fetch":0,"task_max_waiting_in_queue_millis":0,"active_shards_percent_as_number":100.0}
# The same endpoints can be opened in a browser to check node status
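# Optional: since CORS is enabled in the node configs for the head plugin, you can
# run elasticsearch-head (a community image; the tag here is an assumption) and
# point it at any node's HTTP port, e.g. http://192.168.2.103:9210:
docker run -d --name es-head -p 9100:9100 mobz/elasticsearch-head:5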
# Run redis with a mounted config file and data directory
docker run -p 6379:6379 --restart=always --name redis --net esnet -v /Users/yupeng/redis/redis.conf:/etc/redis/redis.conf -v /Users/yupeng/redis/data:/data -d redis redis-server /etc/redis/redis.conf
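# Confirm redis is answering (uses the redis-cli bundled in the image; expect PONG):
docker exec -it redis redis-cli ping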
# Inspect port mappings and other details
docker inspect redis
docker inspect elasticsearch
# Pull the image
docker pull kibana:7.2.0
# Prepare the directories to be mounted
mkdir /Users/yupeng/kibana
mkdir /Users/yupeng/kibana/config
# Switch to the config directory and create the config file
cd /Users/yupeng/kibana/config
vim kibana.yml
#kibana.yml
server.port: 5601
server.host: "0.0.0.0"
# Inside the container, 127.0.0.1 would point at Kibana itself; use the ES container's
# name on the esnet network instead (or es-node1:9210 for the cluster setup)
elasticsearch.hosts: ["http://elasticsearch:9200"]
# Run kibana
docker run -d --name kibana -p 5601:5601 -v /Users/yupeng/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml --net esnet kibana:7.2.0
# Check the logs
docker logs kibana
# Verify
curl http://localhost:5601
# Pull the image
docker pull logstash:7.2.0
# Prepare the files to be mounted
mkdir /Users/yupeng/logstash
mkdir /Users/yupeng/logstash/config
# Create the config files
vim /Users/yupeng/logstash/config/logstash.yml
vim /Users/yupeng/logstash/config/logstash.conf
#logstash.yml
path.config: /usr/share/logstash/config/logstash.conf
path.logs: /var/log/logstash
#logstash.conf
# Using redis as an example; multiple inputs and outputs can be configured.
# Events are routed on the type field.
input {
  redis {
    codec => plain
    host => "192.168.0.5"
    port => 6379
    data_type => list
    key => "redislog01"
    db => 0
    type => "redislog01"
  }
  redis {
    codec => plain
    host => "192.168.0.5"
    port => 6379
    data_type => list
    key => "redislog02"
    db => 0
    type => "redislog02"
  }
}
output {
  if [type] == "redislog01" {
    elasticsearch {
      hosts => ["192.168.0.3:9200"]
      index => "redislog01"
      #user => "elastic"
      #password => "changeme"
    }
  }
  if [type] == "redislog02" {
    elasticsearch {
      hosts => ["192.168.0.3:9200"]
      index => "redislog02"
      #user => "elastic"
      #password => "changeme"
    }
  }
}
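# A quick end-to-end test of the type-based routing above: push a line onto one of
# the redis lists and look for it in the matching index (the host/ports here are
# assumptions based on the mappings used earlier in this setup):
docker exec -it redis redis-cli -n 0 LPUSH redislog01 "hello-redislog01"
# After a few seconds the event should be searchable:
curl "http://localhost:9200/redislog01/_search?q=hello-redislog01&pretty"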
#logstash.conf
# Using redis as an example; multiple inputs and outputs can be configured.
# Events are routed on tags.
input {
  redis {
    codec => plain
    host => "192.168.0.5"
    port => 6379
    data_type => list
    key => "redislog01"
    db => 0
    tags => "redislog01"
  }
  redis {
    codec => plain
    host => "192.168.0.5"
    port => 6379
    data_type => list
    key => "redislog02"
    db => 0
    tags => "redislog02"
  }
}
output {
  if "redislog01" in [tags] {
    elasticsearch {
      hosts => ["192.168.0.3:9200"]
      index => "redislog01"
      #user => "elastic"
      #password => "changeme"
    }
  }
  if "redislog02" in [tags] {
    elasticsearch {
      hosts => ["192.168.0.3:9200"]
      index => "redislog02"
      #user => "elastic"
      #password => "changeme"
    }
  }
}
# Run logstash
docker run -d --restart=always --name logstash -p 5044:5044 -p 9600:9600 --net esnet --log-driver json-file -v /Users/yupeng/logstash/config/logstash.conf:/usr/share/logstash/config/logstash.conf -v /Users/yupeng/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml logstash:7.2.0
# Check the logs
docker logs logstash
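# The monitoring API on port 9600 confirms the process is up and returns basic node info:
curl http://localhost:9600/?pretty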
# Create the files and directories to mount
mkdir -p /Users/yupeng/logstash2/{config,pipeline,data}
vim /Users/yupeng/logstash2/config/logstash.yml
config:
  reload:
    automatic: true
    interval: 3s
xpack:
  management.enabled: false
#path.config: /usr/share/logstash/config/conf.d/*.conf
#path.logs: /usr/share/logstash/logs
# The settings below let you see logstash status in Kibana's monitoring UI
xpack.monitoring.enabled: true
#xpack.monitoring.elasticsearch.username: "logstash46"
#xpack.monitoring.elasticsearch.password: "123456"
xpack.monitoring.elasticsearch.hosts: ["http://192.168.0.3:9200"]
Each pipeline.id corresponds to one pipeline. The original project used Logstash to consume Kafka and created a separate pipeline per topic; the effect is the same as the if-on-tag matching in a single config file that writes different data to different indices. With a single pipeline (the default is main), one index would end up holding all the data with no way to classify it, and when there are many input types the if conditions bloat the config file.
vim /Users/yupeng/logstash2/config/pipelines.yml
- pipeline.id: redislog01
  path.config: "/usr/share/logstash/pipeline/redislog01.conf"
- pipeline.id: redislog02
  path.config: "/usr/share/logstash/pipeline/redislog02.conf"
vim /Users/yupeng/logstash2/pipeline/redislog01.conf
#redislog01.conf
input {
  redis {
    codec => plain
    host => "192.168.0.5"
    port => 6379
    data_type => list
    key => "redislog01"
    db => 0
    tags => "redislog01"
  }
}
filter {
  # Extract structured fields from the raw log line
  grok {
    match => {
      "message" => "\[bgctvpayservice\]\[%{WORD:interface}\]"
    }
  }
  grok {
    match => {
      "message" => "uid=%{NUMBER:uid}\&"
    }
  }
  grok {
    match => {
      "message" => "\&ret_code=%{WORD:ret_code}\&"
    }
  }
  grok {
    match => {
      "message" => "vid=%{NUMBER:vid}"
    }
  }
  # Drop metadata fields that aren't needed in the index
  mutate {
    remove_field => ["beat"]
    remove_field => ["@version"]
    remove_field => ["_score"]
    remove_field => ["prospector"]
    remove_field => ["_type"]
  }
}
output {
  elasticsearch {
    hosts => ["192.168.0.3:9200"]
    index => "redislog01"
  }
  stdout { codec => rubydebug }
}
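# For reference, a hypothetical log line that the grok patterns above would match:
#   [bgctvpayservice][pay] uid=10086&ret_code=0&vid=42
# which extracts interface=pay, uid=10086, ret_code=0, vid=42 into separate fields.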
vim /Users/yupeng/logstash2/pipeline/redislog02.conf
#redislog02.conf
input {
  redis {
    codec => plain
    host => "192.168.0.5"
    port => 6379
    data_type => list
    key => "redislog02"
    db => 0
    tags => "redislog02"
  }
}
filter {
  # Same field extraction and cleanup as redislog01.conf
  grok {
    match => {
      "message" => "\[bgctvpayservice\]\[%{WORD:interface}\]"
    }
  }
  grok {
    match => {
      "message" => "uid=%{NUMBER:uid}\&"
    }
  }
  grok {
    match => {
      "message" => "\&ret_code=%{WORD:ret_code}\&"
    }
  }
  grok {
    match => {
      "message" => "vid=%{NUMBER:vid}"
    }
  }
  mutate {
    remove_field => ["beat"]
    remove_field => ["@version"]
    remove_field => ["_score"]
    remove_field => ["prospector"]
    remove_field => ["_type"]
  }
}
output {
  elasticsearch {
    hosts => ["192.168.0.3:9200"]
    index => "redislog02"
  }
  stdout { codec => rubydebug }
}
# Run logstash with the pipelines directory mounted
docker run -d --restart=always --name logstash --network esnet -p 5044:5044 -p 9600:9600 -v /Users/yupeng/logstash2/config/pipelines.yml:/usr/share/logstash/config/pipelines.yml -v /Users/yupeng/logstash2/config/logstash.yml:/usr/share/logstash/config/logstash.yml -v /Users/yupeng/logstash2/pipeline/:/usr/share/logstash/pipeline/ logstash:7.2.0
# Check the logs
docker logs logstash
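# With multiple pipelines, the node API lists each pipeline and its status:
curl http://localhost:9600/_node/pipelines?pretty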
# After changing certain config files, remove the running container and start a fresh instance
docker stop logstash
docker rm -f logstash
# The server runs CentOS 7.5; Java 11 is used here. JDK download page:
https://www.oracle.com/technetwork/java/javase/downloads/jdk11-downloads-5066655.html
# 1. Download the JDK (the .tar.gz archive) and upload it to a suitable location, here /Users/yupeng/java.
# 2. Unpack it: tar -zxvf jdk-11.0.10_linux-x64_bin.tar.gz; a jdk-11.0.10 directory appears in the current directory.
# 3. Run vim /etc/profile and append the following at the end of the file:
export JAVA_HOME=/Users/yupeng/java/jdk-11.0.10
# JDK 11 no longer ships a separate JRE, so no JRE_HOME is needed
export CLASSPATH=.:${JAVA_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH
# 4. Make the profile changes take effect
source /etc/profile
# 5. Run java -version; output like the following means the install succeeded
java version "11.0.10" 2021-01-19 LTS
Java(TM) SE Runtime Environment 18.9 (build 11.0.10+8-LTS-162)
Java HotSpot(TM) 64-Bit Server VM 18.9 (build 11.0.10+8-LTS-162, mixed mode)