下周就彻底离职了,工作还没着落,悲催
系统优化参数（写入 /etc/sysctl.conf，修改后执行 sysctl -p 生效）
# Kernel/network tuning for the ES + Redis cluster hosts (/etc/sysctl.conf).
net.ipv4.ip_forward = 0
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
kernel.sysrq = 0
kernel.core_uses_pid = 1
kernel.msgmnb = 65536
kernel.msgmax = 65536
kernel.shmmax = 68719476736
kernel.shmall = 4294967296
net.ipv4.tcp_max_tw_buckets = 60000
net.ipv4.tcp_sack = 1
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_rmem = 4096 87380 4194304
net.ipv4.tcp_wmem = 4096 16384 4194304
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.core.netdev_max_backlog = 500000
# BUGFIX: somaxconn appeared twice in the original (262144, then 32767 near
# the end); sysctl applies the last occurrence, so the effective value was
# 32767. Consolidated to a single entry to remove the silent override.
net.core.somaxconn = 32767
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 262144
net.ipv4.tcp_timestamps = 1
# NOTE(review): a single SYN/SYN-ACK retry is very aggressive — fails fast,
# but fragile on lossy networks. Confirm this is intended.
net.ipv4.tcp_synack_retries = 1
net.ipv4.tcp_syn_retries = 1
# tcp_tw_recycle breaks clients behind NAT (and was removed in kernel 4.12+);
# keeping it disabled is correct.
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_mem = 94500000 915000000 927000000
# NOTE(review): a 1-second FIN timeout is extremely aggressive — confirm.
net.ipv4.tcp_fin_timeout = 1
net.ipv4.tcp_keepalive_time = 1200
net.ipv4.tcp_keepalive_probes = 5
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.ip_local_port_range = 1024 65535
vm.swappiness = 0
# Required by the Elasticsearch bootstrap check: must be >= 262144.
vm.max_map_count = 262144
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
# Recommended by Redis so background saves succeed under memory pressure.
vm.overcommit_memory = 1
echo never > /sys/kernel/mm/transparent_hugepage/enabled   # 禁用透明大页；并把这条命令加入 /etc/rc.local 使其开机生效
# Download Elasticsearch 5.5.0 and the matching IK analyzer plugin
# (plugin version must match the ES version exactly).
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.5.0.rpm
wget https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v5.5.0/elasticsearch-analysis-ik-5.5.0.zip
yum install elasticsearch-5.5.0.rpm -y
# After installing ES, copy the IK analyzer zip (elasticsearch-analysis-ik-5.5.0.zip)
# into /usr/share/elasticsearch/plugins, then run the commands below.
cd /usr/share/elasticsearch/plugins
unzip elasticsearch-analysis-ik-5.5.0.zip -d ik && rm -f elasticsearch-analysis-ik-5.5.0.zip
如果 java 是用 yum 安装的，就不需要设置 JAVA_HOME；如果是自定义安装的 java，则需要在 /etc/sysconfig/elasticsearch 中设置，否则 es 会报找不到 java 的错误
下面是原文件/etc/sysconfig/elasticsearch
################################
# Elasticsearch
################################
# Elasticsearch home directory
#ES_HOME=/usr/share/elasticsearch
# Elasticsearch Java path
#修改此处
JAVA_HOME=/usr/local/java/jdk1.8.0_152
# Elasticsearch configuration directory
CONF_DIR=/etc/elasticsearch
# Elasticsearch data directory
#DATA_DIR=/var/lib/elasticsearch
# Elasticsearch logs directory
#LOG_DIR=/var/log/elasticsearch
# Elasticsearch PID directory
#PID_DIR=/var/run/elasticsearch
# Additional Java OPTS
#ES_JAVA_OPTS=
# Configure restart on package upgrade (true, every other setting will lead to not restarting)
#RESTART_ON_UPGRADE=true
################################
# Elasticsearch service
################################
# SysV init.d
#
# When executing the init script, this user will be used to run the elasticsearch service.
# The default value is 'elasticsearch' and is declared in the init.d file.
# Note that this setting is only used by the init script. If changed, make sure that
# the configured user can read and write into the data, work, plugins and log directories.
# For systemd service, the user is usually configured in file /usr/lib/systemd/system/elasticsearch.service
#ES_USER=elasticsearch
#ES_GROUP=elasticsearch
# The number of seconds to wait before checking if Elasticsearch started successfully as a daemon process
ES_STARTUP_SLEEP_TIME=5
################################
# System properties
################################
# Specifies the maximum file descriptor number that can be opened by this process
# When using Systemd, this setting is ignored and the LimitNOFILE defined in
# /usr/lib/systemd/system/elasticsearch.service takes precedence
#MAX_OPEN_FILES=65536
# The maximum number of bytes of memory that may be locked into RAM
# Set to "unlimited" if you use the ‘bootstrap.memory_lock: true‘ option
# in elasticsearch.yml.
# When using Systemd, the LimitMEMLOCK property must be set
# in /usr/lib/systemd/system/elasticsearch.service
#MAX_LOCKED_MEMORY=unlimited
# Maximum number of VMA (Virtual Memory Areas) a process can own
# When using Systemd, this setting is ignored and the 'vm.max_map_count'
# property is set at boot time in /usr/lib/sysctl.d/elasticsearch.conf
#MAX_MAP_COUNT=262144
jvm配置
/etc/elasticsearch/jvm.options，修改为下面所示（这 3 台机器还装了 redis 集群，因此给 ES 堆分配 16G 内存；官方建议堆不超过物理内存一半且小于 32G）
-Xms16g
-Xmx16g
elasticsearch.yml配置
在该文件下面增加下面的配置即可,另外2台服务器修改node.name和network.host这2个字段
其中需要提前创建 /data/elasticsearch、/var/log/elasticsearch 目录，并授予 elasticsearch 用户权限：chown -R elasticsearch:elasticsearch /data/elasticsearch && chown -R elasticsearch:elasticsearch /var/log/elasticsearch
## 167（10.80.5.167）增加 —— 原文误写为 157，与下方 network.host 对应
path.data: /data/elasticsearch
path.logs: /var/log/elasticsearch
cluster.name: shop-system
node.name: ser5-167.tech-idc.net
node.master: true
node.data: true
network.host: 10.80.5.167
http.port: 9200
discovery.zen.ping.unicast.hosts: ["10.80.5.167", "10.80.5.168","10.80.5.169"]
discovery.zen.minimum_master_nodes: 2
## 168（10.80.5.168）增加 —— 原文误写为 158，与下方 network.host 对应
path.data: /data/elasticsearch
path.logs: /var/log/elasticsearch
cluster.name: shop-system
node.name: ser5-168.tech-idc.net
node.master: true
node.data: true
network.host: 10.80.5.168
http.port: 9200
discovery.zen.ping.unicast.hosts: ["10.80.5.167", "10.80.5.168","10.80.5.169"]
discovery.zen.minimum_master_nodes: 2
## 169（10.80.5.169）增加 —— 原文误写为 159，与下方 network.host 对应
path.data: /data/elasticsearch
path.logs: /var/log/elasticsearch
cluster.name: shop-system
node.name: ser5-169.tech-idc.net
node.master: true
node.data: true
network.host: 10.80.5.169
http.port: 9200
discovery.zen.ping.unicast.hosts: ["10.80.5.167", "10.80.5.168","10.80.5.169"]
discovery.zen.minimum_master_nodes: 2
systemctl start elasticsearch
systemctl enable elasticsearch
# Download on all three machines.
wget http://download.redis.io/releases/redis-3.2.11.tar.gz
# Ruby is only needed on one machine (for redis-trib.rb).
wget https://cache.ruby-lang.org/pub/ruby/2.3/ruby-2.3.1.tar.gz
tar xvf redis-3.2.11.tar.gz -C /usr/local
# BUGFIX: the tarball was extracted into /usr/local, so build there — the
# original `cd redis-3.2.11` would fail from the download directory.
cd /usr/local/redis-3.2.11 && make && make install
安装完后验证
[root@ser5-167 elasticsearch]# redis-cli --version
redis-cli 3.2.11
端口选择设置为8001-8006
# Two Redis instances per host, ports 8001-8006.
# (Original comments said 10.80.5.157/158/159; every bind address in this
# document is 10.80.5.167/168/169 — fixed accordingly.)
mkdir -p /data/redis/{8001,8002}/{conf,data}   # on 10.80.5.167
mkdir -p /data/redis/{8003,8004}/{conf,data}   # on 10.80.5.168
mkdir -p /data/redis/{8005,8006}/{conf,data}   # on 10.80.5.169
# Run on all three servers. BUGFIX: the original trailing "(3台服务器上执行)"
# was not a comment and would break the command line.
mkdir -p /var/log/redis && mkdir -p /var/run/redis
# 10.80.5.167 — instance on port 8001 (original header said "157")
daemonize yes
pidfile "/var/run/redis/redis-8001.pid"
dir "/data/redis/8001/data"
port 8001
tcp-backlog 511
# 0 disables TCP keepalive probes to clients
tcp-keepalive 0
bind 10.80.5.167
loglevel notice
logfile "/var/log/redis/redis-8001.log"
databases 16
cluster-enabled yes
cluster-node-timeout 15000
cluster-config-file "node-8001.conf"
appendonly yes
appendfilename "appendonly-8001.aof"
appendfsync everysec
no-appendfsync-on-rewrite yes
# BUGFIX: the original "80-100" is not a valid integer; redis parses it with
# atoi() so the effective value was 80 — made explicit.
auto-aof-rewrite-percentage 80
auto-aof-rewrite-min-size 64mb
===================================
# 10.80.5.167 — instance on port 8002 (original header said "157")
daemonize yes
pidfile "/var/run/redis/redis-8002.pid"
dir "/data/redis/8002/data"
port 8002
tcp-backlog 511
# 0 disables TCP keepalive probes to clients
tcp-keepalive 0
bind 10.80.5.167
loglevel notice
logfile "/var/log/redis/redis-8002.log"
databases 16
cluster-enabled yes
cluster-node-timeout 15000
cluster-config-file "node-8002.conf"
appendonly yes
appendfilename "appendonly-8002.aof"
appendfsync everysec
no-appendfsync-on-rewrite yes
# BUGFIX: the original "80-100" is not a valid integer; redis parses it with
# atoi() so the effective value was 80 — made explicit.
auto-aof-rewrite-percentage 80
auto-aof-rewrite-min-size 64mb
================================
# 10.80.5.168 — instance on port 8003 (original header said "158")
daemonize yes
pidfile "/var/run/redis/redis-8003.pid"
dir "/data/redis/8003/data"
port 8003
tcp-backlog 511
# 0 disables TCP keepalive probes to clients
tcp-keepalive 0
bind 10.80.5.168
loglevel notice
logfile "/var/log/redis/redis-8003.log"
databases 16
cluster-enabled yes
cluster-node-timeout 15000
cluster-config-file "node-8003.conf"
appendonly yes
appendfilename "appendonly-8003.aof"
appendfsync everysec
no-appendfsync-on-rewrite yes
# BUGFIX: the original "80-100" is not a valid integer; redis parses it with
# atoi() so the effective value was 80 — made explicit.
auto-aof-rewrite-percentage 80
auto-aof-rewrite-min-size 64mb
======================================
# 10.80.5.168 — instance on port 8004 (original header said "158")
daemonize yes
pidfile "/var/run/redis/redis-8004.pid"
dir "/data/redis/8004/data"
port 8004
tcp-backlog 511
# 0 disables TCP keepalive probes to clients
tcp-keepalive 0
bind 10.80.5.168
loglevel notice
logfile "/var/log/redis/redis-8004.log"
databases 16
cluster-enabled yes
cluster-node-timeout 15000
cluster-config-file "node-8004.conf"
appendonly yes
appendfilename "appendonly-8004.aof"
appendfsync everysec
no-appendfsync-on-rewrite yes
# BUGFIX: the original "80-100" is not a valid integer; redis parses it with
# atoi() so the effective value was 80 — made explicit.
auto-aof-rewrite-percentage 80
auto-aof-rewrite-min-size 64mb
=========================================
# 10.80.5.169 — instance on port 8005 (original header said "159")
daemonize yes
pidfile "/var/run/redis/redis-8005.pid"
dir "/data/redis/8005/data"
port 8005
tcp-backlog 511
# 0 disables TCP keepalive probes to clients
tcp-keepalive 0
bind 10.80.5.169
loglevel notice
logfile "/var/log/redis/redis-8005.log"
databases 16
cluster-enabled yes
cluster-node-timeout 15000
cluster-config-file "node-8005.conf"
appendonly yes
appendfilename "appendonly-8005.aof"
appendfsync everysec
no-appendfsync-on-rewrite yes
# BUGFIX: the original "80-100" is not a valid integer; redis parses it with
# atoi() so the effective value was 80 — made explicit.
auto-aof-rewrite-percentage 80
auto-aof-rewrite-min-size 64mb
====================================
# 10.80.5.169 — instance on port 8006
# (original header said "159 8086端口"; the instance below is port 8006)
daemonize yes
pidfile "/var/run/redis/redis-8006.pid"
dir "/data/redis/8006/data"
port 8006
tcp-backlog 511
# 0 disables TCP keepalive probes to clients
tcp-keepalive 0
bind 10.80.5.169
loglevel notice
logfile "/var/log/redis/redis-8006.log"
databases 16
cluster-enabled yes
cluster-node-timeout 15000
cluster-config-file "node-8006.conf"
appendonly yes
appendfilename "appendonly-8006.aof"
appendfsync everysec
no-appendfsync-on-rewrite yes
# BUGFIX: the original "80-100" is not a valid integer; redis parses it with
# atoi() so the effective value was 80 — made explicit.
auto-aof-rewrite-percentage 80
auto-aof-rewrite-min-size 64mb
# On 10.80.5.167 (original comment said "157")
/usr/local/bin/redis-server /data/redis/8001/conf/redis.conf
/usr/local/bin/redis-server /data/redis/8002/conf/redis.conf
# On 10.80.5.168 (original comment said "158")
/usr/local/bin/redis-server /data/redis/8003/conf/redis.conf
/usr/local/bin/redis-server /data/redis/8004/conf/redis.conf
# On 10.80.5.169 (original comment said "159")
/usr/local/bin/redis-server /data/redis/8005/conf/redis.conf
/usr/local/bin/redis-server /data/redis/8006/conf/redis.conf
# Verify all instances are running
ps -ef|grep redis
# Build Ruby 2.3.1 (needed by redis-trib.rb, the cluster bootstrap script).
tar xvf ruby-2.3.1.tar.gz
cd ruby-2.3.1 && ./configure --prefix=/usr/local/ruby && make && make install
# After the build, copy the binaries into /usr/local/bin.
cd /usr/local/ruby/
cp bin/ruby /usr/local/bin
cp bin/gem /usr/local/bin
# Verify the install.
ruby --version
# Install the redis rubygem (redis-trib.rb dependency).
gem install redis
# Copy redis-trib.rb to /usr/local/bin.
# BUGFIX: original path had a typo ("reds-3.2.11") — the tarball was
# extracted to /usr/local/redis-3.2.11, so the cp would have failed.
cp /usr/local/redis-3.2.11/src/redis-trib.rb /usr/local/bin
cp /usr/local/bin/gem /bin/
# Create the cluster: 3 masters, each with 1 replica.
redis-trib.rb create --replicas 1 10.80.5.167:8001 10.80.5.167:8002 10.80.5.168:8003 10.80.5.168:8004 10.80.5.169:8005 10.80.5.169:8006
输出如下所示:
[root@ser5-167 ~]# redis-trib.rb create --replicas 1 10.80.5.167:8001 10.80.5.167:8002 10.80.5.168:8003 10.80.5.168:8004 10.80.5.169:8005 10.80.5.169:8006
>>> Creating cluster
>>> Performing hash slots allocation on 6 nodes...
Using 3 masters:
10.80.5.167:8001
10.80.5.168:8003
10.80.5.169:8005
Adding replica 10.80.5.168:8004 to 10.80.5.167:8001
Adding replica 10.80.5.167:8002 to 10.80.5.168:8003
Adding replica 10.80.5.169:8006 to 10.80.5.169:8005
M: d7466862e56c91c541f28d1b0cdff52d5667f34f 10.80.5.167:8001
slots:0-5460 (5461 slots) master
S: 9739701c5187dd03d43e8b7d78470974ec44ab53 10.80.5.167:8002
replicates 03681e008967c073392f46a9102f98967401ab86
M: 03681e008967c073392f46a9102f98967401ab86 10.80.5.168:8003
slots:5461-10922 (5462 slots) master
S: 1bd2ea2eb9f519949bda598e9cf524017f3fcbc7 10.80.5.168:8004
replicates d7466862e56c91c541f28d1b0cdff52d5667f34f
M: a10bb4b544b24a9fa4f838fe2af41eb81feaa7c7 10.80.5.169:8005
slots:10923-16383 (5461 slots) master
S: 11f696519406052b8a44aa169f222635d3562c0a 10.80.5.169:8006
replicates a10bb4b544b24a9fa4f838fe2af41eb81feaa7c7
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join.....
>>> Performing Cluster Check (using node 10.80.5.167:8001)
M: d7466862e56c91c541f28d1b0cdff52d5667f34f 10.80.5.167:8001
slots:0-5460 (5461 slots) master
1 additional replica(s)
S: 11f696519406052b8a44aa169f222635d3562c0a 10.80.5.169:8006
slots: (0 slots) slave
replicates a10bb4b544b24a9fa4f838fe2af41eb81feaa7c7
M: a10bb4b544b24a9fa4f838fe2af41eb81feaa7c7 10.80.5.169:8005
slots:10923-16383 (5461 slots) master
1 additional replica(s)
S: 9739701c5187dd03d43e8b7d78470974ec44ab53 10.80.5.167:8002
slots: (0 slots) slave
replicates 03681e008967c073392f46a9102f98967401ab86
S: 1bd2ea2eb9f519949bda598e9cf524017f3fcbc7 10.80.5.168:8004
slots: (0 slots) slave
replicates d7466862e56c91c541f28d1b0cdff52d5667f34f
M: 03681e008967c073392f46a9102f98967401ab86 10.80.5.168:8003
slots:5461-10922 (5462 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
至此，redis 集群就搭建好了。遇到的坑：安装 rubygem redis 依赖时，一开始安装的是 3.3.0 版本，无法初始化集群；后来直接安装最新版（gem install redis）就好了。
elasticsearch和redis集群搭建文档-电商系统
原文:https://www.cnblogs.com/uglyliu/p/12675055.html