[root@lib01 ~]# ifconfig eth0 | awk -F '[ :]+' 'NR==2{print $4}'
10.0.0.5
[root@lib02 ~]# ifconfig eth0 | awk -F '[ :]+' 'NR==2{print $4}'
10.0.0.6
Common load balancing software includes Nginx, LVS, and HAProxy; this walkthrough uses Nginx.
Official documentation for the Nginx upstream (reverse proxy) module: http://nginx.org/en/docs/http/ngx_http_upstream_module.html
lib01   10.0.0.5   Nginx primary load balancer
lib02   10.0.0.6   Nginx backup load balancer
web01   10.0.0.8   web01 server
web02   10.0.0.7   web02 server
Nginx will be installed on all four servers below.
#1) Install the dependency packages
yum install openssl openssl-devel pcre pcre-devel -y
rpm -qa openssl openssl-devel pcre pcre-devel
#2) Install Nginx
mkdir -p /home/oldboy/tools
cd /home/oldboy/tools
yum install -y wget gcc
wget -q http://nginx.org/download/nginx-1.6.3.tar.gz
ls -l nginx-1.6.3.tar.gz
useradd nginx -s /sbin/nologin -M
tar xf nginx-1.6.3.tar.gz
cd nginx-1.6.3
./configure --user=nginx --group=nginx --prefix=/application/nginx-1.6.3 --with-http_stub_status_module --with-http_ssl_module
make
make install
/bin/ln -s /application/nginx-1.6.3 /application/nginx
ll /application/nginx
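Optionally, verify the build and the compiled-in modules (a quick check; paths as configured above):
/application/nginx/sbin/nginx -V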
cd /application/nginx/conf/
egrep -v "^$|#" nginx.conf.default >nginx.conf
[root@lib01 conf]# vim /application/nginx/conf/nginx.conf
worker_processes  1;
events {
    worker_connections  1024;
}
http {
    include       mime.types;
    default_type  application/octet-stream;
    sendfile      on;
    keepalive_timeout  65;
    upstream backend {
        server 10.0.0.7:80 weight=1;
        server 10.0.0.8:80 weight=1;
    }
    server {
        listen       80;
        server_name  www.etiantian.org;
        location / {
            proxy_pass http://backend;
        }
    }
}
../sbin/nginx -t
../sbin/nginx
lsof -i :80
[root@lib02 nginx-1.6.3]# vi /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
172.16.1.5 lb01 www.etiantian.org bbs.etiantian.org blog.etiantian.org
172.16.1.6 lb02
172.16.1.7 web02
172.16.1.8 web01
172.16.1.51 db01 db01.etiantian.org
172.16.1.31 nfs01
172.16.1.41 backup
172.16.1.61 m01
[root@lib02 nginx-1.6.3]# for n in `seq 100`;do curl www.etiantian.org;sleep 1 ;done
apache www.etiantain.org
www
apache www.etiantain.org
www
apache www.etiantain.org
www
As you can see, it works: requests are being distributed 1:1 between the two web servers.
Let's test again. Stop Nginx on web01 and see whether the failed node gets taken out of rotation:
[root@web01 ~]# /application/nginx/sbin/nginx -s stop
Then rerun the test:
[root@lib02 nginx-1.6.3]# for n in `seq 100`;do curl www.etiantian.org;sleep 1 ;done
apache www.etiantain.org
apache www.etiantain.org
apache www.etiantain.org
The failed node is skipped automatically; basic load balancing is now in place.
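By default Nginx considers a backend failed after a single failed attempt and retries it after 10 seconds; these thresholds can be tuned per server with max_fails and fail_timeout. A sketch with illustrative values (not part of the original setup):
upstream backend {
    server 10.0.0.7:80 weight=1 max_fails=2 fail_timeout=30s;
    server 10.0.0.8:80 weight=1 max_fails=2 fail_timeout=30s;
}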
By default, a user's HTTP request, including its Host header, first reaches the load balancer. When the load balancer then requests the page from a backend web server, it does not pass the user's Host header along by default, so the web server does not know which virtual host was requested and simply answers with its first (default) virtual host. That returns the wrong content, so we need to add proxy_set_header Host $host; to the load balancer configuration.
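You can see the effect by querying a web node directly with and without the right Host header (a quick illustration, assuming the virtual hosts already set up on web01/web02):
[root@lib02 nginx-1.6.3]# curl 10.0.0.8                                # Host is just the IP, so the default (first) vhost answers
[root@lib02 nginx-1.6.3]# curl -H "Host: bbs.etiantian.org" 10.0.0.8   # with the expected Host header, the bbs vhost answers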
Let's demonstrate the problem first. On lib01, change server_name from www.etiantian.org to bbs.etiantian.org in nginx.conf:
[root@lib01 conf]# vi nginx.conf
server_name bbs.etiantian.org;
Test from lib02:
[root@lib02 nginx-1.6.3]# for n in `seq 100`;do curl bbs.etiantian.org;sleep 1 ;done
www
apache www.etiantain.org
www
apache www.etiantain.org
See that? This is exactly the error described above: a request for bbs.etiantian.org is answered with the www content. Now add the header and try again.
Add proxy_set_header Host $host; to nginx.conf on lib01:
[root@lib01 conf]# vi nginx.conf
worker_processes  1;
events {
    worker_connections  1024;
}
http {
    include       mime.types;
    default_type  application/octet-stream;
    sendfile      on;
    keepalive_timeout  65;
    upstream backend {
        server 10.0.0.7:80 weight=1;
        server 10.0.0.8:80 weight=1;
    }
    server {
        listen       80;
        server_name  bbs.etiantian.org;
        location / {
            proxy_pass http://backend;
            proxy_set_header Host $host;
        }
    }
}
[root@lib01 conf]# /application/nginx/sbin/nginx -t
[root@lib01 conf]# /application/nginx/sbin/nginx -s reload
Test from lib02:
[root@lib02 nginx-1.6.3]# for n in `seq 100`;do curl bbs.etiantian.org;sleep 1 ;done
bbs.etiantain.org
bbs
bbs.etiantain.org
bbs
bbs.etiantain.org
bbs
The responses are correct now. Next, switch to the web01 server and check its access log:
[root@web01 logs]# cat /application/nginx/logs/www_access.log
10.0.0.5 - - [17/Feb/2017:18:00:40 +0800] "GET / HTTP/1.0" 200 4 "-" "curl/7.19.7 (x86_64-redhat-linux-gnu) libcurl/7.19.7 NSS/3.21 Basic ECC zlib/1.2.3 libidn/1.18 libssh2/1.4.2" "-"
10.0.0.5 - - [17/Feb/2017:18:10:46 +0800] "GET / HTTP/1.0" 200 4 "-" "curl/7.19.7 (x86_64-redhat-linux-gnu) libcurl/7.19.7 NSS/3.21 Basic ECC zlib/1.2.3 libidn/1.18 libssh2/1.4.2" "-"
10.0.0.5 - - [17/Feb/2017:18:10:48 +0800] "GET / HTTP/1.0" 200 4 "-" "curl/7.19.7 (x86_64-redhat-linux-gnu) libcurl/7.19.7 NSS/3.21 Basic ECC zlib/1.2.3 libidn/1.18 libssh2/1.4.2" "-"
All of the client IPs in this log are the load balancer's address, because the load balancer requests the pages from the web servers on the user's behalf. To record the real user IP instead, add proxy_set_header X-Forwarded-For $remote_addr; to the location block on lib01 (alongside proxy_set_header Host), so that the web servers' logs show the original client address.
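Note that the web servers only record this value if their log_format includes $http_x_forwarded_for. A sketch of such a format on the web nodes (the format name main and exact field order are assumptions; the trailing quoted field is where the real client IP shows up in the log lines below):
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                '$status $body_bytes_sent "$http_referer" '
                '"$http_user_agent" "$http_x_forwarded_for"';
access_log logs/www_access.log main;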
[root@lib01 conf]# /application/nginx/sbin/nginx -t
[root@lib01 conf]# /application/nginx/sbin/nginx -s reload
Test from lib02 as the client:
[root@lib02 nginx-1.6.3]# ifconfig eth1|awk -F '[ :]+' 'NR==2{print $4}'
172.16.1.6
[root@lib02 nginx-1.6.3]# for n in `seq 100`;do curl www.etiantian.org;sleep 1 ;done
Watch the log on web01 in real time; the client's IP (172.16.1.6) now shows up:
[root@web01 logs]# tail -F /application/nginx/logs/www_access.log
10.0.0.5 - - [17/Feb/2017:18:31:28 +0800] "GET / HTTP/1.0" 200 4 "-" "curl/7.19.7 (x86_64-redhat-linux-gnu) libcurl/7.19.7 NSS/3.21 Basic ECC zlib/1.2.3 libidn/1.18 libssh2/1.4.2" "172.16.1.6"
X-Forwarded-For solves it nicely. Now give lib02 the same load balancer configuration, since it will act as our second load balancer:
[root@lib02 nginx-1.6.3]# echo > /application/nginx/conf/nginx.conf
[root@lib02 nginx-1.6.3]# vim /application/nginx/conf/nginx.conf
worker_processes  1;
events {
    worker_connections  1024;
}
http {
    include       mime.types;
    default_type  application/octet-stream;
    sendfile      on;
    keepalive_timeout  65;
    upstream backend {
        server 10.0.0.7:80 weight=1;
        server 10.0.0.8:80 weight=1;
    }
    server {
        listen       80;
        server_name  bbs.etiantian.org;
        location / {
            proxy_pass http://backend;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-For $remote_addr;
        }
    }
}
[root@lib02 nginx-1.6.3]# /application/nginx/sbin/nginx
Test: first stop Nginx on lib01.
[root@lib01 conf]# pkill nginx
[root@lib02 nginx-1.6.3]# for n in `seq 100`;do curl 172.16.1.5;sleep 1 ;done
curl: (7) couldn't connect to host
curl: (7) couldn't connect to host
That was load balancer lib01, which is now down.
[root@lib02 nginx-1.6.3]# for n in `seq 100`;do curl 172.16.1.6;sleep 1 ;done
apache www.etiantain.org
www
That is load balancer lib02, which still serves requests. But if lib01 goes down, clients using its address get nothing; lib02 cannot take over lib01's address by itself, so for real high availability we need Keepalived together with the Nginx load balancers.
Keepalived uses VRRP, the Virtual Router Redundancy Protocol, while Heartbeat and Corosync provide host- and service-oriented high availability. Put simply, Keepalived's purpose is to emulate the high availability of a router, whereas Heartbeat and Corosync aim to make services highly available. Keepalived is therefore usually used for front-end high availability, in the familiar combinations LVS+Keepalived, Nginx+Keepalived and HAProxy+Keepalived. Heartbeat or Corosync are used for service high availability, in combinations such as Heartbeat v3 (Corosync)+Pacemaker+NFS+Httpd for a highly available web server, or Heartbeat v3 (Corosync)+Pacemaker+NFS+MySQL for a highly available MySQL server.
To summarize: Keepalived provides lightweight high availability, is typically used at the front end, needs no shared storage, and is usually deployed on two nodes. Heartbeat (or Corosync) is generally used for service high availability, usually needs shared storage, and is used for multi-node clusters.
As for choosing between Heartbeat and Corosync: Corosync is generally preferred because its runtime mechanism is better, and Pacemaker, which was split out of Heartbeat, has said its future development leans toward Corosync, so Corosync+Pacemaker is currently the best combination. That said, I have no particular bias toward any software; any cluster software that has survived has its own strengths and application areas, and it only delivers its full value when used in the right place, which means understanding it first. The best way to learn a piece of software is to read its official documentation. With that said, let's get back to Keepalived.
Now let's create a VIP (virtual IP): a floating address that normally sits on .5 and moves to .6 automatically when .5 goes down.
10.0.0.3/24 is the load balancers' VIP.
[root@lib01 conf]# ip addr add 10.0.0.3/24 dev eth0 label eth0:0
# An address added with ip is called a secondary IP; one added with ifconfig is called an alias. To make a secondary IP added with ip visible in ifconfig output you must include the label eth0:0 parameter, otherwise ifconfig will not show it.
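Either way, the ip tool always shows the secondary address (a quick check):
[root@lib01 conf]# ip addr show eth0 | grep 10.0.0.3
    inet 10.0.0.3/24 scope global secondary eth0:0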
[root@lib01 conf]# ifconfig
eth0 Link encap:Ethernet HWaddr 00:0C:29:13:C6:EA
inet addr:10.0.0.5 Bcast:10.0.0.255 Mask:255.255.255.0
inet6 addr: fe80::20c:29ff:fe13:c6ea/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:35506 errors:0 dropped:0 overruns:0 frame:0
TX packets:20245 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:34669873 (33.0 MiB) TX bytes:1976855 (1.8 MiB)
eth0:0 Link encap:Ethernet HWaddr 00:0C:29:13:C6:EA
inet addr:10.0.0.3 Bcast:0.0.0.0 Mask:255.255.255.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
# There it is.
[root@lib01 conf]# cat nginx.conf
worker_processes  1;
events {
    worker_connections  1024;
}
http {
    include       mime.types;
    default_type  application/octet-stream;
    sendfile      on;
    keepalive_timeout  65;
    upstream backend {
        server 10.0.0.7:80 weight=1;
        server 10.0.0.8:80 weight=1;
    }
    server {
        listen       80;
        server_name  bbs.etiantian.org;
        location / {
            proxy_pass http://backend;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-For $remote_addr;
        }
    }
    server {
        listen       80;
        server_name  blog.etiantian.org;
        location / {
            proxy_pass http://backend;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-For $remote_addr;
        }
    }
}
[root@lib01 conf]# /application/nginx/sbin/nginx -t
[root@lib01 conf]# /application/nginx/sbin/nginx
Point the test domains at the VIP in the hosts file of the client you are browsing from:
10.0.0.3 www.etiantian.org etiantian.org blog.etiantian.org
The sites are reachable via 10.0.0.3 because we configured it as a secondary IP on lib01.
[root@lib01 conf]# netstat -lntup|grep nginx
tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 9171/nginx
Nginx on lib01 listens on 0.0.0.0:80, i.e. on every local address, so requests to 10.0.0.3 are accepted as well.
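Note that this works precisely because of the wildcard listen. If you ever bind Nginx to the VIP explicitly (e.g. listen 10.0.0.3:80;), it will fail to start on a node that does not currently hold the VIP unless non-local binding is allowed. A sketch of that sysctl, only needed for IP-specific listens and not for the 0.0.0.0 setup used here:
echo 'net.ipv4.ip_nonlocal_bind = 1' >> /etc/sysctl.conf
sysctl -p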
Now make the configuration identical on lib01 and lib02:
vi /application/nginx/conf/nginx.conf
worker_processes  1;
events {
    worker_connections  1024;
}
http {
    include       mime.types;
    default_type  application/octet-stream;
    sendfile      on;
    keepalive_timeout  65;
    upstream backend {
        server 10.0.0.7:80 weight=1;
        server 10.0.0.8:80 weight=1;
    }
    server {
        listen       80;
        server_name  bbs.etiantian.org;
        location / {
            proxy_pass http://backend;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-For $remote_addr;
        }
    }
    server {
        listen       80;
        server_name  blog.etiantian.org;
        location / {
            proxy_pass http://backend;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-For $remote_addr;
        }
    }
}
/application/nginx/sbin/nginx -s reload
First we want to fail one machine over by hand while the other keeps serving; automating this comes next. Suspend lib01 (the .5 machine); once the VIP is moved, the browser on the Mac will be able to reach the site again. Configure the VIP on lib02:
[root@lib02 nginx-1.6.3]# ip addr add 10.0.0.3/24 dev eth0 label eth0:0
Refresh the browser and the site is reachable again.
[root@lib02 nginx-1.6.3]# pkill nginx
Now the site is unreachable: both load balancers are down.
[root@lib01 conf]# ifconfig eth0:0 down    # remove the manually configured VIP first; Keepalived will manage it from here on
Install Keepalived on both load balancers:
yum install -y keepalived
[root@lib01 conf]# vi /etc/keepalived/keepalived.conf    # the default configuration file only needs the following content
! Configuration File for keepalived
global_defs {
    notification_email {
        aaromail@qq.com
    }
    notification_email_from aaromail@qq.com
    smtp_server 192.168.200.1
    smtp_connect_timeout 30
    router_id LVS_DEVEL
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 150
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.3/24 dev eth0 label eth0:1
    }
}
[root@lib01 conf]# scp /etc/keepalived/keepalived.conf root@172.16.1.6:/etc/keepalived/
Switch to the lib02 server:
[root@lib02 nginx-1.6.3]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    notification_email {
        aaromail@qq.com
    }
    notification_email_from aaromail@qq.com
    smtp_server 192.168.200.1
    smtp_connect_timeout 30
    router_id LVS_DEVEL1    # changed: must not be the same as on lib01
}
vrrp_instance VI_1 {
    state BACKUP            # changed to BACKUP
    interface eth0
    virtual_router_id 51
    priority 100            # changed to 100 - these are the only three changes
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.3/24 dev eth0 label eth0:1
    }
}
Start Keepalived on both machines at the same time:
/etc/init.d/keepalived start
Only when the master fails does the backup take over.
[root@lib01 conf]# ip addr|grep 10.0.0.3          # the master has brought up the VIP
    inet 10.0.0.3/24 scope global secondary eth0:1
[root@lib02 nginx-1.6.3]# ip addr|grep 10.0.0.3   # the backup has not
[root@lib02 nginx-1.6.3]#
Note: make sure the two machines can always communicate with each other, otherwise you get split-brain. In a Keepalived pair, each node's state depends on what it hears from its peer; if the master-backup link breaks, both nodes become master and compete for the same resources (split-brain). This is usually solved by introducing third-party arbitration: each node judges its own health, the simplest approach being a check in the Keepalived configuration that periodically pings the gateway and treats a failed ping as a local fault.
The easiest way to do this is with Keepalived's vrrp_script and track_script, as shown below.
Initial configuration:
global_defs {
    router_id LVS_DEVEL
}
vrrp_sync_group VG_1 {
    group {
        VI_1
    }
    notify_master /root/slave2master.py
    notify_backup /root/master2slave.py
}
vrrp_instance VI_1 {
    vrrp_unicast_bind 192.168.150.21
    vrrp_unicast_peer 192.168.150.20
    state BACKUP
    interface manbr
    virtual_router_id 51
    nopreempt
    priority 100
    advert_int 10
    authentication {
        auth_type PASS
        auth_pass XXXX
    }
    virtual_ipaddress {
        192.168.150.23 dev manbr
    }
}
Configuration with arbitration added:
global_defs {
    router_id LVS_DEVEL
}
vrrp_script check_local {
    script "/root/check_gateway.sh"
    interval 5
}
vrrp_sync_group VG_1 {
    group {
        VI_1
    }
    notify_master /root/slave2master.py
    notify_backup /root/master2slave.py
}
vrrp_instance VI_1 {
    vrrp_unicast_bind 192.168.150.21
    vrrp_unicast_peer 192.168.150.20
    state BACKUP
    interface manbr
    virtual_router_id 51
    nopreempt
    priority 100
    advert_int 10
    authentication {
        auth_type PASS
        auth_pass XXXX
    }
    virtual_ipaddress {
        192.168.150.23 dev manbr
    }
    track_script {
        check_local
    }
}
check_gateway.sh holds the arbitration logic: when the gateway cannot be pinged, it shuts Keepalived down with service keepalived stop.
The drawback of this approach is that Keepalived will not be started again automatically once communication is back to normal.
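The original does not show check_gateway.sh itself; a minimal sketch of what it might look like (the gateway address 10.0.0.2 is an assumption, adjust for your network):
#!/bin/bash
# check_gateway.sh - arbitration sketch: give up the VIP if the gateway is unreachable.
GATEWAY=10.0.0.2   # assumed gateway address
if ! ping -c 3 -W 2 "$GATEWAY" >/dev/null 2>&1; then
    # The local network looks broken, so stop keepalived to avoid a split-brain double-master.
    service keepalived stop
fi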
When the master goes down, the backup picks up the VIP and the site stays reachable:
[root@lib02 nginx-1.6.3]# ip addr|grep 10.0.0.3
    inet 10.0.0.3/24 scope global secondary eth0:1
The backup now holds the VIP. Nginx was stopped on lib02 earlier, so start it again:
[root@lib02 nginx-1.6.3]# /application/nginx/sbin/nginx
The site can be accessed normally again; the backup has taken over automatically.
[root@lib02 nginx-1.6.3]# ip addr|grep 10.0.0.3
inet 10.0.0.3/24 scope global secondary eth0:1
[root@lib02 nginx-1.6.3]# ip addr|grep 10.0.0.3
inet 10.0.0.3/24 scope global secondary eth0:1
[root@lib02 nginx-1.6.3]# ip addr|grep 10.0.0.3
[root@lib02 nginx-1.6.3]# ip addr|grep 10.0.0.3
[root@lib02 nginx-1.6.3]# ip addr|grep 10.0.0.3
Once the master comes back up, it reclaims the VIP because of its higher priority, which is why the checks above return nothing on lib02. Now shut the master down again:
[root@lib02 nginx-1.6.3]# ip addr|grep 10.0.0.3
[root@lib02 nginx-1.6.3]# ip addr|grep 10.0.0.3
[root@lib02 nginx-1.6.3]#
[root@lib02 nginx-1.6.3]# ip addr|grep 10.0.0.3
inet 10.0.0.3/24 scope global secondary eth0:1
[root@lib02 nginx-1.6.3]# ip addr|grep 10.0.0.3
inet 10.0.0.3/24 scope global secondary eth0:1
After a few seconds the VIP fails over to lib02 again.
[root@lib01 ~]# vim /application/nginx/conf/nginx.conf
worker_processes  1;
events {
    worker_connections  1024;
}
http {
    include       mime.types;
    default_type  application/octet-stream;
    sendfile      on;
    keepalive_timeout  65;
    upstream backend {
        ip_hash;    # this line was added
        server 10.0.0.7:80 weight=1;
        server 10.0.0.8:80 weight=1;
    }
    server {
        listen       80;
        server_name  bbs.etiantian.org;
        location / {
            proxy_pass http://backend;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-For $remote_addr;
        }
    }
    server {
        listen       80;
        server_name  blog.etiantian.org;
        location / {
            proxy_pass http://backend;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-For $remote_addr;
        }
    }
}
ip_hash; enables IP binding: each request is distributed according to a hash of the client's IP address, so a given visitor is always sent to the same backend server, which solves session problems. Adding this line avoids issues such as users being asked to log in repeatedly; the trade-off is that the load may no longer be spread perfectly evenly. Reference: https://www.oschina.net/question/12_24613
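One caveat with ip_hash: if a backend has to be pulled out temporarily, the nginx documentation recommends marking it down instead of deleting the line, so the existing client-to-server hash mapping is preserved. A sketch (assuming 10.0.0.7 is the one being serviced):
upstream backend {
    ip_hash;
    server 10.0.0.7:80 weight=1 down;
    server 10.0.0.8:80 weight=1;
}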
[root@lib01 ~]# /application/nginx/sbin/nginx -t
[root@lib01 ~]# /application/nginx/sbin/nginx -s reload
Test from lib02:
[root@lib02 nginx-1.6.3]# for n in `seq 100`;do curl www.etiantian.org;sleep 1 ;done
apache www.etiantain.org
apache www.etiantain.org
apache www.etiantain.org
apache www.etiantain.org
Original article: http://www.cnblogs.com/aofo/p/6413487.html