I. Introduction to keepalived
1. A health-check tool designed specifically for LVS and HA
2. Supports automatic failover
3. Supports node health checks
II. How keepalived works
keepalived uses the VRRP hot-standby protocol to provide multi-machine hot standby for Linux servers.
VRRP (Virtual Router Redundancy Protocol) is a backup solution originally designed for routers. Several routers form a hot-standby group that serves clients through a shared virtual IP address. At any moment only one master router in the group handles traffic; the others sit in a redundant (backup) state. If the active router fails, another router takes over the virtual IP address according to the configured priorities, and service continues.
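In keepalived, one hot-standby group corresponds to one vrrp_instance block. A minimal sketch (the values here are illustrative; the full configuration used in this walkthrough appears below):

vrrp_instance VI_1 {
    state MASTER            # the standby peer uses state BACKUP
    interface eth0          # interface carrying VRRP advertisements
    virtual_router_id 60    # must be identical on every group member
    priority 100            # the highest priority holds the VIP
    virtual_ipaddress {
        192.168.69.8        # the shared virtual IP
    }
}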
III. Building an lvs+keepalived DR-mode high-availability load-balancing cluster
Environment: CentOS 6.5
web1 server: 192.168.69.6
web2 server: 192.168.69.7
Master load balancer: 192.168.69.6 (here the master load balancer shares a machine with web1; a dedicated server is preferable)
Backup load balancer: 192.168.69.7 (here the backup load balancer shares a machine with web2; a dedicated server is preferable)
Virtual IP (VIP): 192.168.69.8
1. Set up the web1 and web2 servers
See this earlier post: http://www.cnblogs.com/lzcys8868/p/7856469.html
Once they are up, verify the Apache service:
web1:
[root@localhost ~]# cd /var/www/html/
[root@localhost html]# cat index.html
69.6 page OK
web2:
[root@localhost ~]# cd /var/www/html/
[root@localhost html]# cat index.html
69.7 page OK
Visit both web servers in a browser: http://<server IP>:<port>/index.html
If the pages above appear, Apache is working.
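If you prefer a scripted check over the browser (assuming Apache serves on port 8000, which is the port the virtual_server configuration below uses), a quick curl loop works:

# Fetch the test page from both real servers (port 8000 assumed)
for rs in 192.168.69.6 192.168.69.7; do
    echo -n "$rs: "
    curl -s http://$rs:8000/index.html
done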
2. Configure the virtual IP address (VIP) on web1 and web2
On web1:
[root@localhost ~]# cd scripts/
[root@localhost scripts]# ls
lvs-dr
[root@localhost scripts]# pwd
/root/scripts
[root@localhost scripts]# cat lvs-dr
#!/bin/bash
# lvs-dr: bind the VIP to lo:0 and stop the real server answering ARP for it
VIP="192.168.69.8"
/sbin/ifconfig lo:0 $VIP broadcast $VIP netmask 255.255.255.255
/sbin/route add -host $VIP dev lo:0
echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
[root@localhost scripts]# chmod +x /root/scripts/lvs-dr
[root@localhost scripts]# ll
total 4
-rwxr-xr-x 1 root root 336 Nov 20 11:08 lvs-dr
[root@localhost scripts]# sh lvs-dr
[root@localhost scripts]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet 192.168.69.8/32 brd 192.168.69.8 scope global lo:0
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 30:e1:71:6a:df:6c brd ff:ff:ff:ff:ff:ff
    inet 192.168.69.6/24 brd 192.168.69.255 scope global eth0
    inet 192.168.69.8/32 scope global eth0
    inet6 fe80::32e1:71ff:fe6a:df6c/64 scope link
       valid_lft forever preferred_lft forever
3: eth1: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN qlen 1000
    link/ether 30:e1:71:6a:df:6d brd ff:ff:ff:ff:ff:ff
[root@localhost scripts]# echo "/root/scripts/lvs-dr" >> /etc/rc.local
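As an optional check that the ARP kernel parameters set by lvs-dr really took effect, sysctl can read them back:

# Should report arp_ignore = 1 and arp_announce = 2 on both lo and all
sysctl net.ipv4.conf.lo.arp_ignore net.ipv4.conf.lo.arp_announce
sysctl net.ipv4.conf.all.arp_ignore net.ipv4.conf.all.arp_announce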
On web2 repeat the same steps: just copy the lvs-dr script to web2 and run it there.
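One way to push the script over (a sketch; it assumes root SSH access from web1 and that the /root/scripts directory also exists on web2):

scp /root/scripts/lvs-dr root@192.168.69.7:/root/scripts/
ssh root@192.168.69.7 "sh /root/scripts/lvs-dr; echo /root/scripts/lvs-dr >> /etc/rc.local"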
Check web2:
[root@localhost ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet 192.168.69.8/32 brd 192.168.69.8 scope global lo:0
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 30:e1:71:70:dd:c4 brd ff:ff:ff:ff:ff:ff
    inet 192.168.69.7/24 brd 192.168.69.255 scope global eth0
    inet6 fe80::32e1:71ff:fe70:ddc4/64 scope link
       valid_lft forever preferred_lft forever

3. Build the master load balancer, on 192.168.69.6
[root@www ~]# modprobe ip_vs
[root@www ~]# cat /proc/net/ip_vs
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port Forward Weight ActiveConn InActConn
[root@www ~]# rpm -q ipvsadm keepalived
package ipvsadm is not installed
package keepalived is not installed
[root@www ~]# yum -y install ipvsadm keepalived
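Optionally, register keepalived with the standard CentOS 6 SysV tooling so it comes back after a reboot:

chkconfig keepalived on
chkconfig --list keepalived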
[root@localhost ~]# cd /etc/keepalived/
[root@localhost keepalived]# cp keepalived.conf keepalived.conf.origin
[root@localhost keepalived]# vim keepalived.conf
 1 ! Configuration File for keepalived
 2
 3 global_defs {
 4 #   notification_email {
 5 #     acassen@firewall.loc
 6 #     failover@firewall.loc
 7 #     sysadmin@firewall.loc
 8 #   }
 9 #   notification_email_from Alexandre.Cassen@firewall.loc
10 #   smtp_server 192.168.200.1
11     smtp_connect_timeout 30
12     router_id LVS_DEVEL_BLM
13 }
14
15 vrrp_instance VI_1 {
16     state MASTER
17     interface eth0
18     virtual_router_id 60
19     priority 100
20     advert_int 2
21     authentication {
22         auth_type PASS
23         auth_pass 1111
24     }
25     virtual_ipaddress {
26         192.168.69.8
27     }
28 }
29
30 virtual_server 192.168.69.8 8000 {    # the VIP port must match the real_server ports
31     delay_loop 2
32     lb_algo rr
33     lb_kind DR                        # LVS runs in DR mode
34     ! nat_mask 255.255.255.0
35     ! persistence_timeout 300
36     protocol TCP
37
38     real_server 192.168.69.6 8000 {
39         weight 1
40         TCP_CHECK {
41             connect_timeout 10
42             nb_get_retry 3
43             delay_before_retry 3
44             connect_port 8000
45         }
46     }
47
48     real_server 192.168.69.7 8000 {
49         weight 1
50         TCP_CHECK {
51             connect_timeout 10
52             nb_get_retry 3
53             delay_before_retry 3
54             connect_port 8000
55         }
56     }
57 }
Note: on lines 40 and 50 there must be a space between TCP_CHECK and the opening brace; otherwise, after starting keepalived you will see only one of the two real servers instead of both.
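A quick way to catch this mistake before starting the service is to count the well-formed TCP_CHECK blocks; with this configuration it should print 2:

grep -c 'TCP_CHECK {' /etc/keepalived/keepalived.conf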
[root@localhost keepalived]# /etc/init.d/keepalived start
[root@localhost keepalived]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP  192.168.69.8:8000 rr
  -> 192.168.69.6:8000 Local 1 0 0
  -> 192.168.69.7:8000 Route 1 0 7
[root@localhost keepalived]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet 192.168.69.8/32 brd 192.168.69.8 scope global lo:0
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 30:e1:71:6a:df:6c brd ff:ff:ff:ff:ff:ff
    inet 192.168.69.6/24 brd 192.168.69.255 scope global eth0
    inet 192.168.69.8/32 scope global eth0
    inet6 fe80::32e1:71ff:fe6a:df6c/64 scope link
       valid_lft forever preferred_lft forever
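If you want to watch the master's VRRP advertisements on the wire (sent to multicast 224.0.0.18 roughly every advert_int = 2 seconds), tcpdump can filter on the VRRP protocol number:

# VRRP is IP protocol 112; advertisements carry virtual_router_id 60 and priority 100
tcpdump -i eth0 ip proto 112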
4. Configure the backup load balancer, on 192.168.69.7
[root@www ~]# modprobe ip_vs
[root@www ~]# cat /proc/net/ip_vs
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
[root@www ~]# rpm -q ipvsadm keepalived
package ipvsadm is not installed
package keepalived is not installed
[root@www ~]# yum -y install ipvsadm keepalived
[root@localhost ~]# cat /etc/keepalived/keepalived.conf
 1 ! Configuration File for keepalived
 2
 3 global_defs {
 4 #   notification_email {
 5 #     acassen@firewall.loc
 6 #     failover@firewall.loc
 7 #     sysadmin@firewall.loc
 8 #   }
 9 #   notification_email_from Alexandre.Cassen@firewall.loc
10 #   smtp_server 192.168.200.1
11     smtp_connect_timeout 30
12     router_id LVS_DEVEL_BLM
13 }
14
15 vrrp_instance VI_1 {
16     state BACKUP
17     interface eth0
18     virtual_router_id 60
19     priority 99
20     advert_int 2
21     authentication {
22         auth_type PASS
23         auth_pass 1111
24     }
25     virtual_ipaddress {
26         192.168.69.8
27     }
28 }
29
30 virtual_server 192.168.69.8 8000 {
31     delay_loop 2
32     lb_algo rr
33     lb_kind DR
34     ! nat_mask 255.255.255.0
35     ! persistence_timeout 50
36     protocol TCP
37
38     real_server 192.168.69.6 8000 {
39         weight 1
40         TCP_CHECK {
41             connect_timeout 10
42             nb_get_retry 3
43             delay_before_retry 3
44             connect_port 8000
45         }
46     }
47
48     real_server 192.168.69.7 8000 {
49         weight 1
50         TCP_CHECK {
51             connect_timeout 10
52             nb_get_retry 3
53             delay_before_retry 3
54             connect_port 8000
55         }
56     }
57 }
The backup configuration differs from the master's in only two places:
1) line 16: state BACKUP
2) line 19: priority 99
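A quick sanity check on the backup node that exactly these two settings are in place:

# Should show "state BACKUP" and "priority 99"
grep -nE 'state|priority' /etc/keepalived/keepalived.conf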
[root@localhost keepalived]# /etc/init.d/keepalived start
[root@localhost ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet 192.168.69.8/32 brd 192.168.69.8 scope global lo:0
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 30:e1:71:70:dd:c4 brd ff:ff:ff:ff:ff:ff
    inet 192.168.69.7/24 brd 192.168.69.255 scope global eth0
    inet6 fe80::32e1:71ff:fe70:ddc4/64 scope link
       valid_lft forever preferred_lft forever
[root@localhost ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP  192.168.69.8:8000 rr
  -> 192.168.69.6:8000 Route 1 0 7
  -> 192.168.69.7:8000 Local 1 0 0
5. Test VIP distribution in a browser. Open http://192.168.69.8:8000/index.html and refresh; the two pages appear in a 1:1 rotation because both weights are set to 1.
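The same test can be scripted from a separate client machine (not from the real servers themselves, since they hold the VIP on lo); with equal weights and rr scheduling the two pages should alternate strictly:

# Four requests should return web1, web2, web1, web2 (in some order)
for i in 1 2 3 4; do curl -s http://192.168.69.8:8000/index.html; done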
6. Test keepalived's high-availability failover
Stop keepalived on the web1 server; the web2 server should take over the VIP and keep distributing requests.
On web1:
[root@localhost ~]# /etc/init.d/keepalived stop
Check the VIP on web2:
[root@localhost ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet 192.168.69.8/32 brd 192.168.69.8 scope global lo:0
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 30:e1:71:70:dd:c4 brd ff:ff:ff:ff:ff:ff
    inet 192.168.69.7/24 brd 192.168.69.255 scope global eth0
    inet 192.168.69.8/32 scope global eth0
    inet6 fe80::32e1:71ff:fe70:ddc4/64 scope link
       valid_lft forever preferred_lft forever
Test distribution in the browser again:
Distribution still works.
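To confirm the takeover from the logs as well (keepalived logs through syslog on CentOS 6), look for the VRRP state transition on web2:

# web2 should have logged a transition to MASTER state
grep -i vrrp /var/log/messages | tail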
Once keepalived is started on web1 again, web1 should automatically take the VIP back. Test:
[root@localhost ~]# /etc/init.d/keepalived start
[root@localhost ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet 192.168.69.8/32 brd 192.168.69.8 scope global lo:0
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 30:e1:71:6a:df:6c brd ff:ff:ff:ff:ff:ff
    inet 192.168.69.6/24 brd 192.168.69.255 scope global eth0
    inet 192.168.69.8/32 scope global eth0
    inet6 fe80::32e1:71ff:fe6a:df6c/64 scope link
       valid_lft forever preferred_lft forever
At this point the lvs+keepalived high-availability cluster is complete.
Note: when the two servers double as both real servers and master/backup load balancers, accessing the VIP in a production test was very slow because a broadcast storm formed. You can stop the backup load balancer for now and, once spare servers become available, migrate both the master and backup load balancers onto dedicated machines.
Extra:
Differences between NAT mode and DR mode:
Both modes are ways for LVS to do load balancing. In NAT mode, the director rewrites the destination address of each incoming packet (DNAT), and reply packets must leave through the same director they came in on; with many real servers, all replies funnel through that single exit and the director becomes the bottleneck. In DR mode, the director hands each incoming packet to one of the real servers behind it by rewriting the destination MAC address, and the reply goes straight back to the client without passing through the director again; this removes the return-path bottleneck and lets the cluster handle a much larger volume of user traffic.
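As a stand-alone illustration (manual ipvsadm rules, separate from the keepalived setup above), the director-side difference boils down to one forwarding flag: -m for NAT (masquerading) versus -g for DR (gatewaying):

# NAT mode (one alternative): replies return through the director
ipvsadm -A -t 192.168.69.8:8000 -s rr
ipvsadm -a -t 192.168.69.8:8000 -r 192.168.69.6:8000 -m

# DR mode (the other alternative): director rewrites only the destination MAC; replies bypass it
ipvsadm -A -t 192.168.69.8:8000 -s rr
ipvsadm -a -t 192.168.69.8:8000 -r 192.168.69.6:8000 -g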