我们试图将后端的业务通过lvs转到nginx代理,nginx分别作为4层和7层代理,这里使用DR模型,那就意味着只能做4层代理。类似如:tomcat,mq等也可以进行代理,试图减少代码的耦合度,将他们拆分,使用一个vip加端口的形式。
我准备了4台机器加一台redis做测试
lvs 10.10.240.144 and 10.10.240.143
nginx-proxy 10.10.240.113 And 10.10.240.114
redis 10.10.240.145
VIP 10.10.240.188
安装lvs+keepalived
[root@linuxea-vm-Node_10_10_240_144 ~]# yum install keepalived ipvsadm -y
修改keepalived配置文件/etc/keepalived/keepalived.conf
,添加vip 10.10.240.188,以及后端代理节点的端口和IP配置信息
[root@linuxea-vm-Node_10_10_240_144 /etc/keepalived]$ cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
! One VRRP instance owning VIP 10.10.240.188, plus two TCP virtual servers
! (ports 880 and 6379) balanced round-robin over the two nginx proxy nodes
! 10.10.240.113 / 10.10.240.114 using the DR (direct routing) method.
global_defs {
router_id LVS_DEVEL
}
! VRRP failover instance; the MASTER node holds the VIP below.
vrrp_instance platformtransfer {
state MASTER
interface eth0
virtual_router_id 63
priority 99
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
! VIP announced by the MASTER, labeled eth0:trans1
10.10.240.188/16 brd 10.10.255.255 dev eth0 label eth0:trans1
}
}
! VIP:880 -> nginx proxies :880, DR mode, rr scheduler, TCP health checks.
virtual_server 10.10.240.188 880 {
delay_loop 6
lb_algo rr
lb_kind DR
protocol TCP
! NOTE(review): nopreempt and garp_master_delay are vrrp_instance-scope
! options; placed inside virtual_server keepalived will likely reject or
! ignore them -- confirm against keepalived.conf(5). nopreempt normally
! also requires "state BACKUP" on both nodes to take effect.
nopreempt
garp_master_delay 10
real_server 10.10.240.114 880 {
weight 1
! Plain TCP connect probe on port 880
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
connect_port 880
}
}
real_server 10.10.240.113 880 {
weight 1
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
connect_port 880
}
}
}
! VIP:6379 -> nginx proxies :6379 (redis path), DR mode, rr scheduler.
virtual_server 10.10.240.188 6379 {
delay_loop 6
lb_algo rr
lb_kind DR
protocol TCP
! NOTE(review): same remark as above -- these two options are
! vrrp_instance-scope, not virtual_server-scope.
nopreempt
garp_master_delay 10
real_server 10.10.240.114 6379 {
weight 1
! Plain TCP connect probe on port 6379
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
connect_port 6379
}
}
real_server 10.10.240.113 6379 {
weight 1
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
connect_port 6379
}
}
}
添加lvs脚本/scripts/lvs.sh
[root@linuxea-vm-Node_10_10_240_144 /etc/keepalived]$ cat /scripts/lvs.sh
#!/bin/bash
# Build / refresh the ipvsadm (LVS) rule set described in LVS_IP_INF.
#
# Each line of LVS_IP_INF describes one service with 7 '@'-separated fields:
#   1) interface carrying the VIP          (e.g. eth0)
#   2) VIP                                 (e.g. 10.10.240.188)
#   3) real-server IPs, space separated    (e.g. "10.10.240.114 10.10.240.113")
#   4) real-server port(s)                 (e.g. 880)
#   5) free-form service description       (e.g. test880)
#   6) virtual-service port on the VIP     (e.g. 880)
#   7) forwarding mode: g = DR, i = TUN, m = NAT
# The scheduler is fixed to round-robin (rr).
LVS_IP_INF=$(cat << EOF
eth0@10.10.240.188@10.10.240.114 10.10.240.113@880@test880@880@g
eth0@10.10.240.188@10.10.240.114 10.10.240.113@6379@redis6379@6379@g
EOF
)

case "$1" in
start)
  # Flush the whole table and rebuild it from LVS_IP_INF.
  /usr/sbin/ipvsadm -C
  while IFS= read -r line; do
    # Split the 7 '@'-separated fields in one read; field 3 (RIPS) may
    # itself contain spaces -- it is a list of real-server IPs.
    IFS='@' read -r NET_FACE VIP RIPS PORTS PROJ SPORT MODE <<< "$line"
    echo "添加项目LVS ${PROJ}: 网卡--- ${NET_FACE} 虚拟IP--- ${VIP} 真实主机--- ${RIPS} 代理端口--- ${PORTS}"
    # The virtual service is keyed on VIP:SPORT only, so create it once
    # per line (the original re-ran -At inside the port loop, which fails
    # once the service already exists).
    echo "添加虚拟服务器记录 ipvsadm -At ${VIP}:${SPORT} -s rr"
    /usr/sbin/ipvsadm -At "${VIP}:${SPORT}" -s rr
    for port in ${PORTS}; do
      for rip in ${RIPS}; do
        echo "添加真实服务器记录 ipvsadm -at ${VIP}:${SPORT} -r ${rip}:${port} -${MODE}"
        /usr/sbin/ipvsadm -at "${VIP}:${SPORT}" -r "${rip}:${port}" "-${MODE}"
      done
      echo
    done
    echo
  done <<< "$LVS_IP_INF"
  echo "当前LVS状态:"
  /usr/sbin/ipvsadm -Ln
  ;;
stop)
  # Drop every LVS rule, then show the (now empty) table.
  /usr/sbin/ipvsadm -C
  /usr/sbin/ipvsadm -Ln
  ;;
add)
  date
  # Like start, but keeps the current table and skips services that
  # already exist.
  while IFS= read -r line; do
    IFS='@' read -r NET_FACE VIP RIPS PORTS PROJ SPORT MODE <<< "$line"
    echo "添加LVS项目 ${PROJ}: 网卡---${NET_FACE} 虚拟IP---${VIP} 真实主机---${RIPS} 代理端口---${PORTS}"
    # Skip this service if VIP:SPORT is already configured.
    # grep -v '-' drops the "-> RIP:port" real-server rows so only the
    # virtual-service header lines are matched.
    if /usr/sbin/ipvsadm -Ln | grep -v '-' | grep -q "${VIP}:${SPORT}"; then
      echo "${VIP}:${SPORT}已存在"
    else
      echo "添加虚拟服务器记录 ipvsadm -At ${VIP}:${SPORT} -s rr"
      /usr/sbin/ipvsadm -At "${VIP}:${SPORT}" -s rr
      for port in ${PORTS}; do
        for rip in ${RIPS}; do
          echo "添加真实服务器记录 ipvsadm -at ${VIP}:${SPORT} -r ${rip}:${port} -${MODE}"
          /usr/sbin/ipvsadm -at "${VIP}:${SPORT}" -r "${rip}:${port}" "-${MODE}"
        done
        echo
      done
    fi
    echo
  done <<< "$LVS_IP_INF"
  echo "当前LVS状态:"
  /usr/sbin/ipvsadm -Ln
  ;;
*)
  echo "Usage: $0 {start|stop|add}"
  ;;
esac
而后需要为脚本添加可执行权限
chmod +x /scripts/lvs.sh
配置systemd风格的启动脚本/usr/lib/systemd/system/addvip.service
and /usr/lib/systemd/system/keepalived.service
cat /usr/lib/systemd/system/addvip.service
# Oneshot unit that loads the ipvsadm rule set after keepalived is up.
[Unit]
Description=Add lvs vip
After=keepalived.service multi-user.target
[Service]
# oneshot + RemainAfterExit: the unit stays "active" after the script
# returns, so ExecStop (flush rules) runs on systemctl stop / shutdown.
Type=oneshot
RemainAfterExit=yes
ExecStart=/scripts/lvs.sh start
ExecStop=/scripts/lvs.sh stop
[Install]
WantedBy=multi-user.target
cat /usr/lib/systemd/system/keepalived.service
# Stock-style keepalived unit: forking daemon tracked via its PID file;
# KillMode=process leaves the VRRP child processes to be reaped cleanly.
[Unit]
Description=LVS and VRRP High Availability Monitor
After=syslog.target network-online.target
[Service]
Type=forking
PIDFile=/var/run/keepalived.pid
KillMode=process
# "-" prefix: missing env file is not an error
EnvironmentFile=-/etc/sysconfig/keepalived
ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS
ExecReload=/bin/kill -HUP $MAINPID
[Install]
WantedBy=multi-user.target
放行VRRP(IP协议号112),两台keepalived节点之间互相放行,同时放行业务端口880、6379
# Allow VRRP (IP protocol number 112, not a TCP port) from each keepalived
# peer, plus new TCP connections to the balanced service ports 880/6379.
-A INPUT -p 112 -s 10.10.240.143 -j ACCEPT
-A INPUT -p tcp -m tcp -m state --state NEW -m multiport --dports 880,6379 -m comment --comment "lvs" -j ACCEPT
# NOTE(review): the multiport rule appears twice -- presumably one block
# per node (143 / 144); on a single host the second copy is redundant.
-A INPUT -p 112 -s 10.10.240.144 -j ACCEPT
-A INPUT -p tcp -m tcp -m state --state NEW -m multiport --dports 880,6379 -m comment --comment "lvs" -j ACCEPT
启动
systemctl start keepalived.service addvip.service
启动后的ip会进行绑定到keepalived的master节点之上
[root@linuxea-vm-Node_10_10_240_144 /etc/keepalived]$ ip a
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 88:88:2f:f0:48:19 brd ff:ff:ff:ff:ff:ff
inet 10.10.240.144/8 brd 10.255.255.255 scope global dynamic eth0
valid_lft 84948sec preferred_lft 84948sec
inet 10.10.240.188/16 brd 172.25.255.255 scope global eth0:trans1
valid_lft forever preferred_lft forever
[root@linuxea-vm-Node_10_10_240_144 /etc/keepalived]$ ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.10.240.188:880 rr
-> 10.10.240.113:880 Route 1 0 0
-> 10.10.240.114:880 Route 1 0 0
TCP 10.10.240.188:6379 rr
-> 10.10.240.113:6379 Route 1 0 0
-> 10.10.240.114:6379 Route 1 0 0
配置4层负载
配置4层nginx代理后端的redis节点,便于测试,仅仅配置了一台redis,在配置nginx之前,需要将vip绑定到nginx层的lo网络接口上,如下:
[root@linuxea-vm-Node113 /etc/nginx/stream]# cat /scripts/lvs.sh
#!/bin/bash
#########################################################################
# File Name: /scripts/lvs.sh
# Author: LookBack
# Email: admin#dwhd.org
# Version:
# Created Time: Saturday, 2018-10-13 10:20:41
#########################################################################
# LVS-DR real-server side: bind/unbind the VIP on loopback and toggle the
# ARP kernel flags so this host never answers ARP for the VIP (the LVS
# director must stay the only ARP responder for it).
VIP1=10.10.240.188

case "$1" in
start)
  # /32 on lo: the VIP is local but never announced on the LAN.
  ip addr add "${VIP1}/32" brd "$VIP1" dev lo label lo:0
  # Route the VIP via the loopback device. "lo:0" is only an address
  # label, not a device -- iproute2 rejects it, so the route must name "lo".
  ip route add "$VIP1" dev lo
  # arp_ignore=1: answer ARP only for addresses on the receiving iface;
  # arp_announce=2: always pick the best local source address.
  echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
  echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
  echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
  echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
  # NOTE(review): reloading /etc/sysctl.conf here can overwrite the arp_*
  # values just written above if sysctl.conf also sets them -- confirm.
  sysctl -p >/dev/null 2>&1
  echo "RealServer Start OK"
  ;;
stop)
  ip addr del "${VIP1}/32" brd "$VIP1" dev lo label lo:0
  ip route del "$VIP1" dev lo
  # Restore default ARP behaviour.
  echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore
  echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce
  echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore
  echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce
  echo "RealServer Stoped"
  ;;
*)
  echo "Usage: $0 {start|stop}"
  ;;
esac
两台都需要绑定,而后如下:
[root@linuxea-vm-Node113 /etc/nginx/stream]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet 10.10.188.188/32 brd 10.10.188.188 scope global lo:0
valid_lft forever preferred_lft forever
inet 10.10.240.188/32 brd 10.10.240.188 scope global lo:0
valid_lft forever preferred_lft forever
[root@linuxea-vm-Node114 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet 10.10.188.188/32 brd 10.10.188.188 scope global lo:0
valid_lft forever preferred_lft forever
inet 10.10.240.188/32 brd 10.10.240.188 scope global lo:0
valid_lft forever preferred_lft forever
并且配置放行从lvs到nginx的端口
-A INPUT -s 10.10.240.0/24 -p tcp -m tcp -m state --state NEW -m multiport --dports 880,6379 -j ACCEPT
配置nginx
[root@linuxea-vm-Node113 ~]# cat docker-compose-nginx_vts.yml
# docker-compose service for the nginx proxy (marksugar vts-enabled build).
# Runs with host networking so the stream{} (L4) listeners bind directly on
# the host; /etc/nginx is bind-mounted from the host.
# NOTE(review): the original paste had lost all YAML indentation (invalid
# YAML); restored here with standard 2-space nesting.
version: '3'
services:
  nginx_vts:
    image: marksugar/nginx:v1.14.0-vts
    container_name: nginx
    restart: always
    # share the host network stack
    network_mode: "host"
    volumes:
      - /etc/nginx:/etc/nginx/
      - /data/:/data/
    environment:
      - NGINXCONF=on
      - NGINX_PORT=80
      - SERVER_NAME=www.linuxea.net
      - PHP_FPM_SERVER=127.0.0.1:9000
    # "ports" is ignored under network_mode: host; kept from the original
    ports:
      - "80"
修改添加/etc/nginx/nginx.conf一个字段
# Top-level stream{} context: pulls in every L4 (TCP/UDP) proxy definition
# dropped into /etc/nginx/stream/*.conf (e.g. the redis proxy below).
stream {
include stream/*.conf;
}
而后添加redis节点的upstream
[root@linuxea-vm-Node113 ~]# cat /etc/nginx/stream/redis.conf
# L4 (stream) proxy for redis: listen on 6379 and forward raw TCP to the
# backend pool. The upstream was renamed from the purely numeric "6379":
# a bare number in proxy_pass is ambiguous with an address/port, so a
# descriptive name is safer and clearer.
upstream redis_6379 {
    server 10.10.240.145:6379;
}
server {
    listen 6379;
    proxy_pass redis_6379;
}
配置redis
redis使用之前的编写的docker配置
[root@linuxea-vm-Node_10_10_240_145 /data1/redis]$ cat docker-compose-redis-4-0-11.yml
# docker-compose service for the backend redis 4.0.11 instance.
# NOTE(review): the original paste had lost all YAML indentation (invalid
# YAML); restored here with standard 2-space nesting.
version: '2'
services:
  redis:
    image: marksugar/redis:4.0.11
    container_name: redis
    restart: always
    network_mode: "host"
    privileged: true
    environment:
      - REDIS_CONF=on
      - REQUIREPASSWD=OTdmOWI4ZTM4NTY1M2M4OTZh
      # NOTE(review): "MASTERAUTHPAD" looks like a typo for MASTERAUTHPWD;
      # left unchanged because the variable name the image expects is not
      # visible here -- confirm against the image entrypoint.
      - MASTERAUTHPAD=OTdmOWI4ZTM4NTY1M2M4OTZh
      - MAXCLIENTS_NUM=600
      - MAXMEMORY_SIZE=4096
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /etc/redis:/etc/redis
      - /data/redis-data:/data/redis:Z
      - /data/logs:/data/logs
测试
在redis本机访问lvs dr的vip代理的redis ip和端口。
[root@linuxea-vm-Node_10_10_240_145 /data1/redis]$ redis-cli -h 10.10.240.188 -p 6379 -a OTdmOWI4ZTM4NTY1M2M4OTZh info |grep cpu
used_cpu_sys:805.85
used_cpu_user:747.25
used_cpu_sys_children:0.00
used_cpu_user_children:0.00