# Create a directory to hold extra configuration files
[root@rocky7 sh]#mkdir /etc/haproxy/conf.d
# Edit the service unit file
[root@rocky7 sh]#vim /lib/systemd/system/haproxy.service
[Unit]
Description=HAProxy Load Balancer
After=syslog.target network.target

[Service]
# Just append -f /etc/haproxy/conf.d/ to the two Exec lines below
ExecStartPre=/usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -f /etc/haproxy/conf.d/ -c -q
ExecStart=/usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -f /etc/haproxy/conf.d/ -p /var/lib/haproxy/haproxy.pid
ExecReload=/bin/kill -USR2 $MAINPID

[Install]
WantedBy=multi-user.target
# Reload systemd so the unit change takes effect
[root@rocky7 sh]#systemctl daemon-reload
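The merged configuration can also be validated by hand before restarting, using the same flags the unit file uses (-c parses only, without starting the service):

# Validate haproxy.cfg plus everything under conf.d/ in one pass
[root@rocky7 sh]#/usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -f /etc/haproxy/conf.d/ -c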
# Create a new config file under conf.d
[root@rocky7 conf.d]#vim ha-four.cfg
listen web_http_nodes
    bind 0.0.0.0:80
    mode tcp    # tcp mode, no HTTP protocol handling
    balance roundrobin
    server web1 10.0.0.136:80 send-proxy check inter 3000 fall 3 rise 5
# Restart the service
[root@rocky7 conf.d]#systemctl restart haproxy.service
# Adjust nginx's log_format; $proxy_protocol_addr holds the real client address
log_format main '$remote_addr - $remote_user [$time_local] "$request" "$proxy_protocol_addr"';
server {
    listen 80 proxy_protocol;    # once this is added, the page can no longer be accessed directly
    server_name localhost;
    access_log logs/access.log main;    # access log file
    location / {
        root html;
        index index.html index.htm;
    }
}
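A quick sanity check, assuming the backend nginx is 10.0.0.136 and haproxy listens on 10.0.0.140 as in the configs above: direct access should now fail, while access through haproxy (which supplies the header via send-proxy) works.

# Fails: nginx now expects a PROXY protocol header before the request
curl http://10.0.0.136
# Works: haproxy's send-proxy option adds the header
curl http://10.0.0.140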
# List images; this one is nginx 1.20 compiled from source on alpine
[root@rocky7 ~]#docker images
REPOSITORY          TAG    IMAGE ID       CREATED      SIZE
alpine-nginx-1.20   v1.1   c9887509b772   6 days ago   212MB
# Run two containers
[root@rocky7 ~]#docker run -d --name web01 -p 5555:80 alpine-nginx-1.20:v1.1
[root@rocky7 ~]#docker run -d --name web03 -p 7777:80 alpine-nginx-1.20:v1.1
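A quick look to confirm both containers are up and the port mappings took effect (a sketch using docker's Go-template formatting):

# Expect web01 -> 0.0.0.0:5555->80/tcp and web03 -> 0.0.0.0:7777->80/tcp
[root@rocky7 ~]#docker ps --filter name=web --format '{{.Names}}\t{{.Ports}}'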
# Edit the default pages; the two pages differ only in the host label
[root@rocky7 ~]#docker exec -it web01 sh
/ # vi /data/nginx/index.html
/ # nginx -t
nginx: the configuration file /apps/nginx/conf/nginx.conf syntax is ok
nginx: configuration file /apps/nginx/conf/nginx.conf test is successful
/ # nginx -s reload
/ # curl 127.0.0.1
<h1>this is web01 html</h1>
/ # exit
# Verify that both pages respond correctly
[root@rocky7 ~]#curl 10.0.0.140:5555
<h1>this is web01 html</h1>
[root@rocky7 ~]#curl 10.0.0.140:7777
<h1>this is web03 html</h1>
# HAProxy configuration:
listen web_http_nodes
    bind 0.0.0.0:80
    mode http
    log global
    balance roundrobin
    #server web1 10.0.0.136:80 send-proxy check inter 3000 fall 3 rise 5    # the earlier backend with send-proxy added
    server web01 10.0.0.140:5555 check inter 3000 fall 2 rise 5
    server web03 10.0.0.140:7777 check inter 3000 fall 2 rise 5
# Verify
[root@rocky7 conf.d]#systemctl restart haproxy.service
[root@rocky7 conf.d]#curl 10.0.0.140
<h1>this is web01 html</h1>
[root@rocky7 conf.d]#curl 10.0.0.140
<h1>this is web03 html</h1>
[root@rocky7 conf.d]#curl 10.0.0.140
<h1>this is web01 html</h1>
[root@rocky7 conf.d]#curl 10.0.0.140
<h1>this is web03 html</h1>
[root@rocky7 conf.d]#curl 10.0.0.140
<h1>this is web01 html</h1>
[root@rocky7 conf.d]#curl 10.0.0.140
<h1>this is web03 html</h1>
[root@rocky7 conf.d]#curl 10.0.0.140
<h1>this is web01 html</h1>
[root@rocky7 conf.d]#curl 10.0.0.140
<h1>this is web03 html</h1>
# roundrobin rotates through the servers, so there is no session persistence
listen web_http_nodes
    bind 0.0.0.0:80
    mode http
    log global
    balance roundrobin
    cookie WEBSRV insert nocache indirect
    #server web1 10.0.0.136:80 send-proxy check inter 3000 fall 3 rise 5    # the earlier backend with send-proxy added
    server web01 10.0.0.140:5555 check inter 3000 fall 2 rise 5 cookie web01
    server web03 10.0.0.140:7777 check inter 3000 fall 2 rise 5 cookie web03
---------------------------- First visit ----------------------------
---------------------------- Second visit ----------------------------
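The same behaviour can be checked from the command line, as a sketch: the first response carries a Set-Cookie header naming the chosen server, and replaying that cookie pins every later request to it.

# First request: haproxy inserts the WEBSRV cookie it will route on
curl -si 10.0.0.140 | grep -i set-cookie
# Replaying the cookie keeps hitting the same backend every time
curl -b 'WEBSRV=web01' 10.0.0.140
curl -b 'WEBSRV=web01' 10.0.0.140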
errorfile 503 /data/http/503.html
mkdir /data/http && cd /data/http
cat 503.html
HTTP/1.0 503 Service Unavailable
Cache-Control: no-cache
Connection: close
Content-Type: text/html

<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Error Page</title>
</head>
<body>
<center><h1>Site under maintenance... please try again later</h1></center>
<center><h2>Hotline: 400-123-4567</h2></center>
<center><h3>503 Service Unavailable</h3></center>
</body>
</html>
Tip
The quickest way to trigger the 503 is to stop the backend servers; since mine are docker containers, docker stop container-name does the trick.
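For example, with the docker setup above:

# Stop both backends so every health check fails
[root@rocky7 ~]#docker stop web01 web03
# haproxy now answers with the custom 503 page
[root@rocky7 ~]#curl -i 10.0.0.140
# Bring the backends back when done
[root@rocky7 ~]#docker start web01 web03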
Tip
Although HAProxy supports HTTPS, for performance reasons production certificates are usually deployed on the backend servers (e.g. nginx); that is, the client-to-haproxy leg runs in tcp mode and is passed straight through to the backend.
Enough talk, let's get to it.
# Create a directory for the certificates and cd into it
[root@rocky7 haproxy]#mkdir /etc/haproxy/certs
[root@rocky7 haproxy]#cd /etc/haproxy/certs
# Generate the certificate and merge key + cert into a single pem file
[root@rocky7 certs]#openssl req -newkey rsa:4096 -nodes -sha256 -keyout https.haproxy.org.key -x509 -days 3650 -out https.haproxy.org.crt
Generating a RSA private key
..................................................................++++
..............................................................++++
writing new private key to 'https.haproxy.org.key'
-----
You are about to be asked to enter information that will be incorporated
into your certificate request.
What you are about to enter is what is called a Distinguished Name or a DN.
There are quite a few fields but you can leave some blank
For some fields there will be a default value,
If you enter '.', the field will be left blank.
-----
Country Name (2 letter code) [XX]:CN
State or Province Name (full name) []:shanxi
Locality Name (eg, city) [Default City]:taiyuan
Organization Name (eg, company) [Default Company Ltd]:
Organizational Unit Name (eg, section) []:
Common Name (eg, your name or your server's hostname) []:https.haproxy.org
Email Address []:
[root@rocky7 certs]#cat https.haproxy.org.key https.haproxy.org.crt > https.haproxy.org.pem
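The certificate can be sanity-checked before handing the pem to haproxy (a quick look at the subject and validity window):

[root@rocky7 certs]#openssl x509 -in https.haproxy.org.crt -noout -subject -dates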
listen web_http_nodes
    bind 0.0.0.0:80
    # Two lines added on top of the earlier configuration: one opens port 443 with the ssl certificate, the other redirects port-80 requests to 443
    bind 0.0.0.0:443 ssl crt /etc/haproxy/certs/https.haproxy.org.pem
    redirect scheme https if !{ ssl_fc }
    mode http
    log global
    balance roundrobin
    cookie WEBSRV insert nocache indirect
    #server web1 10.0.0.136:80 send-proxy check inter 3000 fall 3 rise 5    # the earlier backend with send-proxy added
    server web01 10.0.0.140:5555 check inter 3000 fall 2 rise 5 cookie web01
    server web03 10.0.0.140:7777 check inter 3000 fall 2 rise 5 cookie web03
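After restarting haproxy, both the redirect and TLS termination can be checked with curl (a sketch; -k is needed because the certificate is self-signed):

# Port 80 should answer with a 302 redirect to https
curl -I http://10.0.0.140
# Follow the redirect and accept the self-signed certificate
curl -kL http://10.0.0.140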
Tip
HAProxy's default mode is http (used when mode is omitted); for layer-4 proxying it must be set to tcp.
# Add a single errorfile line to the main configuration; this one page is shared by every site HAProxy proxies.
errorfile 503 /data/http/503.html
# 503.html must contain these two lines
HTTP/1.0 503 Service Unavailable
Content-Type: text/html
# The most important line is the one specifying the SSL certificate path
bind 0.0.0.0:443 ssl crt /etc/haproxy/certs/https.haproxy.org.pem
# Add the http-to-https redirect as needed
redirect scheme https if !{ ssl_fc }
Environment
ka1:10.0.0.140
ka2:10.0.0.136
web:10.0.0.130
VIP:10.0.0.190
# ka1 configuration
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 51
    priority 100                # the higher priority of the two
    advert_int 1
    nopreempt                   # non-preemptive mode
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.190              # floating VIP
    }
    unicast_src_ip 10.0.0.140   # local IP
    unicast_peer {
        10.0.0.136              # IP of the other keepalived node; multiple peers may be listed
    }
}
# ka2 configuration
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 51
    priority 88                 # the lower priority of the two
    advert_int 1
    #nopreempt                  # in production ka2 stays preemptive, so this is not configured
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.190
    }
    unicast_src_ip 10.0.0.136   # local IP
    unicast_peer {
        10.0.0.140              # IP of the other keepalived node
    }
}
[root@rocky4 conf.d]#tcpdump -i eth0 -nn src host 10.0.0.140 and dst host 10.0.0.136
dropped privs to tcpdump
tcpdump: verbose output suppressed, use -v or -vv for full protocol decode
listening on eth0, link-type EN10MB (Ethernet), capture size 262144 bytes
12:36:07.816161 IP 10.0.0.140 > 10.0.0.136: ICMP 10.0.0.140 protocol 112 unreachable, length 48
12:36:08.172068 ARP, Request who-has 10.0.0.136 tell 10.0.0.140, length 46
12:36:08.817709 IP 10.0.0.140 > 10.0.0.136: ICMP 10.0.0.140 protocol 112 unreachable, length 48
12:36:09.818372 IP 10.0.0.140 > 10.0.0.136: ICMP 10.0.0.140 protocol 112 unreachable, length 48
12:36:10.819776 IP 10.0.0.140 > 10.0.0.136: ICMP 10.0.0.140 protocol 112 unreachable, length 48
12:36:11.821513 IP 10.0.0.140 > 10.0.0.136: ICMP 10.0.0.140 protocol 112 unreachable, length 48
12:36:12.821687 IP 10.0.0.140 > 10.0.0.136: ICMP 10.0.0.140 protocol 112 unreachable, length 48
12:36:13.822643 IP 10.0.0.140 > 10.0.0.136: ICMP 10.0.0.140 protocol 112 unreachable, length 48
12:36:14.823823 IP 10.0.0.140 > 10.0.0.136: ICMP 10.0.0.140 protocol 112 unreachable, length 48
12:36:15.824337 IP 10.0.0.140 > 10.0.0.136: ICMP 10.0.0.140 protocol 112 unreachable, length 48
12:36:16.825498 IP 10.0.0.140 > 10.0.0.136: ICMP 10.0.0.140 protocol 112 unreachable, length 48
# From a third host, ping the VIP, then stop keepalived on the node holding it; the VIP should float to the other node. Watch whether ping drops any packets (commands sketched after the list).
1. Start pinging the VIP 10.0.0.190
2. The VIP is still on ka1, the host with IP 10.0.0.140
3. Stop the keepalived service
4. The VIP is gone from ka1 and the ping on the right dropped a single packet, which shows the VIP has floated to ka2
5. ka1's keepalived has the higher priority, yet after it is started again the VIP does not float back, confirming the non-preemptive setup works
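As a sketch, the commands behind steps 1-5:

# On the third host: keep pinging and watch for a dropped packet
ping 10.0.0.190
# On the node holding the VIP (ka1): trigger the failover, then bring keepalived back
systemctl stop keepalived
systemctl start keepalived
# On either node: see where the VIP landed
ip -4 addr show dev eth0 | grep 10.0.0.190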
Add the email settings on top of the configuration above and put the notify script in place.
#!/bin/bash
#
#*********************************************
#Author: zhang
#QQ: 200957079
#URL: ztunan.top
#Date: 2023-03-21
#Filename: notify.sh
#Description: The test script
#*********************************************
contact='200957079@qq.com'
email_send='xiaowei200957079@163.com'
email_passwd='replace with your own SMTP authorization code'
email_smtp_server='smtp.163.com'
. /etc/os-release
msg_error() {
echo -e "\033[1;31m$1\033[0m"
}
msg_info() {
echo -e "\033[1;32m$1\033[0m"
}
msg_warn() {
echo -e "\033[1;33m$1\033[0m"
}
color () {
RES_COL=60
MOVE_TO_COL="echo -en \\033[${RES_COL}G"
SETCOLOR_SUCCESS="echo -en \\033[1;32m"
SETCOLOR_FAILURE="echo -en \\033[1;31m"
SETCOLOR_WARNING="echo -en \\033[1;33m"
SETCOLOR_NORMAL="echo -en \E[0m"
echo -n "$1" && $MOVE_TO_COL
echo -n "["
if [ $2 = "success" -o $2 = "0" ] ;then
${SETCOLOR_SUCCESS}
echo -n $" OK "
elif [ $2 = "failure" -o $2 = "1" ] ;then
${SETCOLOR_FAILURE}
echo -n $"FAILED"
else
${SETCOLOR_WARNING}
echo -n $"WARNING"
fi
${SETCOLOR_NORMAL}
echo -n "]"
echo
}
install_sendemail () {
if [[ $ID =~ rhel|centos|rocky ]];then
rpm -q sendemail &> /dev/null || yum install -y sendemail
elif [ $ID = 'ubuntu' ];then
dpkg -l |grep -q sendemail || { apt update; apt install -y libio-socket-ssl-perl libnet-ssleay-perl sendemail ; }
else
color "不支持此操作系统,退出!" 1
exit
fi
}
send_email () {
local email_receive="$1"
local email_subject="$2"
local email_message="$3"
sendemail -f $email_send -t "$email_receive" -u "$email_subject" -m "$email_message" -s $email_smtp_server -o message-charset=utf-8 -o tls=yes -xu $email_send -xp $email_passwd
[ $? -eq 0 ] && color "Email sent successfully!" 0 || color "Failed to send email!" 1
}
notify() {
if [[ $1 =~ ^(master|backup|fault)$ ]];then
mailsubject="$(hostname) to be $1, vip floating"
mailbody="$(date +'%F %T'): vrrp transition, $(hostname) changed to be
$1"
send_email "$contact" "$mailsubject" "$mailbody"
else
echo "Usage: $(basename $0) {master|backup|fault}"
exit 1
fi
}
install_sendemail
notify $1
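Before wiring the script into keepalived, it can be tested by hand (assuming the SMTP account details above have been filled in):

# Send a test notification; the script installs sendemail on first run
bash /etc/keepalived/notify.sh master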
# The notify hooks must be added inside the vrrp_instance VI_1 block
vim /etc/keepalived/keepalived.conf
....
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    nopreempt
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.190
    }
    unicast_src_ip 10.0.0.140
    unicast_peer {
        10.0.0.136
    }
    notify_master "/etc/keepalived/notify.sh master"
    notify_backup "/etc/keepalived/notify.sh backup"
    notify_fault "/etc/keepalived/notify.sh fault"
}
....
# Make the script executable
[root@rocky7 ~]#chmod +x /etc/keepalived/notify.sh
# Restart keepalived; two notification emails will be sent
[root@rocky7 ~]#systemctl restart keepalived
Environment
ka1:10.0.0.140
ka2:10.0.0.136
VIP:10.0.0.190
web1:10.0.0.130
web2:10.0.0.138
########### Adjust RS kernel parameters so the VIP causes no IP conflict on the network ###########
#!/bin/bash
#Author:zhangwei
#Date:2018-08-13
vip=10.0.0.190
mask='255.255.255.255'
dev=lo:1
case $1 in
start)
echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
ifconfig $dev $vip netmask $mask
echo "The RS Server is Ready!"
;;
stop)
ifconfig $dev down
echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore
echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore
echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce
echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce
echo "The RS Server is Canceled!"
;;
*)
echo "Usage: $(basename $0) start|stop"
exit 1
;;
esac
# Run the script
[root@rocky4 sh]#bash lvs_dr_rs.sh start
The RS Server is Ready!
# Install httpd and set up the home page
[root@rocky4 sh]#yum -y install httpd
[root@rocky4 sh]#hostname -I > /var/www/html/index.html
[root@rocky4 sh]#systemctl enable --now httpd
[root@rocky4 sh]#curl 10.0.0.130
10.0.0.130
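Whether the script took effect can be confirmed directly (a quick check):

# The VIP should be bound to lo and the ARP kernel parameters set to 1/2
[root@rocky4 sh]#ip addr show lo | grep 10.0.0.190
[root@rocky4 sh]#sysctl net.ipv4.conf.all.arp_ignore net.ipv4.conf.all.arp_announce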
# Add the following to the main keepalived configuration
virtual_server 10.0.0.190 80 {      # VIP address and port
    delay_loop 3                    # interval between backend health checks
    lb_algo rr                      # scheduling algorithm: rr
    lb_kind DR                      # cluster type DR (must be uppercase)
    protocol TCP                    # service protocol, normally TCP
    sorry_server 127.0.0.1 80       # fallback address when every RS is down
    real_server 10.0.0.130 80 {     # RS IP and port
        weight 1                    # weight of this RS
        HTTP_GET {                  # application-layer check
            url {
                path /              # URL to monitor
                status_code 200     # response code considered healthy, normally 200
            }
            connect_timeout 1       # connection timeout for the check
            nb_get_retry 3          # number of retries
            delay_before_retry 1    # delay before each retry
        }
    }
    real_server 10.0.0.138 80 {
        weight 1
        TCP_CHECK {                 # the other RS uses a plain TCP check
            connect_timeout 5
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80         # port on the RS to probe for health
        }
    }
}
[root@rocky2 ~]#systemctl restart keepalived
[root@rocky2 ~]#yum -y install ipvsadm
[root@rocky2 ~]#ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.0.0.190:80 rr
  -> 10.0.0.130:80                Route   1      0          0
  -> 10.0.0.138:80                Route   1      0          0
# Both real servers have been pushed into the LVS table by keepalived
2. Test again with the VIP on ka2
3. Tests pass; keepalived + lvs now delivers both load balancing and high availability (a quick curl check follows below).
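A minimal check from any other host, assuming both RS are healthy: successive requests to the VIP should alternate between the two index pages.

# Expect 10.0.0.130 and 10.0.0.138 alternating (rr scheduling)
for i in 1 2 3 4; do curl -s 10.0.0.190; done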