LVS + Keepalived High-Availability Load Balancing (NAT Mode) Cluster Architecture

Architecture Diagram

(The original diagram image is not reproduced here. Topology: client → VIP 10.3.2.123 on the active LVS director → NAT → web01/web02 on 192.168.52.0/24, with shared content on nfs-server.)

Environment Preparation

  • client: 10.3.2.16
  • LVS01: ip1: 10.3.2.17 ip2: 192.168.52.128
  • LVS02: ip1: 10.3.2.97 ip2: 192.168.52.131
  • web01: 192.168.52.129
  • web02: 192.168.52.130
  • nfs-server: 192.168.52.133
  • VIP: 10.3.2.123

Keepalived Configuration

LVS01

[root@lvs01 ~]# yum -y install keepalived	// install the service
[root@lvs01 ~]# vim /etc/keepalived/keepalived.conf	// edit the configuration file

Delete the entire contents of the configuration file and enter the following. Two VRRP instances are defined: VI_1 carries the external VIP (10.3.2.123) that clients connect to, and VI_2 carries the internal VIP (192.168.52.100) that the web servers will use as their gateway for return traffic in NAT mode.

! Configuration File for keepalived         # file declaration; no blank lines or spaces may come before it
global_defs {
    router_id 1                             # identifier of this node in the group
}

vrrp_instance VI_1 {                        # instance name; must be identical on both routers
    state MASTER                            # MASTER or BACKUP role
    interface ens32                         # interface to run VRRP on
    mcast_src_ip 10.3.2.17                  # heartbeat source IP (this host's own IP)
    virtual_router_id 55                    # virtual router ID; identical on master and backup, but distinct per instance
    priority 100                            # priority (higher wins the election)
    advert_int 1                            # heartbeat (advertisement) interval in seconds

    authentication {                        # password authentication (1-8 characters)
            auth_type PASS
            auth_pass 123456
    }

    virtual_ipaddress {                     # VIP
            10.3.2.123/20
    }
}
vrrp_instance VI_2 {
    state MASTER
    interface ens33
    mcast_src_ip 192.168.52.128
    virtual_router_id 56
    priority 100
    advert_int 1

    authentication {
            auth_type PASS
            auth_pass 123456
    }

    virtual_ipaddress {
            192.168.52.100/24
    }
}
[root@lvs01 ~]# systemctl start keepalived.service 
[root@lvs01 ~]# systemctl enable keepalived.service

LVS02

[root@lvs02 ~]# yum -y install keepalived	// install the service
[root@lvs02 ~]# vim /etc/keepalived/keepalived.conf	// edit the configuration file

Delete the entire contents of the configuration file and enter the following; compared with lvs01, only router_id, state, mcast_src_ip, and priority differ.

! Configuration File for keepalived
global_defs {
    router_id 2
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens32
    mcast_src_ip 10.3.2.97
    virtual_router_id 55
    priority 99
    advert_int 1

    authentication {
            auth_type PASS
            auth_pass 123456
    }

    virtual_ipaddress {
            10.3.2.123/20
    }
}
vrrp_instance VI_2 {
    state BACKUP
    interface ens33
    mcast_src_ip 192.168.52.131
    virtual_router_id 56
    priority 99
    advert_int 1

    authentication {
            auth_type PASS
            auth_pass 123456
    }

    virtual_ipaddress {
            192.168.52.100/24
    }
}
[root@lvs02 ~]# systemctl start keepalived.service 
[root@lvs02 ~]# systemctl enable keepalived.service
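With keepalived running on both nodes, a quick sanity check (using the interface names from the configs above): the MASTER should hold both VIPs, and the BACKUP should hold neither while the master is healthy.

[root@lvs01 ~]# ip addr show ens32 | grep 10.3.2.123	// external VIP bound on the master
[root@lvs01 ~]# ip addr show ens33 | grep 192.168.52.100	// internal VIP bound on the master
[root@lvs02 ~]# ip addr show ens32 | grep 10.3.2.123	// prints nothing while lvs01 is up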

LVS Configuration

LVS01

[root@lvs01 ~]# echo 1 > /proc/sys/net/ipv4/ip_forward	// enable IP forwarding (routing)
[root@lvs01 ~]# yum -y install ipvsadm	// install the IP Virtual Server administration tool
[root@lvs01 ~]# ipvsadm -A -t 10.3.2.123:80 -s rr	// -A: add a virtual service (the externally visible address)  -t: TCP  -s: scheduling strategy  rr: round-robin
[root@lvs01 ~]# ipvsadm -a -t 10.3.2.123:80 -r 192.168.52.129 -m -w 1	// -a: add a real server to the service  -r: real server address  -m: NAT (masquerade) mode  -w: weight 1
[root@lvs01 ~]# ipvsadm -a -t 10.3.2.123:80 -r 192.168.52.130 -m -w 1

Check the result:

[root@lvs01 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.3.2.123:80 rr
  -> 192.168.52.129:80            Masq    1      0          0         
  -> 192.168.52.130:80            Masq    1      0          0  

[root@lvs01 ~]# systemctl restart keepalived.service	// restart keepalived

LVS02

[root@lvs02 ~]# echo 1 > /proc/sys/net/ipv4/ip_forward	// enable IP forwarding (routing)
[root@lvs02 ~]# yum -y install ipvsadm	// install the IP Virtual Server administration tool
[root@lvs02 ~]# ipvsadm -A -t 10.3.2.123:80 -s rr	// -A: add a virtual service (the externally visible address)  -t: TCP  -s: scheduling strategy  rr: round-robin
[root@lvs02 ~]# ipvsadm -a -t 10.3.2.123:80 -r 192.168.52.129 -m -w 1	// -a: add a real server to the service  -r: real server address  -m: NAT (masquerade) mode  -w: weight 1
[root@lvs02 ~]# ipvsadm -a -t 10.3.2.123:80 -r 192.168.52.130 -m -w 1

Check the result:

[root@lvs02 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.3.2.123:80 rr
  -> 192.168.52.129:80            Masq    1      0          0         
  -> 192.168.52.130:80            Masq    1      0          0 

[root@lvs02 ~]# systemctl restart keepalived.service	// restart keepalived
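Neither the `echo` into /proc nor the manual ipvsadm rules survive a reboot. A minimal sketch for making both persistent on each director, assuming CentOS/RHEL 7, where the ipvsadm service loads rules from /etc/sysconfig/ipvsadm at boot:

[root@lvs01 ~]# echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf	// persist IP forwarding
[root@lvs01 ~]# sysctl -p	// apply it now
[root@lvs01 ~]# ipvsadm-save -n > /etc/sysconfig/ipvsadm	// dump the current IPVS rules
[root@lvs01 ~]# systemctl enable ipvsadm	// reload them at boot

Repeat the same steps on lvs02.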

Load-Balancing and Failover Test

Install nginx on both web servers.
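The install step itself is not shown above; a minimal sketch, assuming CentOS 7, where nginx comes from the EPEL repository (run on both web01 and web02):

[root@web01 ~]# yum -y install epel-release	// nginx is in EPEL on CentOS 7
[root@web01 ~]# yum -y install nginx
[root@web01 ~]# systemctl start nginx
[root@web01 ~]# systemctl enable nginx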

web01

[root@web01 ~]# echo web1 > /usr/share/nginx/html/index.html	// create a test page
[root@web01 ~]# route add -net 10.3.0.0/20 gw 192.168.52.100	// route return traffic for the client network via the internal VIP

web02

[root@web02 ~]# echo web2 > /usr/share/nginx/html/index.html	// create a test page
[root@web02 ~]# route add -net 10.3.0.0/20 gw 192.168.52.100	// route return traffic for the client network via the internal VIP
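`route add` takes effect immediately but is lost on reboot. A sketch for persisting it on CentOS 7, assuming the web servers' inward-facing interface is named ens33 (a hypothetical name; check with `ip addr`):

[root@web02 ~]# echo "10.3.0.0/20 via 192.168.52.100" > /etc/sysconfig/network-scripts/route-ens33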

Client access

[root@client ~]# elinks --dump http://10.3.2.123
   web1
[root@client ~]# elinks --dump http://10.3.2.123
   web2

// test successful: requests alternate between web1 and web2 (round-robin)
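The transcript above covers only the load-balancing half; a minimal failover check under the same setup is to stop keepalived on lvs01 and confirm that lvs02 takes over both VIPs and keeps serving clients:

[root@lvs01 ~]# systemctl stop keepalived.service	// simulate a director failure
[root@lvs02 ~]# ip addr show ens32 | grep 10.3.2.123	// external VIP has moved here
[root@lvs02 ~]# ip addr show ens33 | grep 192.168.52.100	// internal VIP has moved here
[root@client ~]# elinks --dump http://10.3.2.123	// the site still responds

After keepalived is restarted on lvs01, the VIPs move back, since the higher-priority MASTER preempts by default.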

nfs-server

Install nfs-utils

[root@nfs-server ~]# yum -y install nfs-utils
[root@nfs-server ~]# mkdir /webdata	// create a directory for the web content
[root@nfs-server ~]# echo "nfs-test..." > /webdata/index.html

Configure the NFS server

[root@nfs-server ~]# vim /etc/exports
// add the following line to the file
/webdata 192.168.52.0/24(rw)

Note that there must be no space between the network and the options: "/webdata 192.168.52.0/24 (rw)" would export the directory read-only to the subnet and read-write to the entire world.

[root@nfs-server ~]# systemctl start nfs-server.service	// start the NFS service
[root@nfs-server ~]# exportfs -v	// verify the export
/webdata      	192.168.52.0/24(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,root_squash,no_all_squash)
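To bring the export back automatically after a reboot, also enable the service (a standard step not shown in the original):

[root@nfs-server ~]# systemctl enable nfs-server.service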

NFS Client Configuration

web01

[root@web01 ~]# yum -y install nfs-utils	// install nfs-utils
[root@web01 ~]# showmount -e 192.168.52.133	// check the exports on the storage server
Export list for 192.168.52.133:
/webdata 192.168.52.0/24

[root@web01 ~]# mount -t nfs 192.168.52.133:/webdata /usr/share/nginx/html/	// mount manually

web02

[root@web02 ~]# yum -y install nfs-utils	// install nfs-utils
[root@web02 ~]# showmount -e 192.168.52.133	// check the exports on the storage server
Export list for 192.168.52.133:
/webdata 192.168.52.0/24

[root@web02 ~]# mount -t nfs 192.168.52.133:/webdata /usr/share/nginx/html/	// mount manually
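Both mounts above are manual and disappear on reboot. A sketch of an /etc/fstab entry for each web server; the `_netdev` option defers the mount until the network is up:

192.168.52.133:/webdata  /usr/share/nginx/html  nfs  defaults,_netdev  0 0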

Test

Access from the client:

[root@client ~]# elinks --dump http://10.3.2.123
   nfs-test...
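Because both back ends now serve the same NFS-backed directory, updating the file once on nfs-server changes what every request returns; for example:

[root@nfs-server ~]# echo "nfs-test v2" > /webdata/index.html	// hypothetical content change

Subsequent requests from the client should return the new content regardless of which web server handles them.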

Success, nice!
