1. Machines involved
192.168.60.11 node1.ha.com
192.168.60.12 node2.ha.com
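These two lines are host entries; a minimal setup sketch, assuming both nodes resolve each other via /etc/hosts and each hostname matches the output of uname -n:
# Run on both nodes: add the cluster hosts.
cat >> /etc/hosts <<'EOF'
192.168.60.11 node1.ha.com
192.168.60.12 node2.ha.com
EOF
# On node1 (adjust accordingly on node2):
hostname node1.ha.com
sed -i 's/^HOSTNAME=.*/HOSTNAME=node1.ha.com/' /etc/sysconfig/network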
2. Install and configure corosync and pacemaker
yum -y install corosync pacemaker
cd /etc/corosync
cp corosync.conf.example corosync.conf
grep -v '^#' /etc/corosync/corosync.conf | grep -v '.*#' | grep -v '^$'
compatibility: whitetank
totem {
    version: 2
    secauth: on
    threads: 0
    interface {
        ringnumber: 0
        bindnetaddr: 192.168.60.0
        mcastaddr: 239.255.1.1
        mcastport: 5405
        ttl: 1
    }
}
logging {
    fileline: off
    to_stderr: no
    to_logfile: yes
    logfile: /var/log/cluster/corosync.log
    to_syslog: no
    debug: off
    timestamp: on
    logger_subsys {
        subsys: AMF
        debug: off
    }
}
service {
    ver: 0
    name: pacemaker
}
aisexec {
    user: root
    group: root
}
corosync-keygen    # generates /etc/corosync/authkey; reads /dev/random, so it may pause waiting for entropy
scp corosync.conf authkey root@node2.ha.com:/etc/corosync
service corosync start
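corosync has to be running on both nodes before the cluster can form; a short sketch, assuming root SSH access from node1 to node2:
ssh root@node2.ha.com 'service corosync start'
corosync-cfgtool -s    # ring status on the local node; expect "no faults"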
3. Verify that corosync and pacemaker started correctly
Check that corosync started correctly
[root@node1 corosync]# grep -e "Corosync Cluster Engine" -e "configuration file" /var/log/cluster/corosync.log
Jan 10 08:46:12 corosync [MAIN ] Corosync Cluster Engine ('1.4.7'): started and ready to provide service.
Jan 10 08:46:12 corosync [MAIN ] Successfully read main configuration file '/etc/corosync/corosync.conf'.
Check that pacemaker started correctly
[root@node1 corosync]# grep pcmk_startup /var/log/cluster/corosync.log
Jan 10 08:46:12 corosync [pcmk ] info: pcmk_startup: CRM: Initialized
Jan 10 08:46:12 corosync [pcmk ] Logging: Initialized pcmk_startup
Jan 10 08:46:12 corosync [pcmk ] info: pcmk_startup: Maximum core file size is: 18446744073709551615
Jan 10 08:46:12 corosync [pcmk ] info: pcmk_startup: Service: 9
Jan 10 08:46:12 corosync [pcmk ] info: pcmk_startup: Local hostname: node1.ha.com
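It is also worth confirming that no unexpected errors were logged at startup (errors about missing STONITH resources are normal until STONITH is disabled in step 4):
grep ERROR: /var/log/cluster/corosync.log | grep -v unpack_resources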
4. Install and configure crmsh
wget ftp://ftp.pbone.net/mirror/ftp5.gwdg.de/pub/opensuse/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/x86_64/crmsh-1.2.6-0.rc2.2.1.x86_64.rpm
wget ftp://ftp.pbone.net/mirror/ftp5.gwdg.de/pub/opensuse/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/x86_64/pssh-2.3.1-2.1.x86_64.rpm
yum -y install pssh-2.3.1-2.1.x86_64.rpm crmsh-1.2.6-0.rc2.2.1.x86_64.rpm
Lower the CIB validation schema to the matching pacemaker version
cibadmin --modify --xml-text '<cib validate-with="pacemaker-1.2"/>'
Disable STONITH
crm configure property stonith-enabled=false
Ignore the quorum requirement so that resources can still fail over when quorum is not reached
crm configure property no-quorum-policy=ignore
Set the default resource stickiness
crm configure rsc_defaults resource-stickiness=100
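The committed settings can be verified before moving on, for example:
crm_verify -L -V      # should report no errors once stonith-enabled=false is set
crm configure show    # shows the property and rsc_defaults lines just committed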
[root@node1 corosync]# crm status
Stack: classic openais (with plugin)
Current DC: node2.ha.com (version 1.1.18-3.el6-bfe4e80420) - partition with quorum
Last updated: Wed Jan 16 13:44:07 2019
Last change: Wed Jan 16 13:43:38 2019 by hacluster via cibadmin on node2.ha.com
2 nodes configured (2 expected votes)
0 resources configured
Online: [ node1.ha.com node2.ha.com ]
No active resources
[root@node1 corosync]#
5. Install and configure DRBD
ls /sys/class/scsi_host/
echo "- - -" > /sys/class/scsi_host/host0/scan
echo "- - -" > /sys/class/scsi_host/host1/scan
echo "- - -" > /sys/class/scsi_host/host2/scan
wget ftp://rpmfind.net/linux/atrpms/el6-x86_64/atrpms/stable/drbd-8.4.3-33.el6.x86_64.rpm
wget ftp://rpmfind.net/linux/atrpms/el6-x86_64/atrpms/stable/drbd-kmdl-2.6.32-431.el6-8.4.3-33.el6.x86_64.rpm
rpm -ivh drbd-8.4.3-33.el6.x86_64.rpm drbd-kmdl-2.6.32-431.el6-8.4.3-33.el6.x86_64.rpm
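The drbd-kmdl package is built against kernel 2.6.32-431.el6, so it must match the running kernel; a quick check that the module actually loads:
uname -r                              # should be 2.6.32-431.el6.x86_64 for this kmdl package
modprobe drbd && lsmod | grep drbd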
grep -v '^#' /etc/drbd.d/global_common.conf | grep -v '.*#'
global {
    usage-count no;
}
common {
    handlers {
        pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
        pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
        local-io-error "/usr/lib/drbd/notify-io-error.sh; /usr/lib/drbd/notify-emergency-shutdown.sh; echo o > /proc/sysrq-trigger ; halt -f";
    }
    startup {
    }
    options {
    }
    disk {
        on-io-error detach;
    }
    net {
        protocol C;
        cram-hmac-alg "sha1";
        shared-secret "drbdsecret";
    }
    syncer {
        rate 1000M;
    }
}
cat /etc/drbd.d/web.res
resource web {
    on node1.ha.com {
        device /dev/drbd0;
        disk /dev/sdb;
        address 192.168.60.11:7789;
        meta-disk internal;
    }
    on node2.ha.com {
        device /dev/drbd0;
        disk /dev/sdb;
        address 192.168.60.12:7789;
        meta-disk internal;
    }
}
cd /etc/drbd.d/
scp global_common.conf web.res root@node2.ha.com:/etc/drbd.d
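Before creating metadata, the resource definition can be sanity-checked on each node; drbdadm parses the configuration and reports any syntax errors:
drbdadm dump web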
6. Initialize the resource, start the service, and check its status (run on all nodes)
drbdadm create-md web
/etc/init.d/drbd start
drbd-overview    # or: cat /proc/drbd
drbdadm primary --force web    # run on node1 only, to force it to Primary for the initial sync
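The forced promotion starts a full initial sync from node1 to node2; wait until both sides show UpToDate/UpToDate before creating the filesystem in step 7. Progress can be watched with:
watch -n1 cat /proc/drbd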
7. Create a filesystem on the primary node and mount it
mke2fs -t ext4 /dev/drbd0
mount /dev/drbd0 /mnt
umount /mnt
drbdadm secondary web
drbd-overview
/etc/init.d/drbd stop
chkconfig drbd off
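DRBD must likewise be stopped and removed from autostart on node2, since pacemaker will manage it from now on; assuming root SSH access:
ssh root@node2.ha.com '/etc/init.d/drbd stop; chkconfig drbd off'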
8. Configure automatic DRBD role switchover
[root@node2 ~]# crm
crm(live)# configure
crm(live)configure# primitive mydrbd ocf:linbit:drbd params drbd_resource="web" op monitor role=Slave interval=20s timeout=20s op monitor role=Master interval=10s timeout=20s op start timeout=240s op stop timeout=100s
crm(live)configure# verify
crm(live)configure# ms ms_mydrbd mydrbd meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
crm(live)configure# verify
crm(live)configure# commit
crm(live)configure# cd ..
crm(live)# status
Stack: classic openais (with plugin)
Current DC: node1.ha.com (version 1.1.18-3.el6-bfe4e80420) - partition with quorum
Last updated: Thu Jan 17 15:12:05 2019
Last change: Thu Jan 17 15:07:03 2019 by root via cibadmin on node1.ha.com
2 nodes configured (2 expected votes)
2 resources configured
Online: [ node1.ha.com node2.ha.com ]
Active resources:
Master/Slave Set: ms_mydrbd [mydrbd]
Masters: [ node1.ha.com ]
Slaves: [ node2.ha.com ]
crm(live)# node
crm(live)node# standby node1.ha.com
crm(live)node# cd ..
crm(live)# status
Stack: classic openais (with plugin)
Current DC: node1.ha.com (version 1.1.18-3.el6-bfe4e80420) - partition with quorum
Last updated: Thu Jan 17 15:12:43 2019
Last change: Thu Jan 17 15:12:41 2019 by root via crm_attribute on node1.ha.com
2 nodes configured (2 expected votes)
2 resources configured
Node node1.ha.com: standby
Online: [ node2.ha.com ]
Active resources:
Master/Slave Set: ms_mydrbd [mydrbd]
Masters: [ node2.ha.com ]
crm(live)# node
crm(live)node# online node1.ha.com
crm(live)node# cd ..
crm(live)# status
Stack: classic openais (with plugin)
Current DC: node1.ha.com (version 1.1.18-3.el6-bfe4e80420) - partition with quorum
Last updated: Thu Jan 17 15:12:59 2019
Last change: Thu Jan 17 15:12:56 2019 by root via crm_attribute on node1.ha.com
2 nodes configured (2 expected votes)
2 resources configured
Online: [ node1.ha.com node2.ha.com ]
Active resources:
Master/Slave Set: ms_mydrbd [mydrbd]
Masters: [ node2.ha.com ]
Slaves: [ node1.ha.com ]
crm(live)#
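The roles reported by pacemaker can be cross-checked against DRBD itself on either node:
drbdadm role web    # prints local/peer roles, e.g. Primary/Secondary
drbd-overview
With DRBD failover working, the next step is to add a Filesystem resource and tie it to the DRBD master: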
crm(live)configure# primitive myfs ocf:heartbeat:Filesystem params device=/dev/drbd0 directory=/mnt fstype="ext4" op monitor interval=20s timeout=40s op start timeout=60s op stop timeout=60s
crm(live)configure# verify
crm(live)configure# colocation myfs_with_ms_mydrbd_master inf: myfs ms_mydrbd:Master
crm(live)configure# verify
crm(live)configure# order ms_mydrbd_master_before_myfs inf: ms_mydrbd:promote myfs:start
crm(live)configure# verify
crm(live)configure# commit
crm(live)configure# cd ..
crm(live)# status
Stack: classic openais (with plugin)
Current DC: node1.ha.com (version 1.1.18-3.el6-bfe4e80420) - partition with quorum
Last updated: Thu Jan 17 15:15:08 2019
Last change: Thu Jan 17 15:14:58 2019 by root via cibadmin on node1.ha.com
2 nodes configured (2 expected votes)
3 resources configured
Online: [ node1.ha.com node2.ha.com ]
Active resources:
Master/Slave Set: ms_mydrbd [mydrbd]
Masters: [ node2.ha.com ]
Slaves: [ node1.ha.com ]
myfs (ocf::heartbeat:Filesystem): Started node2.ha.com
crm(live)# node
crm(live)node# standby node2.ha.com
crm(live)node# cd ..
crm(live)# status
Stack: classic openais (with plugin)
Current DC: node1.ha.com (version 1.1.18-3.el6-bfe4e80420) - partition with quorum
Last updated: Thu Jan 17 15:16:10 2019
Last change: Thu Jan 17 15:16:06 2019 by root via crm_attribute on node1.ha.com
2 nodes configured (2 expected votes)
3 resources configured
Node node2.ha.com: standby
Online: [ node1.ha.com ]
Active resources:
Master/Slave Set: ms_mydrbd [mydrbd]
Masters: [ node1.ha.com ]
myfs (ocf::heartbeat:Filesystem): Started node1.ha.com
crm(live)# node
crm(live)node# online node2.ha.com
crm(live)node# cd ..
crm(live)# status
Stack: classic openais (with plugin)
Current DC: node1.ha.com (version 1.1.18-3.el6-bfe4e80420) - partition with quorum
Last updated: Thu Jan 17 15:16:30 2019
Last change: Thu Jan 17 15:16:21 2019 by root via crm_attribute on node1.ha.com
2 nodes configured (2 expected votes)
3 resources configured
Online: [ node1.ha.com node2.ha.com ]
Active resources:
Master/Slave Set: ms_mydrbd [mydrbd]
Masters: [ node1.ha.com ]
Slaves: [ node2.ha.com ]
myfs (ocf::heartbeat:Filesystem): Started node1.ha.com
crm(live)#
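As a final check, the mounted filesystem should always follow the DRBD master (node1 at this point):
df -h /mnt
mount | grep /dev/drbd0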