数据库集群
DBA
关闭nginx
开启数据库
主从一致
无损模式
AFTER_SYNC(rpl_semi_sync_master_wait_point=AFTER_SYNC,即无损半同步模式)
生产环境半同步超时时间(rpl_semi_sync_master_timeout)需设置更大,避免超时后退化为异步复制,保证无损
server1:
ps aux ##查看进程,有其他服务关闭
systemctl disable --now nginx
/etc/init.d/mysqld start
##启动数据库
mysql -pwestos
show master status;
+------------------+----------+--------------+------------------+-------------------+
| File | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set |
+------------------+----------+--------------+------------------+-------------------+
| mysql-bin.000004 | 154 | | | |
+------------------+----------+--------------+------------------+-------------------+
show processlist;
INSTALL PLUGIN rpl_semi_sync_master SONAME 'semisync_master.so';
SELECT PLUGIN_NAME,PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS WHERE PLUGIN_NAME LIKE '%semi%';
SET GLOBAL rpl_semi_sync_master_enabled = 1;
show variables like 'rpl%';
server2,server3:
ps aux
/etc/init.d/mysqld start
mysql -pwestos
show slave status\G
Slave_IO_Running: Yes
Slave_SQL_Running: Yes
有一个不是yes
Last_IO_Error: Fatal error: The slave I/O thread stops because master and slave have equal MySQL server ids; these ids must be different for replication to work (or the --replicate-same-server-id option must be used on slave but this does not always make sense; please check the manual before using it).
查看错误
修改/etc/init.d/mysqld restart
后
show slave status\G
INSTALL PLUGIN rpl_semi_sync_slave SONAME 'semisync_slave.so';
SELECT PLUGIN_NAME,PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS WHERE PLUGIN_NAME LIKE '%semi%';
SET GLOBAL rpl_semi_sync_slave_enabled = 1;
show variables like 'rpl%';
show status like 'rpl%';
SET GLOBAL rpl_semi_sync_slave_enabled = 1;
STOP SLAVE IO_THREAD;
START SLAVE IO_THREAD;
show status like 'rpl%';
test:
server1:
use lxq
insert user_tb values ('user4','123');
server2,3:
use lxq
select * from lxq.user_tb;
可查看数据
server2:
STOP SLAVE IO_THREAD;
server1:
insert user_tb values ('user5','123');
正常执行无反应
需要保证每个主机都同步
server1:
set global rpl_semi_sync_master_wait_for_slave_count=2;
insert user_tb values ('user7','123');
##有延迟
server2:
select * from lxq.user_tb;
查看不到
server3:
select * from lxq.user_tb;
可查看
热启用:
server2:
START SLAVE IO_THREAD;
show processlist;
STOP SLAVE SQL_THREAD;
set global slave_parallel_type=LOGICAL_CLOCK;
START SLAVE SQL_THREAD;
show variables like 'master_info%';
stop slave;
set global master_info_repository = 'TABLE';
show variables like 'relay_%';
set global relay_log_info_repository='TABLE';
START slave;
set global slave_parallel_workers=16;
cd /usr/local/mysql/data
ls
mysql -pwestos
server3:
主配置文件
vim /etc/my.cnf
[mysqld]
datadir=/usr/local/mysql/data
socket=/usr/local/mysql/data/mysql.sock
symbolic-links=0
log-bin=mysql-bin
server-id=3
gtid_mode=ON
enforce-gtid-consistency=ON
slave-parallel-type=LOGICAL_CLOCK
slave-parallel-workers=16
relay_log_info_repository=TABLE
relay_log_recovery=ON
master_info_repository=TABLE
/etc/init.d/mysqld restart
mysql -pwestos
STOP SLAVE SQL_THREAD
CHANGE MASTER TO MASTER_DELAY=30;
START SLAVE SQL_THREAD
set global rpl_semi_sync_slave_enabled=1;
stop slave;
start slave;
show variables like 'rpl%';
test:
server1:
use lxq
insert into user_tb values ('user7','777');
set global slow_query_log=ON;
show variables like '%slow%';
show variables like 'long%';
server2,3:
select * from lxq.user_tb;
组复制:
server1(引导节点,先启动;示例配置中的 server_id、local_address 需按各自主机修改):
/etc/init.d/mysqld stop ##关闭服务
cd /usr/local/mysql/data
ls
rm -rf * ##删除数据
vim /etc/my.cnf ##编辑主配置文件
[mysqld]
datadir=/usr/local/mysql/data
socket=/usr/local/mysql/data/mysql.sock
symbolic-links=0
disabled_storage_engines="MyISAM,BLACKHOLE,FEDERATED,ARCHIVE,MEMORY"
server_id=3
gtid_mode=ON
enforce_gtid_consistency=ON
master_info_repository=TABLE
relay_log_info_repository=TABLE
binlog_checksum=NONE
log_slave_updates=ON
log_bin=binlog
binlog_format=ROW
plugin_load_add='group_replication.so'
transaction_write_set_extraction=XXHASH64
group_replication_group_name="aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
group_replication_start_on_boot=off
group_replication_local_address="172.25.9.3:33061"
group_replication_group_seeds="172.25.9.1:33061,172.25.9.2:33061,172.25.9.3:33061"
group_replication_bootstrap_group=off
group_replication_ip_whitelist="172.25.9.0/24,127.0.0.0/8"
group_replication_single_primary_mode=OFF
group_replication_enforce_update_everywhere_checks=ON
group_replication_allow_local_disjoint_gtids_join=ON
mysqld --initialize --user=mysql
/etc/init.d/mysqld start
mysql -p
alter user root@localhost identified by 'westos';
SET SQL_LOG_BIN=0;
CREATE USER rpl_user@'%' IDENTIFIED BY 'westos';
GRANT REPLICATION SLAVE ON *.* TO rpl_user@'%';
SET SQL_LOG_BIN=1;
FLUSH PRIVILEGES;
CHANGE MASTER TO MASTER_USER='rpl_user',MASTER_PASSWORD='westos' FOR CHANNEL 'group_replication_recovery';
SET GLOBAL group_replication_bootstrap_group=ON;
START GROUP_REPLICATION;
SET GLOBAL group_replication_bootstrap_group=OFF;
SELECT * FROM performance_schema.replication_group_members;
server2,3;
/etc/init.d/mysqld stop ##关闭服务
cd /usr/local/mysql/data
ls
rm -rf * ##删除数据
vim /etc/my.cnf ##编辑主配置文件
[mysqld]
datadir=/usr/local/mysql/data
socket=/usr/local/mysql/data/mysql.sock
symbolic-links=0
disabled_storage_engines="MyISAM,BLACKHOLE,FEDERATED,ARCHIVE,MEMORY"
server_id=3 ##注意:各节点 server_id 不能重复,server2 上需改为 server_id=2
gtid_mode=ON
enforce_gtid_consistency=ON
master_info_repository=TABLE
relay_log_info_repository=TABLE
binlog_checksum=NONE
log_slave_updates=ON
log_bin=binlog
binlog_format=ROW
plugin_load_add='group_replication.so'
transaction_write_set_extraction=XXHASH64
group_replication_group_name="aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
group_replication_start_on_boot=off
group_replication_local_address="172.25.9.3:33061" ##server2 上需改为 172.25.9.2:33061
group_replication_group_seeds="172.25.9.1:33061,172.25.9.2:33061,172.25.9.3:33061"
group_replication_bootstrap_group=off
group_replication_ip_whitelist="172.25.9.0/24,127.0.0.0/8"
group_replication_single_primary_mode=OFF
group_replication_enforce_update_everywhere_checks=ON
group_replication_allow_local_disjoint_gtids_join=ON
mysqld --initialize --user=mysql
/etc/init.d/mysqld start
mysql -p
alter user root@localhost identified by 'westos';
SET SQL_LOG_BIN=0;
CREATE USER rpl_user@'%' IDENTIFIED BY 'westos';
GRANT REPLICATION SLAVE ON *.* TO rpl_user@'%';
SET SQL_LOG_BIN=1;
FLUSH PRIVILEGES;
CHANGE MASTER TO MASTER_USER='rpl_user',MASTER_PASSWORD='westos' FOR CHANNEL 'group_replication_recovery';
START GROUP_REPLICATION;
SELECT * FROM performance_schema.replication_group_members;
开启虚拟机四
server4:
rpm -ivh mysql-router-community-8.0.21-1.el7.x86_64.rpm
cd /etc/mysqlrouter/
vim mysqlrouter.conf
[routing:ro]
bind_address=0.0.0.0
bind_port=7001
destinations=172.25.9.1:3306,172.25.9.2:3306,172.25.9.3:3306
routing_strategy=round-robin
[routing:rw]
bind_address=0.0.0.0
bind_port=7002
destinations=172.25.9.1:3306,172.25.9.2:3306,172.25.9.3:3306
routing_strategy=first-available
systemctl start mysqlrouter.service
netstat -antlp
server1,2,3:
yum install lsof -y
lsof -i :3306
server1:
mysql -pwestos
create database test;
use test;
insert into t1 values (1,'luis');
select * from t1;
grant insert,update on test.* to user2@'%' identified by 'westos';
flush privileges;
SELECT * FROM performance_schema.replication_group_members;
主机:
mysql -h 172.25.9.4 -P 7001 -u user1 -p
select * from test.t1;
server1,2,3:
yum install lsof
lsof -i :3306
##查看此服务由哪个主机提供
mysql -h 172.25.9.4 -P 7001 -u user2 -p
use test
insert into t1 values (2,'lxq');
select * from test.t1; ##user2 只授予了 insert,update 权限,无 select 权限,此查询被拒绝
server3:
STOP GROUP_REPLICATION;
server2:
SELECT * FROM performance_schema.replication_group_members;
+---------------------------+--------------------------------------+-------------+-------------+--------------+
| CHANNEL_NAME | MEMBER_ID | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE |
+---------------------------+--------------------------------------+-------------+-------------+--------------+
| group_replication_applier | 453bd81d-9c17-11eb-b6be-525400ffe223 | server1 | 3306 | ONLINE |
| group_replication_applier | b2330a0f-9c21-11eb-a1f6-525400b81b72 | server2 | 3306 | ONLINE |
+---------------------------+--------------------------------------+-------------+-------------+--------------+
主机:
show databases;
ERROR 2013 (HY000): Lost connection to MySQL server during query
MySQL [test]> show databases;
ERROR 2006 (HY000): MySQL server has gone away
No connection. Trying to reconnect...
Connection id: 44
Current database: test
+--------------------+
| Database |
+--------------------+
| information_schema |
| test |
+--------------------+
数据库高可用
节点有限:9个
MHA高可用(老,稳定,与mysql主从复制相似)
MHA集群架构
(二进制日志,重点)
数据库专属集群套件
无中心化,架构重量级,不够轻量,太过复杂
一主两从 架构还原
manage节点对所有节点免密
节点间也需要免密
检测必须通过
server4:
server1:
/etc/init.d/mysqld stop
cd /usr/local/mysql/data/
ls
rm -rf *
vim /etc/my.cnf
[mysqld]
datadir=/usr/local/mysql/data
socket=/usr/local/mysql/data/mysql.sock
symbolic-links=0
server_id=1
gtid_mode=ON
enforce_gtid_consistency=ON
master_info_repository=TABLE
relay_log_info_repository=TABLE
log_slave_updates=ON
log_bin=binlog
binlog_format=ROW
mysqld --initialize --user=mysql
/etc/init.d/mysqld start
mysql -p
alter user root@localhost identified by 'westos';
show master status;
grant replication slave on *.* to repl@'%' identified by 'westos';
cd
yum install mha4mysql-node-0.58-0.el7.centos.noarch.rpm
server2,3:
/etc/init.d/mysqld stop
vim /etc/my.cnf
rm -rf *
ls
mysqld --initialize --user=mysql
/etc/init.d/mysqld start
mysql -p
alter user root@localhost identified by ‘westos’;
change master to master_host='172.25.9.1',master_user='repl',master_password='westos',master_auto_position=1;
start slave;
show slave status\G
cd
yum install mha4mysql-node-0.58-0.el7.centos.noarch.rpm -y
server4:
systemctl stop mysqlrouter.service
cd MHA-7/
yum install *.rpm -y
scp mha4mysql-node-0.58-0.el7.centos.noarch.rpm server1:
scp mha4mysql-node-0.58-0.el7.centos.noarch.rpm server3:
scp mha4mysql-node-0.58-0.el7.centos.noarch.rpm server2:
ssh-keygen
ssh-copy-id server1
ssh-copy-id server2
ssh-copy-id server3
scp -r .ssh/ server2:
scp -r .ssh/ server3:
scp -r .ssh/ server1:
server4:
tar zxf mha4mysql-manager-0.58.tar.gz
cd mha4mysql-manager-0.58
ls
cd samples/
ls
cd conf/
mkdir /etc/masterha
cat app1.cnf masterha_default.cnf > /etc/masterha/app.cnf
cd /etc/masterha/
ls
vim app.cnf
[server default]
user=root
password=westos
ssh_user=root
repl_user=repl
repl_password=westos
master_binlog_dir=/usr/local/mysql/data
remote_workdir=/tmp
secondary_check_script=masterha_secondary_check -s 172.25.9.3 -s 172.25.9.250
ping_interval=3
master_ip_online_change_script=/script/masterha/master_ip_online_change
manager_workdir=/etc/masterha/app1
manager_log=/etc/masterha/app1/manager.log
[server1]
hostname=172.25.9.1
candidate_master=1
[server2]
hostname=172.25.9.2
candidate_master=1
[server3]
hostname=172.25.9.3
no_master=1
# master_ip_failover_script= /script/masterha/master_ip_failover
# shutdown_script= /script/masterha/power_manager
# report_script= /script/masterha/send_report
# master_ip_online_change_script= /script/masterha/master_ip_online_change
[root@server4 masterha]#
masterha_check_ssh --conf=/etc/masterha/app.cnf
##免密检查
grant all on *.* to root@'%' identified by 'westos';
masterha_check_repl --conf=/etc/masterha/app.cnf
##健康检查