MYSQL备份,主从,高可用实验
MYSQL备份,主从,高可用实验

MYSQL备份,主从,高可用实验

MYSQL冷备实验,脚本

#关闭mysql服务
[root@localhost ~]#systemctl stop mysqld.service;systemctl status mysqld.service 
● mysqld.service - MySQL 8.0 database server
   Loaded: loaded (/usr/lib/systemd/system/mysqld.service; disabled; vendor preset: disabled)
   Active: inactive (dead)

Aug 31 21:16:27 localhost.localdomain systemd[1]: Starting MySQL 8.0 database server...
Aug 31 21:16:27 localhost.localdomain systemd[1]: Started MySQL 8.0 database server.
Aug 31 21:16:35 localhost.localdomain systemd[1]: Stopping MySQL 8.0 database server...
Aug 31 21:16:36 localhost.localdomain systemd[1]: mysqld.service: Succeeded.
Aug 31 21:16:36 localhost.localdomain systemd[1]: Stopped MySQL 8.0 database server.
Aug 31 21:16:36 localhost.localdomain systemd[1]: Starting MySQL 8.0 database server...
Aug 31 21:16:36 localhost.localdomain systemd[1]: Started MySQL 8.0 database server.
Aug 31 21:19:45 localhost.localdomain systemd[1]: Stopping MySQL 8.0 database server...
Aug 31 21:19:46 localhost.localdomain systemd[1]: mysqld.service: Succeeded.
Aug 31 21:19:46 localhost.localdomain systemd[1]: Stopped MySQL 8.0 database server.

#拷贝mysql需要备份的相关文件
[root@localhost ~]#tar czf mysql-data.bak.tar.gz /var/lib/mysql/*
[root@localhost ~]#tar czf mysqlbinlog.tar.gz /data/mysqllog/*
[root@localhost ~]#scp mysql-data.bak.tar.gz  10.0.0.154:
[root@localhost ~]#scp mysqlbinlog.tar.gz 10.0.0.154:
[root@localhost ~]#scp /etc/my.cnf 10.0.0.154:

#库文件丢失的情况进行还原
[root@localhost ~]#tree /var/lib/mysql
/var/lib/mysql

0 directories, 0 files
[root@rocky8 ~]#scp my* 10.0.0.153:/data/
[root@localhost ~]#cd /data/
[root@localhost data]#tar xf mysqlbinlog.tar.gz 
[root@localhost data]#tar xf mysql-data.bak.tar.gz 
[root@localhost data]#cp var/lib/mysql/* /var/lib/mysql -r
[root@localhost data]#cp my.cnf /etc/my.cnf
[root@localhost data]#cp  /data/data/mysqllog/* /data/mysqllog/
[root@localhost data]#chown -R mysql:mysql /data/mysqllog/
[root@localhost data]#chown -R mysql:mysql /var/lib/mysql/
[root@localhost data]#systemctl start mysqld.service 
[root@localhost data]#systemctl status mysqld.service 
● mysqld.service - MySQL 8.0 database server
   Loaded: loaded (/usr/lib/systemd/system/mysqld.service; disabl>
   Active: active (running) since Wed 2022-08-31 21:38:04 CST; 13>
  Process: 28029 ExecStartPost=/usr/libexec/mysql-check-upgrade (>
  Process: 27946 ExecStartPre=/usr/libexec/mysql-prepare-db-dir m>
  Process: 27922 ExecStartPre=/usr/libexec/mysql-check-socket (co>
 Main PID: 27983 (mysqld)
   Status: "Server is operational"
    Tasks: 38 (limit: 11175)
   Memory: 360.9M
   CGroup: /system.slice/mysqld.service
           └─27983 /usr/libexec/mysqld --basedir=/usr

Aug 31 21:38:02 localhost.localdomain systemd[1]: Starting MySQL >
Aug 31 21:38:04 localhost.localdomain systemd[1]: Started MySQL 8>

#切换服务器恢复备份,新服务器准备相同版本的MySQL数据库,启动确认没问题后,停止服务开始还原数据库
[root@rocky8 ~]#systemctl stop mysqld.service 
[root@rocky8 ~]#scp my* 10.0.0.155:/data/
[root@rocky8 data]#tar xf mysql-data.bak.tar.gz 
[root@rocky8 data]#tar xf mysqlbinlog.tar.gz 
[root@rocky8 data]#cp /data/data/* /data/ -rp
[root@rocky8 data]#chown -R  mysql:mysql mysqllog
[root@rocky8 data]#cp my.cnf /etc/my.cnf
[root@rocky8 data]#rm /var/lib/mysql/* -rf 
[root@rocky8 data]#cp var/lib/mysql/* /var/lib/mysql/ -rp
[root@rocky8 mysql]#systemctl start mysqld.service 

MYSQL冷备脚本

#备份脚本
#!/bin/bash
#
#********************************************************************
#Author:            shuhong
#QQ:                985347841
#Date:              2022-08-31
#FileName:          mysql-back.sh
#URL:               hhhh
#Description:       The test script
#Copyright (C):     2022 All rights reserved
#********************************************************************
# Cold backup of MySQL: stop the server, tar the data directory and the
# binary logs, push the archives plus /etc/my.cnf to a remote host, then
# restart the server.

. /etc/rc.d/init.d/functions

remoteIP="10.0.0.154"
datapath="/var/lib/mysql/"      # server data directory (was misnamed "binlogpath")
binlogpath="/data/mysqllog/"    # binary-log directory (was misspelled "myqlpath")
# FIX: the original read backpath="/data/date +%F-%H_%M_%S" — the command
# substitution was missing, so the remote directory was that literal string.
# NOTE(review): the destination listing shows directories ending in "backup"
# (e.g. 2022-09-01-09_08_36backup), so the original may have appended a
# "backup" suffix — confirm against the remote host.
backpath="/data/$(date +%F-%H_%M_%S)"

# Crude lock file so overlapping cron runs do not stop mysqld concurrently.
# Not atomic (check-then-touch race), but adequate for a once-a-minute cron.
lock(){
if [ -e /tmp/mysql.lock ];then
   exit 1
else
   touch /tmp/mysql.lock
fi
}

unlock(){
rm -f /tmp/mysql.lock
}

backup(){
lock
ssh root@${remoteIP} "mkdir -p $backpath"
systemctl stop mysqld.service &> /dev/null
tar czf mysql-data.bak.tar.gz ${datapath}* &> /dev/null
# FIX: the original expanded undefined ${msqlpath} (typo of "myqlpath"),
# so the binlog archive never contained the binary logs.
tar czf mysqlbinlog.tar.gz ${binlogpath}* &> /dev/null
scp mysql-data.bak.tar.gz  ${remoteIP}:$backpath  &> /dev/null
scp mysqlbinlog.tar.gz ${remoteIP}:$backpath      &> /dev/null
scp /etc/my.cnf ${remoteIP}:$backpath    &> /dev/null
# Release the lock only if mysqld comes back up; otherwise report failure
# and leave the lock in place so the next run does not pile on.
systemctl start mysqld.service && unlock || action "启动失败" false
}
backup

#定时任务
[root@localhost ~]#crontab -e
no crontab for root - using an empty one
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin
* * * * * . ~/mysql-back.sh

#备份的目的目录
[root@rocky8 data]#ll
total 0
drwxr-xr-x 2 root root 75 Sep  1 09:08 2022-09-01-09_08_36backup
drwxr-xr-x 2 root root 75 Sep  1 09:11 2022-09-01-09_11_35backup
drwxr-xr-x 2 root root 75 Sep  1 09:13 2022-09-01-09_13_01backup
drwxr-xr-x 2 root root  6 Sep  1 09:14 2022-09-01-09_14_01backup

MYSQL热备及还原

恢复误删除的表
案例说明:每天2:30做完全备份,早上10:00误删除了db1库的表test,10:10才发现故障,现需要将数
据库还原到10:10的状态,且恢复被删除db1库的test表,且不影响hellodb库

#完全备份,要求必须开启二进制日志
mysql> select @@log_bin;
+-----------+
| @@log_bin |
+-----------+
|         1 |
+-----------+
1 row in set (0.00 sec)

#完全备份时test,test2表中的数据
mysql> show tables;
+---------------+
| Tables_in_db1 |
+---------------+
| test          |
| test2         |
+---------------+
2 rows in set (0.00 sec)

mysql> select *from test;
+----+------+
| ID | name |
+----+------+
|  1 | a    |
|  2 | b    |
|  3 | c    |
|  4 | d    |
+----+------+
4 rows in set (0.00 sec)

mysql> select *from test2;
+----+------+
| ID | name |
+----+------+
|  1 | 1    |
|  2 | 2    |
+----+------+
2 rows in set (0.00 sec)

#完全备份,逐个库的备份
[root@localhost data]#vim mysqldump.sh 
#!/bin/bash
# 
#********************************************************************
#Author:            shuhong
#QQ:                985347841
#Date:              2022-09-01
#FileName:          mysqldump.sh
#URL:               hhhh
#Description:       The test script
#Copyright (C):     2022 All rights reserved
#********************************************************************
# Per-database logical backup: list every user database, generate one
# mysqldump command per database via sed, and feed the generated command
# lines to bash. --flush-logs (-F) rotates the binlog at backup time so a
# later point-in-time restore starts from a fresh log.
# FIX: the filename part originally read "\1date +%F-%H_%M_%S.sql.gz" —
# the command substitution (backticks lost in transcription) was missing.
# $(date ...) stays literal inside the single-quoted sed script and is
# expanded by the bash that executes the generated line, matching the
# observed output names like db12022-09-01-09_49_18.sql.gz.
mysql -uroot -e 'show databases' \
  | grep -Ewv '^(Database|information_schema|performance_schema|sys)$' \
  | sed -rn 's#(.*)#mysqldump -B \1 -F -E -R --triggers --single-transaction --source-data=2 --flush-privileges --default-character-set=utf8mb4 --hex-blob | gzip > /data/backup/\1$(date +%F-%H_%M_%S).sql.gz#p' \
  | bash
[root@localhost data]#./mysqldump.sh 
[root@localhost data]#tree backup/
backup/
├── db12022-09-01-09_49_18.sql.gz
├── hellodb2022-09-01-09_49_18.sql.gz
└── mysql2022-09-01-09_49_19.sql.gz

0 directories, 3 files
#备份时开启了刷新二进制日志的功能所以日志起点是156,方便后续还原操作
mysql> show master status;
+---------------+----------+--------------+------------------+-------------------+
| File          | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set |
+---------------+----------+--------------+------------------+-------------------+
| binlog.000020 |      156 |              |                  |                   |
+---------------+----------+--------------+------------------+-------------------+
1 row in set (0.00 sec)

#备份完成后做数据更新操作
mysql> insert test2 (name)values('3');
Query OK, 1 row affected (0.00 sec)

mysql> insert test2 (name)values('4');
Query OK, 1 row affected (0.01 sec)

mysql> select *from test2;
+----+------+
| ID | name |
+----+------+
|  1 | 1    |
|  2 | 2    |
|  3 | 3    |
|  4 | 4    |
+----+------+
4 rows in set (0.00 sec)

#早上10:00误删除了db1库的表test
mysql> drop table test;
Query OK, 0 rows affected (0.01 sec)
mysql> show tables;
+---------------+
| Tables_in_db1 |
+---------------+
| test2         |
+---------------+
1 row in set (0.00 sec)

#后续继续插入了数据
mysql> insert test2 (name)values('5');
Query OK, 1 row affected (0.01 sec)

mysql> insert test2 (name)values('6');
Query OK, 1 row affected (0.01 sec)

mysql> select * from test2;
+----+------+
| ID | name |
+----+------+
|  1 | 1    |
|  2 | 2    |
|  3 | 3    |
|  4 | 4    |
|  5 | 5    |
|  6 | 6    |
+----+------+
6 rows in set (0.00 sec)

#发现数据库误删表的行为开始恢复数据,需要恢复的数据test表且保证test2表插入的数据不受影响
[root@localhost backup]#gzip -d db12022-09-01-09_49_18.sql.gz 
#注意此处看到的二进制日志文件是binlog.000018,但因为分库备份所以实际日志文件已经变更为binlog.000020,因为每次备份都刷新日志,所以起始位置都是156
[root@localhost backup]#grep 'CHANGE MASTER TO' db12022-09-01-09_49_18.sql 
-- CHANGE MASTER TO MASTER_LOG_FILE='binlog.000018', MASTER_LOG_POS=156;
[root@localhost backup]#mysqlbinlog --start-position=156 /data/mysqllog/binlog.000020 > /data/backup/inc.sql

#找到误删除的语句,从备份中删除此语句
[root@localhost backup]#sed -i '/DROP TABLE test/d' /data/backup/inc.sql

#利用完全备份和修改过的二进制日志进行还原
mysql> set sql_log_bin=0;
mysql> source /data/backup/db12022-09-01-09_49_18.sql
mysql> source /data/backup/inc.sql
mysql> set sql_log_bin=1;

#还原后的效果
mysql> show tables;
+---------------+
| Tables_in_db1 |
+---------------+
| test          |
| test2         |
+---------------+
2 rows in set (0.00 sec)

mysql> select *from test;
+----+------+
| ID | name |
+----+------+
|  1 | a    |
|  2 | b    |
|  3 | c    |
|  4 | d    |
+----+------+
4 rows in set (0.00 sec)

mysql> select *from test2;
+----+------+
| ID | name |
+----+------+
|  1 | 1    |
|  2 | 2    |
|  3 | 3    |
|  4 | 4    |
|  5 | 5    |
|  6 | 6    |
+----+------+
6 rows in set (0.00 sec)


mysqldump备份脚本

#!/bin/bash
#
#********************************************************************
#Author:            shuhong
#QQ:                985347841
#Date:              2022-09-01
#FileName:          mysqldump.sh
#URL:               hhhh
#Description:       The test script
#Copyright (C):     2022 All rights reserved
#********************************************************************
# Logical backup: dump every user database, bundle the dumps into one
# tarball, copy it to the remote backup host, and verify the transfer by
# comparing md5 checksums on both ends.
. /etc/rc.d/init.d/functions
# FIX: command substitution was missing — backpath was the literal string
# "/data/date +%F-%H_%M_%S/".
backpath="/data/$(date +%F-%H_%M_%S)/"
remoteIP='10.0.0.154'

# Crude lock file so overlapping cron runs do not collide (not atomic).
lock(){
if [ -e /tmp/mysql.lock ];then 
   exit 1
else
   touch /tmp/mysql.lock
fi
}

unlock(){
rm -f /tmp/mysql.lock
}

backup(){
lock
mkdir -p ${backpath} &> /dev/null
# FIX: the db list lacked $(...) — the loop previously iterated over the
# literal words "mysql -uroot -e ..." instead of the database names.
for db in $(mysql -uroot -e 'show databases' | grep -Ewv '^(Database|information_schema|performance_schema|sys)$');do 
   # FIX: the original appended "&> /dev/null" AFTER "> file", which
   # re-pointed stdout to /dev/null and left every .sql.gz empty.
   # Only stderr is silenced now.
   mysqldump -B $db -F -E -R --triggers --single-transaction --source-data=2 --flush-privileges --default-character-set=utf8mb4 --hex-blob 2> /dev/null | gzip > ${backpath}$db.sql.gz
done

tar czf ${backpath}mysqldump.tar.gz ${backpath}*.sql.gz &> /dev/null
# FIX: key1/key2 lacked $(...) — they previously held literal command text,
# so the checksum comparison never actually ran md5sum.
key1=$(md5sum ${backpath}mysqldump.tar.gz | awk '{print $1}')
scp ${backpath}mysqldump.tar.gz root@${remoteIP}: &> /dev/null
key2=$(ssh root@$remoteIP "md5sum mysqldump.tar.gz" | awk '{print $1}')

# Quote both sides so an empty checksum fails loudly instead of producing
# a [ syntax error.
if [ "$key1" == "$key2" ];then
   unlock
else
   action "传输中文件缺失,请确认" false
   exit 1
fi
}
backup

MYSQL一主二从

#主节点
[root@localhost backup]#vim /etc/my.cnf
[mysqld]
log_bin=/data/mysqllog/binlog
server-id=153
[root@localhost backup]#systemctl restart mysqld
[root@localhost backup]#mysqldump -A -F --master-data=1 --single-transaction > /data/backup/all.sql
#创建同步账号并授权
mysql> create user repluser@"10.0.0.%" identified by "123456";
Query OK, 0 rows affected (0.00 sec)
mysql> grant replication slave on *.* to repluser@"10.0.0.%";
Query OK, 0 rows affected (0.00 sec)


[root@localhost backup]#scp all.sql 10.0.0.155:
[root@localhost backup]#scp all.sql 10.0.0.154:

#配置slave1
[root@rocky8 data]#vim /etc/my.cnf
[mysqld]
log_bin=/data/mysqllog/binlog
server-id=154
read-only
[root@rocky8 data]#systemctl restart mysqld
[root@rocky8 ~]#vim all.sql 
CHANGE MASTER TO 
MASTER_HOST='10.0.0.153',
MASTER_USER='repluser',
MASTER_PASSWORD='123456',
MASTER_PORT=3306,
MASTER_LOG_FILE='binlog.000022', MASTER_LOG_POS=156;
[root@rocky8 ~]#mysql
mysql> set sql_log_bin=0;
mysql> source ~/all.sql;
mysql>set sql_log_bin=1;
#从节点开始复制
mysql> start slave;


#配置slave2
[root@rocky8 data]#vim /etc/my.cnf
[mysqld]
log_bin=/data/mysqllog/binlog
server-id=155
read-only
[root@rocky8 data]#systemctl restart mysqld
[root@rocky8 data]#systemctl restart mysqld
[root@rocky8 ~]#vim all.sql 
CHANGE MASTER TO 
MASTER_HOST='10.0.0.153',
MASTER_USER='repluser',
MASTER_PASSWORD='123456',
MASTER_PORT=3306,
MASTER_LOG_FILE='binlog.000022', MASTER_LOG_POS=156;
[root@rocky8 ~]#mysql
mysql> set sql_log_bin=0;
mysql> source ~/all.sql;
mysql>set sql_log_bin=1;
#从节点开始复制
mysql> start slave;

MySQL级联复制

#主节点
[root@localhost backup]#vim /etc/my.cnf
[mysqld]
log_bin=/data/mysqllog/binlog
server-id=153
[root@localhost backup]#systemctl restart mysqld
[root@localhost backup]#mysqldump -A -F --master-data=1 --single-transaction > /data/backup/all.sql
#创建同步账号并授权
mysql> create user repluser@"10.0.0.%" identified by "123456";
Query OK, 0 rows affected (0.00 sec)
mysql> grant replication slave on *.* to repluser@"10.0.0.%";
Query OK, 0 rows affected (0.00 sec)
[root@localhost backup]#scp all.sql 10.0.0.154:

#slave1
[root@rocky8 ~]#vim /etc/my.cnf
[mysqld]
server-id=154
log_bin=/data/mysqllog/binlog
read-only
[root@rocky8 ~]#mkdir /data/mysqllog
[root@rocky8 ~]#chown -R mysql:mysql /data/mysqllog/
[root@rocky8 ~]#systemctl restart mysqld.service 
[root@rocky8 ~]#vim all.sql 
CHANGE MASTER TO 
MASTER_HOST='10.0.0.153',
MASTER_USER='repluser',
MASTER_PASSWORD='123456',
MASTER_PORT=3306,
MASTER_LOG_FILE='binlog.000022', MASTER_LOG_POS=156;
[root@rocky8 ~]#mysql
mysql> set sql_log_bin=0;
mysql> source ~/all.sql;
mysql>set sql_log_bin=1;
mysql> start slave;
mysql> show master logs;
+---------------+-----------+-----------+
| Log_name      | File_size | Encrypted |
+---------------+-----------+-----------+
| binlog.000001 |      1265 | No        |
+---------------+-----------+-----------+
1 row in set (0.00 sec)


#slave2
[root@rocky8 ~]#vim /etc/my.cnf
[mysqld]
log_bin=/data/mysqllog/binlog
server-id=155
read-only
[root@rocky8 ~]#mkdir /data/mysqllog
[root@rocky8 ~]#chown -R mysql:mysql /data/mysqllog/
[root@rocky8 ~]#systemctl restart mysqld.service 
[root@rocky8 ~]#vim all.sql 
CHANGE MASTER TO
MASTER_HOST='10.0.0.154',
MASTER_USER='repluser',
MASTER_PASSWORD='123456',
MASTER_PORT=3306,
MASTER_LOG_FILE='binlog.000001', MASTER_LOG_POS=1265;
[root@rocky8 ~]#mysql
mysql> source ~/all.sql;
mysql> start slave;

MySQL主主复制

#master1
[root@localhost backup]#vim /etc/my.cnf
[mysqld]
log_bin=/data/mysqllog/binlog
server-id=153
[root@localhost backup]#systemctl restart mysqld
[root@localhost backup]#mysqldump -A -F --master-data=1 --single-transaction > /data/backup/all.sql
#创建同步账号并授权
mysql> create user repluser@"10.0.0.%" identified by "123456";
Query OK, 0 rows affected (0.00 sec)
mysql> grant replication slave on *.* to repluser@"10.0.0.%";
Query OK, 0 rows affected (0.00 sec)
[root@localhost backup]#scp all.sql 10.0.0.154:


#master2
[root@rocky8 ~]#vim /etc/my.cnf
[mysqld]
server-id=154
log_bin=/data/mysqllog/binlog
read-only
[root@rocky8 ~]#mkdir /data/mysqllog
[root@rocky8 ~]#chown -R mysql:mysql /data/mysqllog/
[root@rocky8 ~]#systemctl restart mysqld.service 
[root@rocky8 ~]#vim all.sql 
CHANGE MASTER TO 
MASTER_HOST='10.0.0.153',
MASTER_USER='repluser',
MASTER_PASSWORD='123456',
MASTER_PORT=3306,
MASTER_LOG_FILE='binlog.000022', MASTER_LOG_POS=156;
[root@rocky8 ~]#mysql
mysql> set sql_log_bin=0;
mysql> source ~/all.sql;
mysql>set sql_log_bin=1;
mysql> start slave;
mysql> show master logs;
+---------------+-----------+-----------+
| Log_name      | File_size | Encrypted |
+---------------+-----------+-----------+
| binlog.000001 |      1550 | No        |
+---------------+-----------+-----------+
1 row in set (0.00 sec)

#master1
mysql> CHANGE MASTER TO
    -> MASTER_HOST='10.0.0.154',
    -> MASTER_USER='repluser',
    -> MASTER_PASSWORD='123456',
    -> MASTER_PORT=3306,
    -> MASTER_LOG_FILE='binlog.000001',
    -> MASTER_LOG_POS=1550;
mysql> start slave;

MHA,MYCAT,一主二从

半同步复制,GTID开启搭建一主二从

#前面的实验已实现一主二从
#半同步实现
#master
[mysqld]
log_bin=/data/mysqllog/binlog
server-id=153
plugin-load-add="semisync_master.so"   #启动服务时安装插件
rpl_semi_sync_master_enabled=ON        #启动主节点半同步功能
rpl_semi_sync_master_timeout=3000      #设置超时等待3秒
[root@localhost ~]#systemctl restart mysqld.service 
[root@localhost ~]#mysql
mysql> SHOW GLOBAL VARIABLES LIKE '%semi%';
+-------------------------------------------+------------+
| Variable_name                             | Value      |
+-------------------------------------------+------------+
| rpl_semi_sync_master_enabled              | ON         |
| rpl_semi_sync_master_timeout              | 3000       |
| rpl_semi_sync_master_trace_level          | 32         |
| rpl_semi_sync_master_wait_for_slave_count | 1          |
| rpl_semi_sync_master_wait_no_slave        | ON         |
| rpl_semi_sync_master_wait_point           | AFTER_SYNC |
+-------------------------------------------+------------+
6 rows in set (0.00 sec)  

#slave1和slave2配置相同
[mysqld]
server-id=154
log_bin=/data/mysqllog/binlog
read-only
plugin-load-add="semisync_slave.so"
rpl_semi_sync_slave_enabled=ON           #启动从节点的半同步复制
[root@rocky8 ~]#vim /etc/my.cnf
[root@rocky8 ~]#systemctl restart mysqld.service 
[root@rocky8 ~]#mysql
mysql> stop slave;
mysql> start slave;
mysql> SHOW GLOBAL VARIABLES LIKE '%semi%';
+---------------------------------+-------+
| Variable_name                   | Value |
+---------------------------------+-------+
| rpl_semi_sync_slave_enabled     | ON    |
| rpl_semi_sync_slave_trace_level | 32    |
+---------------------------------+-------+
2 rows in set (0.01 sec)

#GTID实现
#所有mysql节点上my.cnf在mysqld文件中加入下面两行
[root@rocky8 ~]#vim /etc/my.cnf
[mysqld]
gtid_mode=ON
enforce_gtid_consistency
#清理从节点之前做好的主从同步配置
mysql> stop slave;
Query OK, 0 rows affected, 1 warning (0.00 sec)

mysql> reset slave all;
Query OK, 0 rows affected, 1 warning (0.01 sec)
#加完后全部重启服务
[root@localhost ~]#systemctl restart mysqld.service 
#master
#如果主从数据不一致则先把主从数据通过备份还原,调整一致,当前实验是基于此前同步的所以数据库是一致的跳过此步骤

#slave
mysql> CHANGE MASTER TO MASTER_HOST='10.0.0.153',
    -> MASTER_USER='repluser',
    -> MASTER_PASSWORD='123456',
    -> MASTER_PORT=3306,
    -> MASTER_AUTO_POSITION=1;
Query OK, 0 rows affected, 8 warnings (0.01 sec)

mysql> start slave;
Query OK, 0 rows affected, 1 warning (0.01 sec)

mysql> show slave status\G;
*************************** 1. row ***************************
               Slave_IO_State: Waiting for source to send event
                  Master_Host: 10.0.0.153
                  Master_User: repluser
                  Master_Port: 3306
                Connect_Retry: 60
              Master_Log_File: binlog.000067
          Read_Master_Log_Pos: 156
               Relay_Log_File: rocky8-relay-bin.000002
                Relay_Log_Pos: 365
        Relay_Master_Log_File: binlog.000067
             Slave_IO_Running: Yes
            Slave_SQL_Running: Yes
              Replicate_Do_DB: 
          Replicate_Ignore_DB: 
           Replicate_Do_Table: 
       Replicate_Ignore_Table: 
      Replicate_Wild_Do_Table: 
  Replicate_Wild_Ignore_Table: 
                   Last_Errno: 0
                   Last_Error: 
                 Skip_Counter: 0
          Exec_Master_Log_Pos: 156
              Relay_Log_Space: 575
              Until_Condition: None
               Until_Log_File: 
                Until_Log_Pos: 0
           Master_SSL_Allowed: No
           Master_SSL_CA_File: 
           Master_SSL_CA_Path: 
              Master_SSL_Cert: 
            Master_SSL_Cipher: 
               Master_SSL_Key: 
        Seconds_Behind_Master: 0
Master_SSL_Verify_Server_Cert: No
                Last_IO_Errno: 0
                Last_IO_Error: 
               Last_SQL_Errno: 0
               Last_SQL_Error: 
  Replicate_Ignore_Server_Ids: 
             Master_Server_Id: 153
                  Master_UUID: 5c671dc1-292c-11ed-bca0-000c29d9d8e1
             Master_Info_File: mysql.slave_master_info
                    SQL_Delay: 0
          SQL_Remaining_Delay: NULL
      Slave_SQL_Running_State: Replica has read all relay log; waiting for more updates
           Master_Retry_Count: 86400
                  Master_Bind: 
      Last_IO_Error_Timestamp: 
     Last_SQL_Error_Timestamp: 
               Master_SSL_Crl: 
           Master_SSL_Crlpath: 
           Retrieved_Gtid_Set: 
            Executed_Gtid_Set: 
                Auto_Position: 1
         Replicate_Rewrite_DB: 
                 Channel_Name: 
           Master_TLS_Version: 
       Master_public_key_path: 
        Get_master_public_key: 0
            Network_Namespace: 
1 row in set, 1 warning (0.01 sec)

ERROR: 
No query specified

#测试GTID主库插入数据,查看Retrieved_Gtid_set和Executed_Gtid_Set这两个值,对比主节点执行show master status的值,如果相同表示同步完成
mysql> show master status;
+---------------+----------+--------------+------------------+----------------------------------------+
| File          | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set                      |
+---------------+----------+--------------+------------------+----------------------------------------+
| binlog.000067 |      439 |              |                  | 5c671dc1-292c-11ed-bca0-000c29d9d8e1:1 |
+---------------+----------+--------------+------------------+----------------------------------------+
1 row in set (0.00 sec)

mysql> show slave status\G;
*************************** 1. row ***************************
               Slave_IO_State: Waiting for source to send event
                  Master_Host: 10.0.0.153
                  Master_User: repluser
                  Master_Port: 3306
                Connect_Retry: 60
              Master_Log_File: binlog.000067
          Read_Master_Log_Pos: 439
               Relay_Log_File: rocky8-relay-bin.000002
                Relay_Log_Pos: 648
        Relay_Master_Log_File: binlog.000067
             Slave_IO_Running: Yes
            Slave_SQL_Running: Yes
              Replicate_Do_DB: 
          Replicate_Ignore_DB: 
           Replicate_Do_Table: 
       Replicate_Ignore_Table: 
      Replicate_Wild_Do_Table: 
  Replicate_Wild_Ignore_Table: 
                   Last_Errno: 0
                   Last_Error: 
                 Skip_Counter: 0
          Exec_Master_Log_Pos: 439
              Relay_Log_Space: 858
              Until_Condition: None
               Until_Log_File: 
                Until_Log_Pos: 0
           Master_SSL_Allowed: No
           Master_SSL_CA_File: 
           Master_SSL_CA_Path: 
              Master_SSL_Cert: 
            Master_SSL_Cipher: 
               Master_SSL_Key: 
        Seconds_Behind_Master: 0
Master_SSL_Verify_Server_Cert: No
                Last_IO_Errno: 0
                Last_IO_Error: 
               Last_SQL_Errno: 0
               Last_SQL_Error: 
  Replicate_Ignore_Server_Ids: 
             Master_Server_Id: 153
                  Master_UUID: 5c671dc1-292c-11ed-bca0-000c29d9d8e1
             Master_Info_File: mysql.slave_master_info
                    SQL_Delay: 0
          SQL_Remaining_Delay: NULL
      Slave_SQL_Running_State: Replica has read all relay log; waiting for more updates
           Master_Retry_Count: 86400
                  Master_Bind: 
      Last_IO_Error_Timestamp: 
     Last_SQL_Error_Timestamp: 
               Master_SSL_Crl: 
           Master_SSL_Crlpath: 
           Retrieved_Gtid_Set: 5c671dc1-292c-11ed-bca0-000c29d9d8e1:1
            Executed_Gtid_Set: 5c671dc1-292c-11ed-bca0-000c29d9d8e1:1
                Auto_Position: 1
         Replicate_Rewrite_DB: 
                 Channel_Name: 
           Master_TLS_Version: 
       Master_public_key_path: 
        Get_master_public_key: 0
            Network_Namespace: 
1 row in set, 1 warning (0.00 sec)

MYCAT调试

#安装java
[root@rocky8 ~]#java -version
openjdk version "1.8.0_342"
OpenJDK Runtime Environment (build 1.8.0_342-b07)
OpenJDK 64-Bit Server VM (build 25.342-b07, mixed mode)

#下载安装mycat或本地上传包安装
wget http://dl.mycat.org.cn/1.6.7.4/Mycat-server-1.6.7.4-release/Mycat-server-1.6.7.4-release-20200105164103-linux.tar.gz
[root@rocky8 ~]#ll
total 30516
-rw-------. 1 root root     1093 Aug 16 08:58 anaconda-ks.cfg
-rw-r--r--  1 root root 31241677 Sep  2 14:52 Mycat-server-1.6.7.6-release-20220524173810-linux.tar.gz
[root@rocky8 ~]#ll
total 30516
-rw-------. 1 root root     1093 Aug 16 08:58 anaconda-ks.cfg
-rw-r--r--  1 root root 31241677 Sep  2 14:52 Mycat-server-1.6.7.6-release-20220524173810-linux.tar.gz
[root@rocky8 ~]#tar xf Mycat-server-1.6.7.6-release-20220524173810-linux.tar.gz  -C /apps
[root@rocky8 ~]#ll /apps/
total 0
drwxr-xr-x 7 root root 85 Sep  2 20:20 mycat


#启动和链接
[root@rocky8 mycat]#vim /etc/profile.d/mycat.sh
PATH=/apps/mycat/bin:$PATH                                                                               
[root@rocky8 mycat]#source /etc/profile.d/mycat.sh 
[root@rocky8 mycat]#mycat start
Starting Mycat-server...
[root@rocky8 mycat]#cat /apps/mycat/logs/wrapper.log 
STATUS | wrapper  | 2022/09/02 20:25:16 | --> Wrapper Started as Daemon
STATUS | wrapper  | 2022/09/02 20:25:16 | Launching a JVM...
INFO   | jvm 1    | 2022/09/02 20:25:17 | Wrapper (Version 3.2.3) http://wrapper.tanukisoftware.org
INFO   | jvm 1    | 2022/09/02 20:25:17 |   Copyright 1999-2006 Tanuki Software, Inc.  All Rights Reserved.
INFO   | jvm 1    | 2022/09/02 20:25:17 | 
INFO   | jvm 1    | 2022/09/02 20:25:18 | Loading class com.mysql.jdbc.Driver'. This is deprecated. The new driver class is .mysql.cj.jdbc.Driver'. The driver is automatically registered via the SPI and manual loading of the driver class is generally unnecessary.
INFO   | jvm 1    | 2022/09/02 20:25:19 | MyCAT Server startup successfully. see logs in logs/mycat.log

#连接测试
[root@localhost ~]#mysql -uroot -p123456 -h 10.0.0.156 -P8066
mysql> show databases;
+----------+
| DATABASE |
+----------+
| TESTDB   |
+----------+
1 row in set (0.01 sec)


#在mycat 服务器上修改server.xml文件配置Mycat的连接信息
[root@rocky8 mycat]#vim /apps/mycat/conf/server.xml 
#修改下面行的8066改为3306复制到到独立非注释行
<property name="serverPort">3306</property>
<property name="handlelDistributedTransactions">0</property> #将上面行放在此行前面

#修改schema.xml实现读写分离策略
[root@rocky8 mycat]#vim /apps/mycat/conf/schema.xml 
<?xml version="1.0"?>
<!DOCTYPE mycat:schema SYSTEM "schema.dtd">
<mycat:schema xmlns:mycat="http://io.mycat/">
    <schema name="TESTDB" checkSQLschema="false" sqlMaxLimit="100" dataNode="dn1">
    </schema>
    <dataNode name="dn1" dataHost="localhost1" database="hellodb" />
    <dataHost name="localhost1" maxCon="1000" minCon="10" balance="1"
              writeType="0" dbType="mysql" dbDriver="native" switchType="1"  slaveThreshold="100">
        <heartbeat>select user()</heartbeat>
        <writeHost host="host1" url="10.0.0.153:3306" user="mycat" password="123456">
         <readHost host="host2" url="10.0.0.154:3306" user="mycat" password="123456" />
         <readHost host="host3" url="10.0.0.156:3306" user="mycat" password="123456" />
        </writeHost>
    </dataHost>
</mycat:schema>


上面配置中,balance改为1,表示读写分离。以上配置达到的效果就是10.0.0.153为主库,10.0.0.154/156为
从库
注意:要保证能使用mycat/123456权限成功登录10.0.0.153和10.0.0.154/156机器上面的mysql数据库。同时,也一定要授权mycat机器能使用mycat/123456权限成功登录这两台机器的mysql数据库!!这很重要,否则会导致登录mycat后,对库和表操作失败!

#数据库上建立同步账号
mysql> create user mycat@'10.0.0.%' identified by '123456';
Query OK, 0 rows affected (0.01 sec)
mysql> grant all on *.* to mycat@'10.0.0.%';
Query OK, 0 rows affected (0.00 sec)

[root@rocky8 mycat]#mycat restart
Stopping Mycat-server...
Stopped Mycat-server.
Starting Mycat-server...

#测试
[root@localhost ~]#mysql -uroot -p123456 -h 10.0.0.156 -P3306
mysql> use TESTDB;
Database changed
mysql> show tables;
+-------------------+
| Tables_in_hellodb |
+-------------------+
| classes           |
| coc               |
| courses           |
| scores            |
mysql> status;
--------------
mysql  Ver 8.0.26 for Linux on x86_64 (Source distribution)

Connection id:		7
Current database:	hellodb
Current user:		mycat@10.0.0.156
SSL:			Not in use
Current pager:		stdout
Using outfile:		''
Using delimiter:	;
Server version:		5.6.29-mycat-1.6.7.6-release-20220524173810 MyCat Server (OpenCloudDB)
Protocol version:	10
Connection:		10.0.0.156 via TCP/IP
Server characterset:	utf8mb4
Db     characterset:	utf8mb3
Client characterset:	utf8mb4
Conn.  characterset:	utf8mb4
TCP port:		3306
Binary data as:		Hexadecimal
--------------

| students          |
| teachers          |
| testlog           |
| toc               |
+-------------------+
8 rows in set (0.00 sec)

mycat安装目录结构:
·bin mycat命令,启动、重启、停止等
·catlet catlet为Mycat的一个扩展功能
·conf Mycat 配置信息,重点关注
·lib Mycat引用的jar包,Mycat是java开发的
·logs 日志文件,包括Mycat启动的日志和运行的日志
·version.txt mycat版本说明
logs目录:
·wrapper.log mycat启动日志
·mycat.log mycat详细工作日志
·Mycat的配置文件都在conf目录里面,这里介绍几个常用的文件:
·server.xml Mycat软件本身相关的配置文件,设置账号、参数等
·schema.xml Mycat对应的物理数据库和数据库表的配置,读写分离、高可用、分布式策略定制、节点控制
·rule.xml Mycat分片(分库分表)规则配置文件,记录分片规则列表、使用方法等

MHA高可用

#MHA管理端manager机器centos7(mha4mysql-manager-0.58-0.el7.centos.noarch.rpm 只支持CentOS7上安装)
[root@Centos ~]# ll
total 120
-rw-------. 1 root root  1609 Jul 30 14:50 anaconda-ks.cfg
-rw-r--r--. 1 root root 81024 Aug 29  2020 mha4mysql-manager-0.58-0.el7.centos.noarch.rpm
-rw-r--r--. 1 root root 36328 Aug 29  2020 mha4mysql-node-0.58-0.el7.centos.noarch.rpm

[root@Centos ~]# yum -y install mha4mysql-node-0.58-0.el7.centos.noarch.rpm 
[root@Centos ~]# yum -y install mha4mysql-manager-0.58-0.el7.centos.noarch.rpm 


#在所有节点实现相互之间 ssh key
[root@localhost ~]#vim ssh_key.sh
#!/bin/bash
# 
#********************************************************************
#Author:            shuhong
#QQ:                985347841
#Date:              2022-09-02
#FileName:          ssh_key.sh
#URL:               hhhhhh
#Description:       The test script
#Copyright (C):     2022 All rights reserved
#********************************************************************
# Distribute one ssh keypair to every reachable host on the local /24 so
# all nodes can ssh to each other without a password (required by MHA).
PASS=centos1
# Last host address of the subnet to scan, 4-255; smaller scans faster.
END=254

# FIX: command substitution was missing — IP previously held the literal
# text "ip a s eth0 | awk ...". Assumes the NIC is eth0 and its address is
# on line 3 of `ip a s` output — confirm on each distro.
IP=$(ip a s eth0 | awk -F'[ /]+' 'NR==3{print $3}')
NET=${IP%.*}.

. /etc/os-release

rm -f /root/.ssh/id_rsa
[ -e ./SCANIP.log ] && rm -f SCANIP.log

# Ping-sweep the subnet in parallel; reachable addresses go to SCANIP.log.
for((i=3;i<=END;i++));do
    ping -c 1 -w 1  ${NET}$i &> /dev/null  && echo "${NET}$i" >> SCANIP.log &
done
wait

ssh-keygen -P "" -f /root/.ssh/id_rsa
# Install sshpass with the distro's package manager ($ID from os-release).
if [ "$ID" = "centos" -o "$ID" = "rocky" ];then
    rpm -q sshpass || yum -y install sshpass
else
    dpkg -i sshpass &> /dev/null ||{ apt update; apt -y install sshpass; }
fi

sshpass -p $PASS ssh-copy-id -o StrictHostKeyChecking=no $IP

# FIX: command substitution was missing — the array previously contained
# the two literal words "cat" and "SCANIP.log", not the host list.
AliveIP=($(cat SCANIP.log))
for n in ${AliveIP[*]};do
    sshpass -p $PASS scp -o StrictHostKeyChecking=no -r /root/.ssh root@${n}:
done

# Copy .ssh/known_hosts to every host so first-time mutual logins do not
# prompt for confirmation.
for n in ${AliveIP[*]};do
    scp /root/.ssh/known_hosts ${n}:.ssh/
done
[root@Centos ~]# ./ssh_key.sh 

#在管理节点建立配置文件
[root@Centos ~]# mkdir /etc/mastermha/
[root@Centos ~]# vim /etc/mastermha/app1.cnf
[server default]
check_repl_delay=0
manager_log=/data/mastermha/app1/manager.log
manager_workdir=/data/mastermha/app1/
master_binlog_dir=/data/mysqllog/
master_ip_failover_script=/usr/local/bin/master_ip_failover
password=123456
ping_interval=1
remote_workdir=/data/mastermha/app1/
repl_password=123456
repl_user=repluser
report_script=/usr/local/bin/sendmail.sh
ssh_user=root
user=mhauser

[server1]
hostname=10.0.0.154

[server2]
candidate_master=1
hostname=10.0.0.155

#相关脚本
[root@Centos ~]# vim  /usr/local/bin/sendmail.sh
#!/bin/bash
# MHA report_script hook (see /etc/mastermha/app1.cnf): mail a one-line
# failover alert to the admin mailbox when MHA performs a failover.
echo "MHA is failover!" | mail -s "MHA Warning" root@10.0.0.152
[root@Centos ~]# chmod +x /usr/local/bin/sendmail.sh
[root@Centos ~]# vim /usr/local/bin/master_ip_failover
  
#!/usr/bin/env perl
# MHA master_ip_failover script: migrates a virtual IP (VIP) from the dead
# master to the newly promoted master during failover.
#
# Copyright (C) 2011 DeNA Co.,Ltd.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
## Note: This is a sample script and is not complete. Modify the script based on your environment.
use strict;
use warnings FATAL => 'all';
use Getopt::Long;
use MHA::DBHelper;

# Arguments supplied by masterha_manager on each invocation.
my (
    $command,        $ssh_user,         $orig_master_host,
    $orig_master_ip, $orig_master_port, $new_master_host,
    $new_master_ip,  $new_master_port,  $new_master_user,
    $new_master_password
);

# VIP that always points at the current master; adjust to your network.
my $vip           = '10.0.0.100/24';
my $key           = "1";
my $ssh_start_vip = "/sbin/ifconfig eth0:$key $vip";    # bring VIP up
my $ssh_stop_vip  = "/sbin/ifconfig eth0:$key down";    # take VIP down

GetOptions(
    'command=s'             => \$command,
    'ssh_user=s'            => \$ssh_user,
    'orig_master_host=s'    => \$orig_master_host,
    'orig_master_ip=s'      => \$orig_master_ip,
    'orig_master_port=i'    => \$orig_master_port,
    'new_master_host=s'     => \$new_master_host,
    'new_master_ip=s'       => \$new_master_ip,
    'new_master_port=i'     => \$new_master_port,
    'new_master_user=s'     => \$new_master_user,
    'new_master_password=s' => \$new_master_password,
);

exit &main();

sub main {
    if ( $command eq "stop" || $command eq "stopssh" ) {

        # $orig_master_host, $orig_master_ip, $orig_master_port are passed.
        # If you manage master ip address at global catalog database,
        # invalidate orig_master_ip here.
        my $exit_code = 1;
        eval {
            # updating global catalog, etc
            $exit_code = 0;
        };
        if ($@) {
            warn "Got Error: $@\n";
            exit $exit_code;
        }
        exit $exit_code;
    }
    elsif ( $command eq "start" ) {

        # all arguments are passed.
        # If you manage master ip address at global catalog database,
        # activate new_master_ip here.
        # You can also grant write access (create user, set read_only=0, etc) here.
        my $exit_code = 10;
        eval {
            print "Enabling the VIP - $vip on the new master - $new_master_host \n";
            # Bring the VIP up on the new master, then remove it from the
            # old master so it cannot conflict if that host comes back.
            &start_vip();
            &stop_vip();
            $exit_code = 0;
        };
        if ($@) {
            warn $@;
            exit $exit_code;
        }
        exit $exit_code;
    }
    elsif ( $command eq "status" ) {
        print "Checking the Status of the script.. OK \n";
        # Ensure the VIP is configured on the current master during checks.
        # NOTE: backticks restored — the pasted copy had lost them, which is
        # a compile error under 'use strict' (bareword "ssh").
        `ssh $ssh_user\@$orig_master_host \" $ssh_start_vip \"`;
        exit 0;
    }
    else {
        &usage();
        exit 1;
    }
}

# Bring the VIP up on the newly promoted master.
sub start_vip() {
    `ssh $ssh_user\@$new_master_host \" $ssh_start_vip \"`;
}

# A simple system call that disable the VIP on the old_master
sub stop_vip() {
    `ssh $ssh_user\@$orig_master_host \" $ssh_stop_vip \"`;
}

# Print CLI usage (single literal — the pasted copy had broken this string
# across physical lines, corrupting the message).
sub usage {
    print
"Usage: master_ip_failover --command=start|stop|stopssh|status --orig_master_host=host --orig_master_ip=ip --orig_master_port=port --new_master_host=host --new_master_ip=ip --new_master_port=port\n";
}
[root@Centos ~]# chmod +x /usr/local/bin/master_ip_failover


#mysql上建立同步账号
mysql> create user mhauser@'10.0.0.%' identified by '123456';
Query OK, 0 rows affected (0.02 sec)

mysql> grant all on *.* to mhauser@'10.0.0.%';
Query OK, 0 rows affected (0.00 sec)



#master配置文件修改
[root@localhost data]#vim /etc/my.cnf
skip_name_resolve=1
general_log
[root@localhost data]#systemctl restart mysqld.service 

#slave配置文件修改
[root@rocky8 ~]#vim /etc/my.cnf
skip_name_resolve=1
general_log
relay_log_purge=0
[root@rocky8 ~]#systemctl restart mysqld.service 

#MHA检查
[root@Centos ~]# masterha_check_ssh --conf=/etc/mastermha/app1.cnf
Fri Sep  2 22:27:02 2022 - [warning] Global configuration file /etc/masterha_default.cnf not found. Skipping.
Fri Sep  2 22:27:02 2022 - [info] Reading application default configuration from /etc/mastermha/app1.cnf..
Fri Sep  2 22:27:02 2022 - [info] Reading server configuration from /etc/mastermha/app1.cnf..
Fri Sep  2 22:27:02 2022 - [info] Starting SSH connection tests..
Fri Sep  2 22:27:03 2022 - [debug] 
Fri Sep  2 22:27:02 2022 - [debug]  Connecting via SSH from root@10.0.0.153(10.0.0.153:22) to root@10.0.0.154(10.0.0.154:22)..
Fri Sep  2 22:27:02 2022 - [debug]   ok.
Fri Sep  2 22:27:02 2022 - [debug]  Connecting via SSH from root@10.0.0.153(10.0.0.153:22) to root@10.0.0.155(10.0.0.155:22)..
Fri Sep  2 22:27:02 2022 - [debug]   ok.
Fri Sep  2 22:27:03 2022 - [debug] 
Fri Sep  2 22:27:02 2022 - [debug]  Connecting via SSH from root@10.0.0.154(10.0.0.154:22) to root@10.0.0.153(10.0.0.153:22)..
Fri Sep  2 22:27:03 2022 - [debug]   ok.
Fri Sep  2 22:27:03 2022 - [debug]  Connecting via SSH from root@10.0.0.154(10.0.0.154:22) to root@10.0.0.155(10.0.0.155:22)..
Fri Sep  2 22:27:03 2022 - [debug]   ok.
Fri Sep  2 22:27:04 2022 - [debug] 
Fri Sep  2 22:27:03 2022 - [debug]  Connecting via SSH from root@10.0.0.155(10.0.0.155:22) to root@10.0.0.153(10.0.0.153:22)..
Fri Sep  2 22:27:03 2022 - [debug]   ok.
Fri Sep  2 22:27:03 2022 - [debug]  Connecting via SSH from root@10.0.0.155(10.0.0.155:22) to root@10.0.0.154(10.0.0.154:22)..
Fri Sep  2 22:27:03 2022 - [debug]   ok.
Fri Sep  2 22:27:04 2022 - [info] All SSH connection tests passed successfully.

[root@Centos ~]# masterha_check_repl --conf=/etc/mastermha/app1.cnf
Fri Sep  2 22:27:32 2022 - [warning] Global configuration file /etc/masterha_default.cnf not found. Skipping.
Fri Sep  2 22:27:32 2022 - [info] Reading application default configuration from /etc/mastermha/app1.cnf..
Fri Sep  2 22:27:32 2022 - [info] Reading server configuration from /etc/mastermha/app1.cnf..
Fri Sep  2 22:27:32 2022 - [info] MHA::MasterMonitor version 0.58.
Fri Sep  2 22:27:33 2022 - [info] GTID failover mode = 1
Fri Sep  2 22:27:33 2022 - [info] Dead Servers:
Fri Sep  2 22:27:33 2022 - [info] Alive Servers:
Fri Sep  2 22:27:33 2022 - [info]   10.0.0.153(10.0.0.153:3306)
Fri Sep  2 22:27:33 2022 - [info]   10.0.0.154(10.0.0.154:3306)
Fri Sep  2 22:27:33 2022 - [info]   10.0.0.155(10.0.0.155:3306)
Fri Sep  2 22:27:33 2022 - [info] Alive Slaves:
Fri Sep  2 22:27:33 2022 - [info]   10.0.0.154(10.0.0.154:3306)  Version=8.0.26 (oldest major version between slaves) log-bin:enabled
Fri Sep  2 22:27:33 2022 - [info]     GTID ON
Fri Sep  2 22:27:33 2022 - [info]     Replicating from 10.0.0.153(10.0.0.153:3306)
Fri Sep  2 22:27:33 2022 - [info]     Primary candidate for the new Master (candidate_master is set)
Fri Sep  2 22:27:33 2022 - [info]   10.0.0.155(10.0.0.155:3306)  Version=8.0.26 (oldest major version between slaves) log-bin:enabled
Fri Sep  2 22:27:33 2022 - [info]     GTID ON
Fri Sep  2 22:27:33 2022 - [info]     Replicating from 10.0.0.153(10.0.0.153:3306)
Fri Sep  2 22:27:33 2022 - [info] Current Alive Master: 10.0.0.153(10.0.0.153:3306)
Fri Sep  2 22:27:33 2022 - [info] Checking slave configurations..
Fri Sep  2 22:27:33 2022 - [info] Checking replication filtering settings..
Fri Sep  2 22:27:33 2022 - [info]  binlog_do_db= , binlog_ignore_db= 
Fri Sep  2 22:27:33 2022 - [info]  Replication filtering check ok.
Fri Sep  2 22:27:33 2022 - [info] GTID (with auto-pos) is supported. Skipping all SSH and Node package checking.
Fri Sep  2 22:27:33 2022 - [info] Checking SSH publickey authentication settings on the current master..
Fri Sep  2 22:27:33 2022 - [info] HealthCheck: SSH to 10.0.0.153 is reachable.
Fri Sep  2 22:27:33 2022 - [info] 
10.0.0.153(10.0.0.153:3306) (current master)
 +--10.0.0.154(10.0.0.154:3306)
 +--10.0.0.155(10.0.0.155:3306)

Fri Sep  2 22:27:33 2022 - [info] Checking replication health on 10.0.0.154..
Fri Sep  2 22:27:33 2022 - [info]  ok.
Fri Sep  2 22:27:33 2022 - [info] Checking replication health on 10.0.0.155..
Fri Sep  2 22:27:33 2022 - [info]  ok.
Fri Sep  2 22:27:33 2022 - [info] Checking master_ip_failover_script status:
Fri Sep  2 22:27:33 2022 - [info]   /usr/local/bin/master_ip_failover --command=status --ssh_user=root --orig_master_host=10.0.0.153 --orig_master_ip=10.0.0.153 --orig_master_port=3306 
Checking the Status of the script.. OK 
Fri Sep  2 22:27:33 2022 - [info]  OK.
Fri Sep  2 22:27:33 2022 - [warning] shutdown_script is not defined.
Fri Sep  2 22:27:33 2022 - [info] Got exit code 0 (Not master dead).

MySQL Replication Health is OK.

#查看状态
[root@Centos ~]# masterha_check_status --conf=/etc/mastermha/app1.cnf
app1 is stopped(2:NOT_RUNNING).


#正式环境启动
nohup masterha_manager --conf=/etc/mastermha/app1.cnf --remove_dead_master_conf --ignore_last_failover &> /dev/null &
#测试环境启动
[root@Centos ~]# masterha_manager --conf=/etc/mastermha/app1.cnf --remove_dead_master_conf --ignore_last_failover
[root@Centos ~]# masterha_check_status --conf=/etc/mastermha/app1.cnf
app1 (pid:47767) is running(0:PING_OK), master:10.0.0.153

#模拟153主节点宕机,MHA转移主节点
[root@localhost data]#systemctl stop mysqld.service 
[root@Centos ~]# masterha_manager --conf=/etc/mastermha/app1.cnf --remove_dead_master_conf --ignore_last_failover
Fri Sep  2 22:29:42 2022 - [warning] Global configuration file /etc/masterha_default.cnf not found. Skipping.
Fri Sep  2 22:29:42 2022 - [info] Reading application default configuration from /etc/mastermha/app1.cnf..
Fri Sep  2 22:29:42 2022 - [info] Reading server configuration from /etc/mastermha/app1.cnf..
Fri Sep  2 22:30:33 2022 - [warning] Global configuration file /etc/masterha_default.cnf not found. Skipping.
Fri Sep  2 22:30:33 2022 - [info] Reading application default configuration from /etc/mastermha/app1.cnf..
Fri Sep  2 22:30:33 2022 - [info] Reading server configuration from /etc/mastermha/app1.cnf..

[root@localhost data]#ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:0c:29:d9:d8:e1 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.153/24 brd 10.0.0.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fed9:d8e1/64 scope link 
       valid_lft forever preferred_lft forever
[root@localhost data]#systemctl status mysqld.service 
● mysqld.service - MySQL 8.0 database server
   Loaded: loaded (/usr/lib/systemd/system/mysqld.service; disabled; vendor preset: disabled)
   Active: inactive (dead)

Sep 02 20:05:38 localhost.localdomain systemd[1]: Starting MySQL 8.0 database server...
Sep 02 20:05:38 localhost.localdomain systemd[1]: Started MySQL 8.0 database server.
Sep 02 21:59:49 localhost.localdomain systemd[1]: Stopping MySQL 8.0 database server...
Sep 02 22:00:00 localhost.localdomain systemd[1]: mysqld.service: Succeeded.
Sep 02 22:00:00 localhost.localdomain systemd[1]: Stopped MySQL 8.0 database server.
Sep 02 22:00:00 localhost.localdomain systemd[1]: Starting MySQL 8.0 database server...
Sep 02 22:00:01 localhost.localdomain systemd[1]: Started MySQL 8.0 database server.
Sep 02 22:30:29 localhost.localdomain systemd[1]: Stopping MySQL 8.0 database server...
Sep 02 22:30:40 localhost.localdomain systemd[1]: mysqld.service: Succeeded.
Sep 02 22:30:40 localhost.localdomain systemd[1]: Stopped MySQL 8.0 database server.


[root@rocky8 ~]#ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:0c:29:51:16:b6 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.154/24 brd 10.0.0.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 10.0.0.100/24 brd 10.0.0.255 scope global secondary eth0:1
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe51:16b6/64 scope link 
       valid_lft forever preferred_lft forever
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:0c:29:51:16:c0 brd ff:ff:ff:ff:ff:ff
    inet 172.25.254.128/24 brd 172.25.254.255 scope global dynamic noprefixroute eth1
       valid_lft 1178sec preferred_lft 1178sec
    inet6 fe80::1dc0:cf48:556f:afd6/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever

Percona XtraDB Cluster

pxc1:10.0.0.161
pxc2:10.0.0.162
pxc3:10.0.0.163
pxc4:10.0.0.164

#安装 Percona XtraDB Cluster 5.7
[root@Centos ~]# vim /etc/yum.repos.d/pxc.repo
  
[percona]
name=percona_repo
baseurl=https://mirrors.tuna.tsinghua.edu.cn/percona/release/$releasever/RPMS/$basearch
enabled= 1
gpgcheck= 0

[root@Centos ~]# scp /etc/yum.repos.d/pxc.repo 10.0.0.162:/etc/yum.repos.d
[root@Centos ~]# scp /etc/yum.repos.d/pxc.repo 10.0.0.163:/etc/yum.repos.d
[root@Centos ~]# scp /etc/yum.repos.d/pxc.repo 10.0.0.164:/etc/yum.repos.d

#在三个节点都安装好PXC 5.7
[root@Centos ~]# yum install Percona-XtraDB-Cluster-57 -y
[root@Centos ~]# yum install Percona-XtraDB-Cluster-57 -y
[root@Centos ~]# yum install Percona-XtraDB-Cluster-57 -y

#在各个节点上分别配置mysql及集群配置文件
#/etc/my.cnf为主配置文件,当前版本中,其余的配置文件都放在/etc/percona-xtradb-cluster.conf.d目录里,包括mysqld.cnf,mysqld_safe.cnf,wsrep.cnf 三个文件
#主配置文件不需要修改
[root@Centos ~]#cat /etc/my.cnf
# The Percona XtraDB Cluster 5.7 configuration file.
...省略...
!includedir /etc/my.cnf.d/
!includedir /etc/percona-xtradb-cluster.conf.d/
[root@Centos ~]#ls /etc/my.cnf.d/
[root@Centos ~]#ls /etc/percona-xtradb-cluster.conf.d/
mysqld.cnf mysqld_safe.cnf wsrep.cnf
#下面配置文件不需要修改
[root@Centos ~]#cat /etc/percona-xtradb-cluster.conf.d/mysqld.cnf
...省略...
[client]
socket=/var/lib/mysql/mysql.sock
[mysqld]
server-id=1 #建议各个节点不同
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
log-error=/var/log/mysqld.log
pid-file=/var/run/mysqld/mysqld.pid
log-bin #建议启用,非必须项
log_slave_updates
expire_logs_days=7
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
#下面配置文件不需要修改
[root@Centos ~]#cat /etc/percona-xtradb-cluster.conf.d/mysqld_safe.cnf
...省略...
[mysqld_safe]
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/lib/mysql/mysql.sock
nice = 0

#PXC的配置文件必须修改
[root@Centos ~]#vim /etc/percona-xtradb-cluster.conf.d/wsrep.cnf
[root@Centos ~]#grep -Ev "^#|^$" /etc/percona-xtradb-cluster.conf.d/wsrep.cnf
[mysqld]
wsrep_provider=/usr/lib64/galera3/libgalera_smm.so
wsrep_cluster_address=gcomm://10.0.0.162,10.0.0.163,10.0.0.164 #三个节点的IP
binlog_format=ROW
default_storage_engine=InnoDB
wsrep_slave_threads= 8
wsrep_log_conflicts
innodb_autoinc_lock_mode=2
wsrep_node_address=10.0.0.162 #各个节点,指定自已的IP
wsrep_cluster_name=pxc-cluster #各个节点都一致
wsrep_node_name=pxc-cluster-node-1 #各个节点,指定自已节点名称
pxc_strict_mode=ENFORCING
wsrep_sst_method=xtrabackup-v2
wsrep_sst_auth="sstuser:s3cretPass" #取消本行注释,同一集群内多个节点的验证用户和密码信息必须一致

#查看数据库初始临时密码,登录进去修改密码
[root@Centos data]# grep "temporary password" /var/log/mysqld.log
2022-09-06T07:55:08.730994Z 1 [Note] A temporary password is generated for root@localhost: fyTBj=qEA6#+
[root@Centos data]# mysql -uroot -p'fyTBj=qEA6#+'
mysql> alter user 'root'@'localhost' identified by '123456';

#创建同步用户
mysql> CREATE USER 'sstuser'@'localhost' IDENTIFIED BY 's3cretPass';
Query OK, 0 rows affected (0.00 sec)

mysql> GRANT RELOAD, LOCK TABLES, PROCESS, REPLICATION CLIENT ON *.* TO 'sstuser'@'localhost';
Query OK, 0 rows affected (0.01 sec)

mysql> SHOW VARIABLES LIKE 'wsrep%'\G
mysql> show status like 'wsrep%';

#启动主节点
[root@Centos mysqld]# systemctl start mysql@bootstrap.service
#启动其他节点
[root@Centos mysqld]# systemctl start mysql

Group Replication

#修改hostname
[root@localhost ~]#hostnamectl set-hostname node1.com
[root@localhost ~]#hostnamectl set-hostname node2.com
[root@localhost ~]#hostnamectl set-hostname node3.com

#主机名和名称解析
cat >> /etc/hosts <<EOF
10.0.0.153 node1.com
10.0.0.154 node2.com
10.0.0.155 node3.com
EOF
[root@node1 ~]#scp /etc/hosts 10.0.0.154:/etc/
[root@node1 ~]#scp /etc/hosts 10.0.0.155:/etc/


#三台机器安装mysql
[root@node1 ~]#for i in 10.0.0.{153..155};do ssh root@$i "yum -y install mysql-server";done

#所有主机修改配置文件
[root@node1 ~]#uuidgen
c2c8fe3f-d470-4f3e-b7f7-9783be82a1c9
[root@node1 ~]#vim /etc/my.cnf
[mysqld]
default_authentication_plugin=mysql_native_password
binlog_checksum=NONE
server-id=153
gtid_mode=ON
enforce_gtid_consistency=ON
loose-group_replication_group_name="c2c8fe3f-d470-4f3e-b7f7-9783be82a1c9"
loose-group_replication_start_on_boot=OFF
loose-group_replication_local_address="10.0.0.153:24901"
loose-group_replication_group_seeds="10.0.0.153:24901,10.0.0.154:24901,10.0.0.155:24901"
loose-group_replication_bootstrap_group=OFF
loose-group_replication_recovery_use_ssl=ON

[root@node2 ~]#vim /etc/my.cnf
[mysqld]
default_authentication_plugin=mysql_native_password
binlog_checksum=NONE
server-id=154
gtid_mode=ON
enforce_gtid_consistency=ON
loose-group_replication_group_name="c2c8fe3f-d470-4f3e-b7f7-9783be82a1c9"
loose-group_replication_start_on_boot=OFF
loose-group_replication_local_address="10.0.0.154:24901"
loose-group_replication_group_seeds="10.0.0.153:24901,10.0.0.154:24901,10.0.0.155:24901"
loose-group_replication_bootstrap_group=OFF
loose-group_replication_recovery_use_ssl=ON
[root@node2 ~]#systemctl restart mysqld.service 

[root@node3 ~]#vim /etc/my.cnf
[mysqld]
default_authentication_plugin=mysql_native_password
binlog_checksum=NONE
server-id=155
gtid_mode=ON
enforce_gtid_consistency=ON
loose-group_replication_group_name="c2c8fe3f-d470-4f3e-b7f7-9783be82a1c9"
loose-group_replication_start_on_boot=OFF
loose-group_replication_local_address="10.0.0.155:24901"
loose-group_replication_group_seeds="10.0.0.153:24901,10.0.0.154:24901,10.0.0.155:24901"
loose-group_replication_bootstrap_group=OFF
loose-group_replication_recovery_use_ssl=ON
[root@node3 ~]#systemctl restart mysqld



#所有mysql节点上创建同步账号
SET SQL_LOG_BIN=0;
CREATE USER repluser@'%' IDENTIFIED BY '123456';
GRANT REPLICATION SLAVE ON *.* TO repluser@'%';
FLUSH PRIVILEGES;
SET SQL_LOG_BIN=1;


#在所有主机安装插件
INSTALL PLUGIN group_replication SONAME 'group_replication.so';
SELECT * FROM INFORMATION_SCHEMA.PLUGINS WHERE PLUGIN_NAME = 'group_replication'\G
show plugins;

#启用第一个节点Primary,node1
SET GLOBAL group_replication_bootstrap_group=ON;
START GROUP_REPLICATION;
SET GLOBAL group_replication_bootstrap_group=OFF;
mysql> SELECT * FROM performance_schema.replication_group_members;



#启用从节点
CHANGE MASTER TO MASTER_USER='repluser', MASTER_PASSWORD='123456' FOR CHANNEL 'group_replication_recovery';
#新版命令代替上面命令
CHANGE REPLICATION SOURCE TO SOURCE_USER='repluser', SOURCE_PASSWORD='123456' FOR CHANNEL 'group_replication_recovery';
START GROUP_REPLICATION;

#单主切换多主模式
#首先观察当前集群的模式和各个节点的读写模式
select * from performance_schema.replication_group_members;
show variables like '%read_only%';
#在任意节点执行切换语句
mysql> select group_replication_switch_to_multi_primary_mode();
+--------------------------------------------------+
| group_replication_switch_to_multi_primary_mode() |
+--------------------------------------------------+
| Mode switched to multi-primary successfully.     |
+--------------------------------------------------+
1 row in set (1.01 sec)

mysql> select * from performance_schema.replication_group_members;
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
| CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | MEMBER_VERSION |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
| group_replication_applier | 378462f7-2d14-11ed-b665-000c29d9d8e1 | node1.com   |        3306 | ONLINE       | PRIMARY     | 8.0.26         |
| group_replication_applier | 7071038a-2d10-11ed-9a3f-000c29bb3772 | node3.com   |        3306 | ONLINE       | PRIMARY     | 8.0.26         |
| group_replication_applier | d4351c59-2d0f-11ed-b959-000c295116b6 | node2.com   |        3306 | ONLINE       | PRIMARY     | 8.0.26         |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
3 rows in set (0.01 sec)

#多主切换回单主
mysql> select group_replication_switch_to_single_primary_mode("7071038a-2d10-11ed-9a3f-000c29bb3772");
+-----------------------------------------------------------------------------------------+
| group_replication_switch_to_single_primary_mode("7071038a-2d10-11ed-9a3f-000c29bb3772") |
+-----------------------------------------------------------------------------------------+
| Mode switched to single-primary successfully.                                           |
+-----------------------------------------------------------------------------------------+
1 row in set (1.01 sec)

mysql> select * from performance_schema.replication_group_members;
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
| CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | MEMBER_VERSION |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
| group_replication_applier | 378462f7-2d14-11ed-b665-000c29d9d8e1 | node1.com   |        3306 | ONLINE       | SECONDARY   | 8.0.26         |
| group_replication_applier | 7071038a-2d10-11ed-9a3f-000c29bb3772 | node3.com   |        3306 | ONLINE       | PRIMARY     | 8.0.26         |
| group_replication_applier | d4351c59-2d0f-11ed-b959-000c295116b6 | node2.com   |        3306 | ONLINE       | SECONDARY   | 8.0.26         |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
3 rows in set (0.00 sec)

#单主切换其他主
mysql> select group_replication_set_as_primary("d4351c59-2d0f-11ed-b959-000c295116b6");
+--------------------------------------------------------------------------+
| group_replication_set_as_primary("d4351c59-2d0f-11ed-b959-000c295116b6") |
+--------------------------------------------------------------------------+
| Primary server switched to: d4351c59-2d0f-11ed-b959-000c295116b6         |
+--------------------------------------------------------------------------+
1 row in set (1.01 sec)

mysql> select * from performance_schema.replication_group_members;
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
| CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | MEMBER_VERSION |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
| group_replication_applier | 378462f7-2d14-11ed-b665-000c29d9d8e1 | node1.com   |        3306 | ONLINE       | SECONDARY   | 8.0.26         |
| group_replication_applier | 7071038a-2d10-11ed-9a3f-000c29bb3772 | node3.com   |        3306 | ONLINE       | SECONDARY   | 8.0.26         |
| group_replication_applier | d4351c59-2d0f-11ed-b959-000c295116b6 | node2.com   |        3306 | ONLINE       | PRIMARY     | 8.0.26         |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
3 rows in set (0.00 sec)