1. Restore the environment
(1) Remove the auto-mount entry from the /etc/fstab configuration file
The entry to remove is as follows:
/dev/md0p1 /mnt/md0 xfs defaults 0 0
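If you would rather not edit the file by hand, a GNU sed one-liner can delete the entry non-interactively. This is only a sketch; double-check that the pattern matches nothing else in your /etc/fstab before running it with -i:
# sed -i '\|^/dev/md0p1 /mnt/md0|d' /etc/fstab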
(2) Unmount the filesystem
# umount /mnt/md0
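If umount reports that the target is busy, something is still using the mount point. One way to see the offending processes (assuming the psmisc package is installed) is:
# fuser -vm /mnt/md0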
(3) Stop the md0 array
# mdadm -S /dev/md0
[root@localhost ~]# mdadm -S /dev/md0
mdadm: stopped /dev/md0
Check the disk information:
# lsblk
[root@localhost ~]# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda               8:0    0   50G  0 disk
├─sda1            8:1    0  500M  0 part /boot
└─sda2            8:2    0   49G  0 part
  ├─centos-root 253:0    0   41G  0 lvm  /
  └─centos-swap 253:1    0    8G  0 lvm  [SWAP]
sdb               8:16   0   10G  0 disk
└─sdb1            8:17   0   10G  0 part
sdc               8:32   0   10G  0 disk
└─sdc1            8:33   0   10G  0 part
sr0              11:0    1  4.2G  0 rom
(4) Delete the RAID 0 array
Clear the superblock information on the component device sdb1:
# mdadm --zero-superblock /dev/sdb1
Clear the superblock information on the component device sdc1:
# mdadm --zero-superblock /dev/sdc1
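To confirm that the superblocks were actually wiped, you can examine each member; with no metadata left, mdadm should report that no md superblock is detected (the exact message may vary by version):
# mdadm -E /dev/sdb1
# mdadm -E /dev/sdc1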
2. Create RAID 1
Create an array md1 with two member devices, sdb1 and sdc1:
# mdadm -C -v /dev/md1 -l 1 -n 2 /dev/sdb1 /dev/sdc1
[root@localhost ~]# mdadm -C -v /dev/md1 -l 1 -n 2 /dev/sdb1 /dev/sdc1
mdadm: Note: this array has metadata at the start and
    may not be suitable as a boot device.  If you plan to
    store '/boot' on this device please ensure that
    your boot-loader understands md/v1.x metadata, or use
    --metadata=0.90
mdadm: size set to 10475520K
Continue creating array? y
mdadm: Fail to create md1 when using /sys/module/md_mod/parameters/new_array, fallback to creation via node
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md1 started.
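For readability, the same command can be written with long options; this is equivalent to the short form above:
# mdadm --create --verbose /dev/md1 --level=1 --raid-devices=2 /dev/sdb1 /dev/sdc1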
Check the result:
# lsblk
[root@localhost ~]# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE  MOUNTPOINT
sda               8:0    0   50G  0 disk
├─sda1            8:1    0  500M  0 part  /boot
└─sda2            8:2    0   49G  0 part
  ├─centos-root 253:0    0   41G  0 lvm   /
  └─centos-swap 253:1    0    8G  0 lvm   [SWAP]
sdb               8:16   0   10G  0 disk
└─sdb1            8:17   0   10G  0 part
  └─md1           9:1    0   10G  0 raid1
    └─md1p1     259:0    0   10G  0 md
sdc               8:32   0   10G  0 disk
└─sdc1            8:33   0   10G  0 part
  └─md1           9:1    0   10G  0 raid1
    └─md1p1     259:0    0   10G  0 md
sr0              11:0    1  4.2G  0 rom
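Note that lsblk already shows a partition (md1p1) inside the new array here; on a freshly created array you may not have one. In that case you can either format /dev/md1 directly or create a partition first, for example with parted (a sketch; adjust the label type and sizes as needed):
# parted -s /dev/md1 mklabel gpt mkpart primary xfs 0% 100%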
3. View the /dev/md1 details
# mdadm -D /dev/md1
[root@localhost ~]# mdadm -D /dev/md1
/dev/md1:
           Version : 1.2
     Creation Time : Sat Aug  7 23:37:19 2021
        Raid Level : raid1
        Array Size : 10475520 (9.99 GiB 10.73 GB)
     Used Dev Size : 10475520 (9.99 GiB 10.73 GB)
      Raid Devices : 2
     Total Devices : 2
       Persistence : Superblock is persistent

       Update Time : Sat Aug  7 23:38:11 2021
             State : clean
    Active Devices : 2
   Working Devices : 2
    Failed Devices : 0
     Spare Devices : 0

Consistency Policy : unknown

              Name : localhost.localdomain:1  (local to host localhost.localdomain)
              UUID : e79afe8f:78e892b6:9ee993ca:7a742a92
            Events : 17

    Number   Major   Minor   RaidDevice State
       0       8       17        0      active sync   /dev/sdb1
       1       8       33        1      active sync   /dev/sdc1
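A newly created RAID 1 performs an initial resync in the background. The array is usable while that runs, but if you prefer to block until it completes, mdadm can wait for it:
# mdadm --wait /dev/md1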
4. Format the array
# mkfs.xfs /dev/md1p1
[root@localhost ~]# mkfs.xfs /dev/md1p1
meta-data=/dev/md1p1             isize=256    agcount=4, agsize=654656 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=0        finobt=0
data     =                       bsize=4096   blocks=2618624, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=0
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
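If mkfs.xfs refuses to run because it detects an existing filesystem signature left over from the old array, it can be forced to overwrite it (make absolutely sure you are pointing at the right device first):
# mkfs.xfs -f /dev/md1p1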
5. Mount the filesystem
Create a mount point directory:
# mkdir /mnt/md1
Mount it:
# mount /dev/md1p1 /mnt/md1
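To make the mount persist across reboots, an entry can be added back to /etc/fstab, mirroring the one removed in step 1 (a sketch; using the filesystem UUID reported by blkid instead of the device name is more robust):
/dev/md1p1 /mnt/md1 xfs defaults 0 0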
6. View the mount information
# df -Th
[root@localhost ~]# df -Th
文件系统                类型      容量  已用  可用 已用% 挂载点
/dev/mapper/centos-root xfs        41G  1.5G   40G    4% /
devtmpfs                devtmpfs  1.9G     0  1.9G    0% /dev
tmpfs                   tmpfs     1.9G     0  1.9G    0% /dev/shm
tmpfs                   tmpfs     1.9G  8.6M  1.9G    1% /run
tmpfs                   tmpfs     1.9G     0  1.9G    0% /sys/fs/cgroup
/dev/sda1               xfs       497M  150M  348M   31% /boot
tmpfs                   tmpfs     378M     0  378M    0% /run/user/0
/dev/md1p1              xfs        10G   33M   10G    1% /mnt/md1
7. Check the status of the md driver in memory
# cat /proc/mdstat
[root@localhost ~]# cat /proc/mdstat
Personalities : [raid1]
md1 : active raid1 sdc1[1] sdb1[0]
      10475520 blocks super 1.2 [2/2] [UU]

unused devices: <none>
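Finally, so that the array is assembled under the same name after a reboot, its definition can be recorded in the mdadm configuration file (a common approach on CentOS 7; review the appended line before relying on it):
# mdadm --detail --scan >> /etc/mdadm.conf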