1.新添加一块大小为10G的磁盘
2.进行分区
[root@base ~]# echo "- - -" >> /sys/class/scsi_host/host0/scan [root@base ~]# echo "- - -" >> /sys/class/scsi_host/host1/scan [root@base ~]# echo "- - -" >> /sys/class/scsi_host/host2/scan [root@base ~]# fdisk -l ... Disk /dev/sdb: 10.7 GB, 10737418240 bytes, 20971520 sectors Units = sectors of 1 * 512 = 512 bytes ... [root@base ~]# gdisk /dev/sdb ... Number Start (sector) End (sector) Size Code Name 1 2048 2099199 1024.0 MiB FD00 Linux RAID 2 2099200 4196351 1024.0 MiB FD00 Linux RAID [root@base ~]# partprobe /dev/sdb [root@base ~]# cat /proc/partitions major minor #blocks name ... 8 17 1048576 sdb1 8 18 1048576 sdb2 ————————————————
3.创建raid0
[root@base ~]# mdadm -C /dev/md0 -a yes -l 0 -n 2 /dev/sdb{1,2}
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.
4.查看启用的raid设备
[root@base ~]# cat /proc/mdstat
Personalities : [raid0]
md0 : active raid0 sdb2[1] sdb1[0]
2093056 blocks super 1.2 512k chunks
unused devices: <none>
————————————————
5.硬盘格式化
[root@base ~]# mkfs.xfs /dev/md0
meta-data=/dev/md0 isize=512 agcount=8, agsize=65408 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=523264, imaxpct=25
= sunit=128 swidth=256 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=8 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
————————————————
6.挂载
[root@base ~]# mkdir /mnt/raid0 [root@base ~]# mount /dev/md0 /mnt/raid0 [root@base ~]# df -hT /mnt/raid0 文件系统 类型 容量 已用 可用 已用% 挂载点 /dev/md0 xfs 2.0G 33M 2.0G 2% /mnt/raid0 ————————————————
7.编辑/etc/fstab文件
[root@base ~]# vim /etc/fstab /dev/md0 /mnt/raid0 xfs defaults 0 0
8.创建一个2G的raid1
[root@base ~]# gdisk /dev/sdb ... Number Start (sector) End (sector) Size Code Name 1 2048 2099199 1024.0 MiB FD00 Linux RAID 2 2099200 4196351 1024.0 MiB FD00 Linux RAID 3 4196352 8390655 2.0 GiB FD00 Linux RAID 4 8390656 12584959 2.0 GiB FD00 Linux RAID [root@base ~]# partprobe /dev/sdb [root@base ~]# cat /proc/partitions ... 8 17 1048576 sdb1 8 18 1048576 sdb2 8 19 2097152 sdb3 8 20 2097152 sdb4 ... ————————————————
9.直接创建
[root@base ~]# mdadm -C /dev/md1 -a yes -l 1 -n 2 /dev/sdb{3,4}
mdadm: Note: this array has metadata at the start and
may not be suitable as a boot device. If you plan to
store '/boot' on this device please ensure that
your boot-loader understands md/v1.x metadata, or use
--metadata=0.90
Continue creating array? y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md1 started.
[root@base ~]# cat /proc/mdstat
Personalities : [raid0] [raid1]
md1 : active raid1 sdb4[1] sdb3[0]
2094080 blocks super 1.2 [2/2] [UU]
[==>..................] resync = 14.5%
(305536/2094080) finish=0.2min speed=101845K/sec
md0 : active raid0 sdb2[1] sdb1[0]
2093056 blocks super 1.2 512k chunks
unused devices: <none>
[root@base ~]# mkdir /mnt/Raid1
[root@base ~]# mkfs.xfs /dev/md1
meta-data=/dev/md1 isize=512 agcount=4, agsize=130880 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=523520, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@base ~]# mount /dev/md1 /mnt/Raid1
[root@base ~]# df -hT /mnt/Raid1
文件系统 类型 容量 已用 可用 已用% 挂载点
/dev/md1 xfs 2.0G 33M 2.0G 2% /mnt/Raid1
————————————————
10.查看raid1的相关信息
[root@base Raid1]# mdadm -D /dev/md1
/dev/md1:
Version : 1.2
Creation Time : Mon Jan 18 11:04:03 2021
Raid Level : raid1
Array Size : 2094080 (2045.00 MiB 2144.34 MB)
Used Dev Size : 2094080 (2045.00 MiB 2144.34 MB)
Raid Devices : 2
Total Devices : 2
Persistence : Superblock is persistent
Update Time : Mon Jan 18 11:07:22 2021
State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 0
Spare Devices : 0
Consistency Policy : resync
Name : localhost.localdomain:1 (local to host localhost.localdomain)
UUID : 21a27a2d:d4210bb8:faa229da:09c63a7e
Events : 17
Number Major Minor RaidDevice State
0 8 19 0 active sync /dev/sdb3
1 8 20 1 active sync /dev/sdb4
————————————————
11.模拟损坏一块磁盘sdb4并查看raid1的相关信息
[root@base Raid1]# mdadm /dev/md1 -f /dev/sdb4
mdadm: set /dev/sdb4 faulty in /dev/md1
[root@base Raid1]# mdadm -D /dev/md1
/dev/md1:
Version : 1.2
Creation Time : Mon Jan 18 11:04:03 2021
Raid Level : raid1
Array Size : 2094080 (2045.00 MiB 2144.34 MB)
Used Dev Size : 2094080 (2045.00 MiB 2144.34 MB)
Raid Devices : 2
Total Devices : 2
Persistence : Superblock is persistent
Update Time : Mon Jan 18 11:09:22 2021
State : active, degraded
Active Devices : 1
Working Devices : 1
Failed Devices : 1
Spare Devices : 0
Consistency Policy : resync
Name : localhost.localdomain:1 (local to host localhost.localdomain)
UUID : 21a27a2d:d4210bb8:faa229da:09c63a7e
Events : 20
Number Major Minor RaidDevice State
0 8 19 0 active sync /dev/sdb3
- 0 0 1 removed
1 8 20 - faulty /dev/sdb4
————————————————
12.磁盘损坏后把它从阵列中移除
[root@base Raid1]# mdadm /dev/md1 -r /dev/sdb4
mdadm: hot removed /dev/sdb4 from /dev/md1
[root@base Raid1]# mdadm -D /dev/md1
/dev/md1:
Version : 1.2
Creation Time : Mon Jan 18 11:04:03 2021
Raid Level : raid1
Array Size : 2094080 (2045.00 MiB 2144.34 MB)
Used Dev Size : 2094080 (2045.00 MiB 2144.34 MB)
Raid Devices : 2
Total Devices : 1
Persistence : Superblock is persistent
Update Time : Mon Jan 18 11:10:41 2021
State : clean, degraded
Active Devices : 1
Working Devices : 1
Failed Devices : 0
Spare Devices : 0
Consistency Policy : resync
Name : localhost.localdomain:1 (local to host localhost.localdomain)
UUID : 21a27a2d:d4210bb8:faa229da:09c63a7e
Events : 22
Number Major Minor RaidDevice State
0 8 19 0 active sync /dev/sdb3
- 0 0 1 removed
————————————————
13.为raid1再创建一个分区(sdb5)
[root@base Raid1]# gdisk /dev/sdb [root@base Raid1]# partprobe /dev/sdb [root@base Raid1]# cat /proc/partitions
14.对新分区进行格式化
[root@base Raid1]# mkfs.xfs /dev/sdb5
meta-data=/dev/sdb5 isize=512 agcount=4, agsize=131072 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=524288, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
————————————————
15.通过管理命令里的-a添加
[root@base Raid1]# mdadm /dev/md1 -a /dev/sdb5
mdadm: added /dev/sdb5
[root@base Raid1]# mdadm -D /dev/md1
/dev/md1:
Version : 1.2
Creation Time : Mon Jan 18 11:04:03 2021
Raid Level : raid1
Array Size : 2094080 (2045.00 MiB 2144.34 MB)
Used Dev Size : 2094080 (2045.00 MiB 2144.34 MB)
Raid Devices : 2
Total Devices : 2
Persistence : Superblock is persistent
Update Time : Mon Jan 18 11:14:02 2021
State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 0
Spare Devices : 0
Consistency Policy : resync
Name : localhost.localdomain:1 (local to host localhost.localdomain)
UUID : 21a27a2d:d4210bb8:faa229da:09c63a7e
Events : 41
Number Major Minor RaidDevice State
0 8 19 0 active sync /dev/sdb3
2 8 21 1 active sync /dev/sdb5
————————————————
16.如果不想用raid1阵列了,可以停止这个阵列,不过停止前需要先卸载(umount)。需要注意的是,停止阵列后该设备将不可访问;为稳妥起见,建议在操作前做好数据备份或迁移
[root@localhost ~]# umount /dev/md1
[root@localhost ~]# mdadm -S /dev/md1
mdadm: stopped /dev/md1
[root@localhost ~]# cat /proc/mdstat
Personalities : [raid0] [raid1]
md0 : active raid0 sdb2[1] sdb1[0]
2093056 blocks super 1.2 512k chunks
unused devices: <none>
————————————————
17.重新组装(-A, assemble)包含刚才那些磁盘的阵列
[root@base Raid1]# mdadm -A /dev/md1 /dev/sdb{3,4}
[root@base Raid1]# cat /proc/mdstat
18.手动添加
[root@base Raid1]# mdadm -AR /dev/md1 /dev/sdb{3,4}
[root@base Raid1]# cat /proc/mdstat
[root@base Raid1]# mdadm -D /dev/md1
[root@base Raid1]# mdadm /dev/md1 -a /dev/sdb4 mdadm: added /dev/sdb4 [root@base Raid1]# mdadm -D /dev/md1 [root@base Raid1]# cat /proc/mdstat ————————————————
19.使用命令模式-a选项直接添加
[root@base Raid1]# mdadm /dev/md1 -a /dev/sdb5 mdadm: added /dev/sdb5 [root@base Raid1]# mdadm -D /dev/md1
20.我们再模拟一块磁盘损坏,看sdb5是否会补上空缺
[root@base Raid1]# mdadm /dev/md1 -f /dev/sdb3 mdadm: set /dev/sdb3 faulty in /dev/md1 [root@base Raid1]# mdadm -D /dev/md1
21.自动识别
[root@base Raid1]# mdadm -D --scan [root@base Raid1]# mdadm -D --scan > /etc/mdadm.conf [root@base Raid1]# mdadm -S /dev/md1 [root@base Raid1]# cat /proc/mdstat [root@base Raid1]# mdadm -A /dev/md1 [root@base Raid1]# cat /proc/mdstat ————————————————



