27.1 文件系统基本概念
ext2: 文件系统块组组成:
超级块、GDT、block bitmap、inode bitmap、data blocks
驱动程序:将CPU的控制指令转换为特定设备要执行的操作(一般由硬件厂商提供)
控制器:直接集成在主板上的设备
适配器:非集成的(通过扩展槽外接)
硬盘类型
IDE总线:速度133MB/s
SATA(1,2,3):1.5Gbps,3Gbps,6Gbps
USB 3.0:5Gbps(480Mbps是USB 2.0的速度)
SCSI(Small Computer System Interface):有自己的CPU可以分担大CPU的存储负担(目前RAID常用)
SAS:(目前RAID常用)
27.2 RAID
RAID(Redundant Arrays of Inexpensive/Independent Disks )
廉价/独立冗余磁盘阵列(之前叫廉价,后来发现成本不低,就改成了独立)
Berkeley(美国加州大学伯克利分校的一位教授的论文):A Case for Redundant Arrays of Inexpensive Disks (RAID)
提高I/O能力:
磁盘并行读写
提高耐用性:
磁盘冗余来实现
RAID实现的方式:
Hardware RAID
外接式磁盘阵列,通过扩展RAID卡实现外界磁盘阵列;
内接式RAID:主板集成RAID控制器
Software RAID:
RAID控制器:可以控制多个硬盘的控制器
RAID LEVEL
级别:仅代表磁盘组织方式不同,没有上下之分;
RAID10优于RAID01
RAID0: 条带
性能提升: 读,写
冗余能力(容错能力): 无
空间利用率:N*min(S1,S2…)
至少2块盘
RAID1: 镜像
性能表现:写性能略有下降,读性能提升
冗余能力:有
空间利用率:1*min(S1,S2…)
至少2块盘
RAID2
RAID3
RAID4:
校验码:硬件备份和冗余机制
校验码盘容易成为性能瓶颈:解决方案,轮换成为校验码盘
有一块专门的盘存储校验码(异或制,此块盘为性能瓶颈)
性能表现:读,写提升
冗余能力:有
空间利用率:(N-1)*min(S1,S2,…)
至少需要3块
RAID5:
性能表现:读,写提升
冗余能力:有(1块盘)
空间利用率:(N-1)*min(S1,S2,…)
至少需要3块
RAID6:
性能表现:读,写提升
冗余能力:有(2块盘)
空间利用率:(N-2)*min(S1,S2,…)
至少需要4块
RAID10:先作镜像再做条带(相对更可靠)
性能表现:读、写提升
冗余能力:有(每组镜像最多只能坏一块)
空间利用率:N*min(S1,S2,…)/2
至少需要4块

RAID01:先做条带再做镜像,
性能表现:读、写提升
冗余能力:有
空间利用率:N*min(S1,S2,…)/2
至少需要4块

RAID50:(先做RAID5,再做RAID0)
性能表现:读、写提升
冗余能力:有
空间利用率:(N-2)*min(S1,S2,…)(N为总盘数,分两组RAID5各损失一块盘的容量)
至少需要6块
RAID7
jbod:将多个小盘组成一个大盘
性能表现:无提升
冗余能力:无
空间利用率:100%(SUM(S1,S2…..))
至少需要2块
常用级别:RAID0,RAID1,RAID5,RAID10,RAID50,JBOD
27.3 RAID实现
27.3.1 硬件RAID
在主板上有个RAID控制器,通过BIOS进行控制更改
27.3.2 软件RAID
通过内核中的md模块来手动配置RAID
md:multi disks/devices
软RAID设备必须被标识为fd类型,避免系统崩溃以后RAID设备无法再访问
/proc/mdstat:显示当前系统所有启动的RAID
逻辑RAID:
/dev/md0
/dev/md1
mdadm(md管理器):
将任何块设备做成RAID(同一块硬盘两个分区也可以做,但没有性能提升的意义)
支持的RAID级别:LINEAR,RAID0,RAID1,RAID4,RAID5,RAID6,RAID10等
模式化的命令:
创建模式
-C
专用选项:
-l: 指明要创建的RAID级别
-n #: 设备个数
-a {yes|no}: 是否自动为其创建设备文件
-c: 指定CHUNK(条带)大小, 2^n,默认为64K
-x #: 指定空闲盘个数
管理模式
-a:添加磁盘
-r:移除磁盘
-f:标记指定磁盘为损坏
--add, --remove, --fail
mdadm /dev/md# --fail /dev/sda7
监控模式
-F
增长模式
-G
装配模式
-A
查看RAID阵列的详细信息
-D /dev/md#
--detail
--scan > /etc/mdadm.conf(将当前RAID信息保存至配置文件,以便以后进行装配:)
停止阵列:
-S /dev/md#
--stop
例:创建一个10G可用空间的3块盘
[root@localhost ~]# mdadm -C /dev/md0 -a yes -n 3 -l 5 /dev/sda1 /dev/sda2 /dev/sda3
mdadm: Fail to create md0 when using /sys/module/md_mod/parameters/new_array, fallback to creation via node
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.
[root@localhost ~]# cat /proc/mdstat
Personalities : [raid6] [raid5] [raid4]
md0 : active raid5 sda3[3] sda2[1] sda1[0]
10475520 blocks super 1.2 level 5, 512k chunk, algorithm 2 [3/2] [UU_]
[==>..................] recovery = 10.6% (556540/5237760) finish=0.7min speed=111308K/sec
unused devices: <none>
[root@localhost ~]# mkdir /test
[root@localhost ~]# mount /dev/md0 /test
[root@localhost ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/centos-root 50G 2.6G 48G 6% /
devtmpfs 3.9G 0 3.9G 0% /dev
tmpfs 3.9G 0 3.9G 0% /dev/shm
tmpfs 3.9G 8.7M 3.9G 1% /run
tmpfs 3.9G 0 3.9G 0% /sys/fs/cgroup
/dev/sdb1 1014M 143M 872M 15% /boot
/dev/mapper/centos-home 42G 33M 42G 1% /home
tmpfs 781M 0 781M 0% /run/user/0
/dev/md0 9.8G 37M 9.2G 1% /test
[root@localhost ~]# mke2fs -t ext4 /etc/md0
mke2fs 1.42.9 (28-Dec-2013)
Could not stat /etc/md0 --- No such file or directory
The device apparently does not exist; did you specify it correctly?
[root@localhost ~]# mke2fs -t ext4 /dev/md0
mke2fs 1.42.9 (28-Dec-2013)
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=128 blocks, Stripe width=256 blocks
655360 inodes, 2618880 blocks
130944 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=2151677952
80 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks:
32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632
Allocating group tables: done
Writing inode tables: done
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting information: done
[root@localhost ~]# mkdir /test
[root@localhost ~]# mount /dev/md0 /test
[root@localhost ~]# mount
sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime,seclabel)
proc on /proc type proc (rw,nosuid,nodev,noexec,relatime)
devtmpfs on /dev type devtmpfs (rw,nosuid,seclabel,size=3985976k,nr_inodes=996494,mode=755)
securityfs on /sys/kernel/security type securityfs (rw,nosuid,nodev,noexec,relatime)
tmpfs on /dev/shm type tmpfs (rw,nosuid,nodev,seclabel)
devpts on /dev/pts type devpts (rw,nosuid,noexec,relatime,seclabel,gid=5,mode=620,ptmxmode=000)
tmpfs on /run type tmpfs (rw,nosuid,nodev,seclabel,mode=755)
tmpfs on /sys/fs/cgroup type tmpfs (ro,nosuid,nodev,noexec,seclabel,mode=755)
cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd)
pstore on /sys/fs/pstore type pstore (rw,nosuid,nodev,noexec,relatime)
cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer)
cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,cpuacct,cpu)
cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset)
cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices)
cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,pids)
cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,net_prio,net_cls)
cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event)
cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio)
cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb)
cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory)
configfs on /sys/kernel/config type configfs (rw,relatime)
/dev/mapper/centos-root on / type xfs (rw,relatime,seclabel,attr2,inode64,noquota)
selinuxfs on /sys/fs/selinux type selinuxfs (rw,relatime)
systemd-1 on /proc/sys/fs/binfmt_misc type autofs (rw,relatime,fd=30,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=12878)
debugfs on /sys/kernel/debug type debugfs (rw,relatime)
mqueue on /dev/mqueue type mqueue (rw,relatime,seclabel)
hugetlbfs on /dev/hugepages type hugetlbfs (rw,relatime,seclabel)
/dev/sdb1 on /boot type xfs (rw,relatime,seclabel,attr2,inode64,noquota)
/dev/mapper/centos-home on /home type xfs (rw,relatime,seclabel,attr2,inode64,noquota)
tmpfs on /run/user/0 type tmpfs (rw,nosuid,nodev,relatime,seclabel,size=799408k,mode=700)
/dev/md0 on /test type ext4 (rw,relatime,seclabel,stripe=256,data=ordered)
[root@localhost ~]# blkid /dev/md0
/dev/md0: UUID="827112e4-bd97-4394-b812-1394dcd10e71" TYPE="ext4"
[root@localhost ~]# mdadm -D /dev/md0
/dev/md0:
Version : 1.2
Creation Time : Mon Jul 26 04:26:58 2021
Raid Level : raid5
Array Size : 10475520 (9.99 GiB 10.73 GB)
Used Dev Size : 5237760 (5.00 GiB 5.36 GB)
Raid Devices : 3
Total Devices : 3
Persistence : Superblock is persistent
Update Time : Mon Jul 26 04:32:45 2021
State : clean
Active Devices : 3
Working Devices : 3
Failed Devices : 0
Spare Devices : 0
Layout : left-symmetric
Chunk Size : 512K
Consistency Policy : resync
Name : localhost.localdomain:0 (local to host localhost.localdomain)
UUID : 5fb851eb:b930b950:288b80e1:e384eb79
Events : 18
Number Major Minor RaidDevice State
0 8 1 0 active sync /dev/sda1
1 8 2 1 active sync /dev/sda2
3 8 3 2 active sync /dev/sda3
[root@localhost ~]# mdadm /dev/md0 -f /dev/sda1
mdadm: set /dev/sda1 faulty in /dev/md0
[root@localhost ~]# mdadm -D /dev/md0
/dev/md0:
Version : 1.2
Creation Time : Mon Jul 26 04:26:58 2021
Raid Level : raid5
Array Size : 10475520 (9.99 GiB 10.73 GB)
Used Dev Size : 5237760 (5.00 GiB 5.36 GB)
Raid Devices : 3
Total Devices : 3
Persistence : Superblock is persistent
Update Time : Mon Jul 26 04:35:01 2021
State : clean, degraded
Active Devices : 2
Working Devices : 2
Failed Devices : 1
Spare Devices : 0
Layout : left-symmetric
Chunk Size : 512K
Consistency Policy : resync
Name : localhost.localdomain:0 (local to host localhost.localdomain)
UUID : 5fb851eb:b930b950:288b80e1:e384eb79
Events : 20
Number Major Minor RaidDevice State
- 0 0 0 removed
1 8 2 1 active sync /dev/sda2
3 8 3 2 active sync /dev/sda3
0 8 1 - faulty /dev/sda1
[root@localhost ~]# mdadm /dev/md0 -f /dev/sda2
mdadm: set /dev/sda2 faulty in /dev/md0
[root@localhost ~]# mdadm -D /dev/md0
/dev/md0:
Version : 1.2
Creation Time : Mon Jul 26 04:26:58 2021
Raid Level : raid5
Array Size : 10475520 (9.99 GiB 10.73 GB)
Used Dev Size : 5237760 (5.00 GiB 5.36 GB)
Raid Devices : 3
Total Devices : 3
Persistence : Superblock is persistent
Update Time : Mon Jul 26 04:35:40 2021
State : clean, FAILED
Active Devices : 1
Working Devices : 1
Failed Devices : 2
Spare Devices : 0
Layout : left-symmetric
Chunk Size : 512K
Consistency Policy : resync
Name : localhost.localdomain:0 (local to host localhost.localdomain)
UUID : 5fb851eb:b930b950:288b80e1:e384eb79
Events : 22
Number Major Minor RaidDevice State
- 0 0 0 removed
- 0 0 1 removed
3 8 3 2 active sync /dev/sda3
0 8 1 - faulty /dev/sda1
1 8 2 - faulty /dev/sda2
[root@localhost ~]# mdadm /dev/md0 -a /dev/sda1
mdadm: Cannot open /dev/sda1: Device or resource busy
[root@localhost ~]#
[root@localhost ~]# mdadm /dev/md0 -a /dev/sda2
mdadm: Cannot open /dev/sda2: Device or resource busy
[root@localhost ~]# mdadm /dev/md0 -r /dev/sda2
mdadm: hot removed /dev/sda2 from /dev/md0
[root@localhost ~]# mdadm /dev/md0 -r /dev/sda1
mdadm: hot removed /dev/sda1 from /dev/md0
[root@localhost ~]# mdadm -D /dev/md0
/dev/md0:
Version : 1.2
Creation Time : Mon Jul 26 04:26:58 2021
Raid Level : raid5
Array Size : 10475520 (9.99 GiB 10.73 GB)
Used Dev Size : 5237760 (5.00 GiB 5.36 GB)
Raid Devices : 3
Total Devices : 1
Persistence : Superblock is persistent
Update Time : Mon Jul 26 04:37:12 2021
State : clean, FAILED
Active Devices : 1
Working Devices : 1
Failed Devices : 0
Spare Devices : 0
Layout : left-symmetric
Chunk Size : 512K
Consistency Policy : resync
Name : localhost.localdomain:0 (local to host localhost.localdomain)
UUID : 5fb851eb:b930b950:288b80e1:e384eb79
Events : 24
Number Major Minor RaidDevice State
- 0 0 0 removed
- 0 0 1 removed
3 8 3 2 active sync /dev/sda3
[root@localhost ~]# mdadm -S /dev/md0
mdadm: Cannot get exclusive access to /dev/md0:Perhaps a running process, mounted filesystem or active volume group?
[root@localhost ~]# unmount /dev/md0
-bash: unmount: command not found
[root@localhost ~]# umount /dev/md0
[root@localhost ~]# mdadm -S /dev/md0
mdadm: stopped /dev/md0
[root@localhost ~]# cat /proc/mdstat
Personalities : [raid6] [raid5] [raid4]
unused devices: <none>
[root@localhost ~]#
watch: 周期性地执行指定命令,并以全屏方式显示结果
-n #:指定周期长度,单位为秒,默认为2
格式: watch -n # 'COMMAND'
评论关闭。