Example 4: Building a RAID5 array
[root@nginx ~]# mdadm --create /dev/md0 --level=5 --raid-devices=4 /dev/sd{b,c,d,e}
mdadm: /dev/sdb appears to contain an ext2fs file system
size=2097152K mtime=Wed Dec 31 16:00:00 1969
mdadm: /dev/sdc appears to contain an ext2fs file system
size=2097152K mtime=Wed Dec 31 16:00:00 1969
mdadm: /dev/sdd appears to contain an ext2fs file system
size=2097152K mtime=Wed Dec 31 16:00:00 1969
mdadm: /dev/sde appears to contain an ext2fs file system
size=2097152K mtime=Wed Dec 31 16:00:00 1969
Continue creating array? y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.
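The array now exists and is resyncing in the background. To have it reassemble automatically at boot, its configuration can be recorded; a minimal sketch, assuming /etc/mdadm.conf is the config file this distro reads:
[root@nginx ~]# mdadm --detail /dev/md0                    # inspect level, chunk size, member state
[root@nginx ~]# mdadm --detail --scan >> /etc/mdadm.conf   # record the array for assembly at boot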
# watch "cat /proc/mdstat"
Every 2.0s: cat /proc/mdstat Tue Nov 15 15:41:46 2011
Personalities : [raid0] [raid1] [raid6] [raid5] [raid4]
md0 : active raid5 sda9[3] sda8[1] sda7[0]
4016000 blocks level 5, 64k chunk, algorithm 2 [3/2] [UU_]
[=====>...............] recovery = 26.2% (527572/2008000) finish
=1.4min speed=17018K/sec
Wait for the resync to complete.
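Once the resync finishes the array behaves like a single block device; a minimal sketch of formatting it and writing some test data (ext3 and the /mnt mountpoint are assumptions, kept consistent with the rest of these notes):
[root@nginx ~]# mkfs.ext3 /dev/md0       # assumption: ext3 filesystem
[root@nginx ~]# mount /dev/md0 /mnt      # assumption: /mnt as a scratch mountpoint
[root@nginx ~]# cp /etc/hosts /mnt/      # test data for the failure drill below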
Simulate a failure of one RAID5 member device, then manually remove the failed device and replace it with a good one.
[root@nginx ~]# mdadm /dev/md0 --fail /dev/sdc
mdadm: set /dev/sdc faulty in /dev/md0
[root@nginx ~]# cat /proc/mdstat
Personalities : [raid6] [raid5] [raid4]
md0 : active raid5 sde[4] sdd[2] sdc[1](F) sdb[0]
6286848 blocks super 1.2 level 5, 512k chunk, algorithm 2 [4/3] [U_UU]
unused devices: <none>
The data on the array is still accessible.
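One way to confirm the array is degraded yet still serving data (a sketch; assumes the array is mounted on /mnt with the test file from above):
[root@nginx ~]# mdadm --detail /dev/md0 | grep State   # shows "clean, degraded"
[root@nginx ~]# cat /mnt/hosts                         # the test file still reads back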
Remove the failed device:
[root@nginx ~]# mdadm /dev/md0 --remove /dev/sdc
mdadm: hot removed /dev/sdc
[root@nginx ~]# cat /proc/mdstat
Personalities : [raid6] [raid5] [raid4]
md0 : active raid5 sde[4] sdd[2] sdb[0]
6286848 blocks super 1.2 level 5, 512k chunk, algorithm 2 [4/3] [U_UU]
unused devices: <none>
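If the disk being swapped in previously held a filesystem or another array (as the warnings at creation time showed), its stale md metadata can be cleared first; a hedged sketch:
[root@nginx ~]# mdadm --zero-superblock /dev/sdc   # wipe old md metadata; harmless on a fresh disk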
Swap in a new device and let the data rebuild:
[root@nginx ~]# mdadm /dev/md0 --add /dev/sdc
mdadm: added /dev/sdc
[root@nginx ~]# watch "cat /proc/mdstat"
Personalities : [raid6] [raid5] [raid4]
md0 : active raid5 sde[4] sdd[2] sdc[1] sdb[0]
6286848 blocks super 1.2 level 5, 512k chunk, algorithm 2 [4/4] [UUUU]
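Rather than replacing disks by hand, a hot spare can be kept in the array so a rebuild starts automatically on failure; a sketch, assuming an extra disk /dev/sdf is available:
[root@nginx ~]# mdadm /dev/md0 --add /dev/sdf   # a device beyond raid-devices=4 becomes a spare
[root@nginx ~]# cat /proc/mdstat                # the spare is listed with an (S) flag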
===================================
lvm
pv  physical volume: a physical device carrying LVM metadata -- a partition, whole disk, image file, RAID device, etc.
vg  volume group: built from one or more physical volumes; its capacity is the sum of all its pvs -- a storage pool
lv  logical volume: the object actually operated on -- what gets formatted and mounted
disk/partition → pv ─┐      ┌─→ lv
                     ├→ vg ─┤
disk/partition → pv ─┘      └─→ lv
LVM features: online capacity resizing, striping, mirroring, snapshots (a snapshot sketch appears at the end of this section)
Example 1: Creating a logical volume
1. pv
# pvcreate /dev/sda7 /dev/sda8
# pvs
PV VG Fmt Attr PSize PFree
/dev/sda7 lvm2 -- 1.92G 1.92G
/dev/sda8 lvm2 -- 1.92G 1.92G
# pvdisplay
2. vg
# vgcreate mysql-vg /dev/sda7
Volume group "mysql-vg" successfully created
# vgs
VG #PV #LV #SN Attr VSize VFree
mysql-vg 1 0 0 wz--n- 1.91G 1.91G
# vgdisplay
# vgextend mysql-vg /dev/sda8 <--- add another pv to the vg to grow its capacity
Volume group "mysql-vg" successfully extended
# vgs
VG #PV #LV #SN Attr VSize VFree
mysql-vg 2 0 0 wz--n- 3.83G 3.83G
3. lv
# lvcreate -n mysql-lv -L 1G mysql-vg
Logical volume "mysql-lv" created
# lvs
LV VG Attr LSize Origin Snap% Move Log Copy% Convert
mysql-lv mysql-vg -wi-a- 1.00G
# lvdisplay
# ll /dev/mysql-vg/mysql-lv
lrwxrwxrwx 1 root root 31 11-15 16:27 /dev/mysql-vg/mysql-lv -> /dev/mapper/mysql--vg-mysql--lv
# mkfs.ext3 /dev/mysql-vg/mysql-lv
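To put the new lv to use, mount it and optionally make the mount persistent; a minimal sketch, with /mnt as an assumed mountpoint:
# mount /dev/mysql-vg/mysql-lv /mnt
# echo '/dev/mysql-vg/mysql-lv /mnt ext3 defaults 0 0' >> /etc/fstab   # assumption: persistent mount wanted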
Example 2: Resizing an lv online
1. Growing the capacity
# vgcreate mysql-vg /dev/sda7
# lvcreate -n mysql-lv -L 1G mysql-vg
# mkfs.ext3 /dev/mysql-vg/mysql-lv
# mount /dev/mysql-vg/mysql-lv /mnt
# lvextend -L 2G /dev/mysql-vg/mysql-lv <--- the lv stays mounted and in use while it is extended
# resize2fs /dev/mysql-vg/mysql-lv
# df -h
Filesystem            Size  Used Avail Use% Mounted on
/dev/sda2 39G 27G 11G 73% /
/dev/sda1 190M 169M 12M 94% /boot
tmpfs 1009M 0 1009M 0% /dev/shm
/dev/sda5 94G 61G 29G 68% /vmware
/dev/sda6 46G 23G 21G 53% /soft
/dev/mapper/mysql--vg-mysql--lv
2.0G 34M 1.9G 2% /mnt
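The feature list above also mentions snapshots; a hedged sketch of taking a point-in-time snapshot of this lv and dropping it again (the snapshot name, 200M size, and /media mountpoint are assumptions):
# lvcreate -s -n mysql-snap -L 200M /dev/mysql-vg/mysql-lv   # copy-on-write snapshot of the origin lv
# mount -o ro /dev/mysql-vg/mysql-snap /media                # browse the frozen view read-only
# umount /media && lvremove -f /dev/mysql-vg/mysql-snap      # remove the snapshot when done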