# service clvmd start
Activating VG(s): No volume groups found [ OK ]
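The "No volume groups found" message is expected here, since no clustered VG exists yet. To have clvmd come up automatically on every boot, something like the following can be run on both nodes (a sketch, assuming RHEL 6 style init scripts):
# chkconfig clvmd on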
On node01:
# pvcreate /dev/sdc1
Physical volume "/dev/sdc1" successfully created
# pvs
PV VG Fmt Attr PSize PFree
/dev/sda2 vg_node01 lvm2 a-- 39.51g 0
/dev/sdc1 lvm2 a-- 156.25g 156.25g
# vgcreate gfsvg /dev/sdc1
Clustered volume group "gfsvg" successfully created
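Because clvmd is already running and cluster locking is enabled, vgcreate builds a clustered VG by default; the explicit equivalent would be:
# vgcreate -c y gfsvg /dev/sdc1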
# lvcreate -l +100%FREE -n data gfsvg
Logical volume "data" created
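To confirm the volume group really is clustered, check that its attribute string ends in "c" (clustered), for example:
# vgs -o vg_name,vg_attr gfsvg
# lvs gfsvg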
On node02:
# /etc/init.d/clvmd start
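Once clvmd is running on node02, the clustered LV created on node01 should become visible on this node as well; a quick check:
# lvscan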
On node01:
[root@node01 ~]# mkfs.gfs2 -p lock_dlm -t gfs:gfs2 -j 2 /dev/gfsvg/data
This will destroy any data on /dev/gfsvg/data.
It appears to contain: symbolic link to `../dm-2'
Are you sure you want to proceed? [y/n] y
Device: /dev/gfsvg/data
Blocksize: 4096
Device Size 156.25 GB (40958976 blocks)
Filesystem Size: 156.25 GB (40958975 blocks)
Journals: 2
Resource Groups: 625
Locking Protocol: "lock_dlm"
Lock Table: "gfs:gfs2"
UUID: e28655c6-29e6-b813-138f-0b22d3b15321
Notes:
In gfs:gfs2, "gfs" is the cluster name (for lock_dlm it must match the cluster name defined in cluster.conf), and "gfs2" is a user-chosen filesystem name, effectively a label.
-j sets the number of journals, i.e. the number of hosts that can mount this filesystem; if omitted it defaults to 1, meaning only the management node.
This lab has two nodes, hence -j 2.
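If a third node joins the cluster later, an extra journal can be added while the filesystem is mounted (a sketch; gfs2_jadd operates on the mount point, /vmdata in this setup):
# gfs2_jadd -j 1 /vmdata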
Create the GFS mount point on node01 and node02:
# mkdir /vmdata
(1) Mount manually on node01 and node02 to test; once the mount succeeds, create files to verify that the clustered filesystem behaves as expected (a quick test is sketched below the mount command).
# mount.gfs2 /dev/gfsvg/data /vmdata
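A simple coherence test: write a file on node01 and read it on node02 (hello.txt is just an example file name):
[root@node01 ~]# echo test > /vmdata/hello.txt
[root@node02 ~]# cat /vmdata/hello.txt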
(2) Configure automatic mounting at boot
# vi /etc/fstab
/dev/gfsvg/data /vmdata gfs2 defaults 0 0
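On RHEL 6 it is the gfs2 init script that mounts gfs2 entries from /etc/fstab at boot, so it (along with clvmd) should be enabled on both nodes:
# chkconfig gfs2 on
# chkconfig clvmd on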
[root@node01 vmdata]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/vg_node01-lv_root 36G 3.8G 30G 12% /
tmpfs 1.9G 32M 1.9G 2% /dev/shm
/dev/sda1 485M 39M 421M 9% /boot
/dev/gfsvg/data 157G 259M 156G 1% /vmdata
Notes:
The quorum disk is a shared disk and does not need to be large; this example uses the roughly 128 MB partition /dev/sdb1.
[root@node01 ~]# fdisk -l
Disk /dev/sdb: 134 MB, 134217728 bytes
5 heads, 52 sectors/track, 1008 cylinders
Units = cylinders of 260 * 512 = 133120 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x80cdfae9
Device Boot Start End Blocks Id System
/dev/sdb1 1 1008 131014 83 Linux
(1) Create the quorum disk
[root@node01 ~]# mkqdisk -c /dev/sdb1 -l myqdisk
mkqdisk v0.6.0
Writing new quorum disk label 'myqdisk' to /dev/sdb1.
WARNING: About to destroy all data on /dev/sdb1; proceed [N/y] ? y
Initializing status block for node 1...
Initializing status block for node 2...
Initializing status block for node 3...
Initializing status block for node 4...
Initializing status block for node 5...
Initializing status block for node 6...
Initializing status block for node 7...
Initializing status block for node 8...
Initializing status block for node 9...
Initializing status block for node 10...
Initializing status block for node 11...
Initializing status block for node 12...
Initializing status block for node 13...
Initializing status block for node 14...
Initializing status block for node 15...
Initializing status block for node 16...
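For the cluster to actually use this quorum disk, it must be referenced in /etc/cluster/cluster.conf and qdiskd must be running. A minimal sketch (the interval, tko and votes values are illustrative, not taken from this setup):
<quorumd interval="1" tko="10" votes="1" label="myqdisk"/>
# service qdiskd start
# chkconfig qdiskd on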
(2) View quorum disk information
[root@node01 ~]# mkqdisk -L
mkqdisk v3.0.12.1