Therefore, the earlier problem that vip and httpd may end up running on different nodes can be solved with the following command:
crm(live)configure# colocation httpd-with-ip inf: httpd vip
crm(live)configure# show
node node1.test.com
node node2.test.com \
attributes standby="off"
primitive httpd lsb:httpd
primitive vip ocf:heartbeat:IPaddr \
params ip="192.168.18.200" nic="eth0" cidr_netmask="24"
colocation httpd-with-ip inf: httpd vip
property $id="cib-bootstrap-options" \
dc-version="1.1.8-7.el6-394e906" \
cluster-infrastructure="classic openais (with plugin)" \
expected-quorum-votes="2" \
stonith-enabled="false" \
no-quorum-policy="ignore" \
last-lrm-refresh="1376553277"
rsc_defaults $id="rsc-options" \
resource-stickiness="100"
crm(live)configure# show xml
<rsc_colocation score="INFINITY" rsc="httpd" with-rsc="vip"/>
[root@node2 ~]# crm_mon
Last updated: Thu Aug 15 16:12:18 2013
Last change: Thu Aug 15 16:12:05 2013 via cibadmin on node1.test.com
Stack: classic openais (with plugin)
Current DC: node2.test.com - partition with quorum
Version: 1.1.8-7.el6-394e906
2 Nodes configured, 2 expected votes
2 Resources configured.
Online: [ node1.test.com node2.test.com ]
vip (ocf::heartbeat:IPaddr): Started node1.test.com
httpd (lsb:httpd): Started node1.test.com
As you can see, all of the resources are now running on node1. Let's test access to the service.
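If you do not have a browser handy, a quick check from any host that can reach the VIP does the job as well (curl is assumed to be available; this check is not part of the original article):
[root@node2 ~]# curl http://192.168.18.200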
Now let's simulate a failure and test again:
crm(live)# node
crm(live)node# standby
crm(live)node# show
node1.test.com: normal
standby: on
node2.test.com: normal
standby: off
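Note that standby issued without an argument puts the node you are running crm on into standby. The node name can also be given explicitly, and the whole operation can be done non-interactively (a usage sketch, not from the original):
[root@node1 ~]# crm node standby node1.test.com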
[root@node2 ~]# crm_mon
Last updated: Thu Aug 15 16:14:33 2013
Last change: Thu Aug 15 16:14:23 2013 via crm_attribute on node1.test.com
Stack: classic openais (with plugin)
Current DC: node2.test.com - partition with quorum
Version: 1.1.8-7.el6-394e906
2 Nodes configured, 2 expected votes
2 Resources configured.
Node node1.test.com: standby
Online: [ node2.test.com ]
vip (ocf::heartbeat:IPaddr): Started node2.test.com
httpd (lsb:httpd): Started node2.test.com
As you can see, all of the resources have moved to node2. Test access again.
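When the failover test is over, the node can be taken out of standby again with crm node online (a usage sketch; the article keeps node1 in standby for now, as the show output below still reflects):
[root@node1 ~]# crm node online node1.test.com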
Next, we also need to make sure that vip is started before httpd on a node. This can be done with the following command:
crm(live)# configure
crm(live)configure# order httpd-after-vip mandatory: vip httpd
crm(live)configure# verify
crm(live)configure# show
node node1.test.com \
attributes standby="on"
node node2.test.com \
attributes standby="off"
primitive httpd lsb:httpd \
meta target-role="Started"
primitive vip ocf:heartbeat:IPaddr \
params ip="192.168.18.200" nic="eth0" cidr_netmask="24" \
meta target-role="Started"
colocation httpd-with-ip inf: httpd vip
order httpd-after-vip inf: vip httpd
property $id="cib-bootstrap-options" \
dc-version="1.1.8-7.el6-394e906" \
cluster-infrastructure="classic openais (with plugin)" \
expected-quorum-votes="2" \
stonith-enabled="false" \
no-quorum-policy="ignore" \
last-lrm-refresh="1376554276"
rsc_defaults $id="rsc-options" \
resource-stickiness="100"
crm(live)configure# show xml
<rsc_order score="INFINITY" first="vip" then="httpd"/>
crm(live)configure# commit
In addition, an HA cluster does not require every node to have identical or similar performance, so sometimes we may want the service to run on a more powerful node whenever that node is available. This can be achieved with a location constraint:
crm(live)configure# location prefer-node1 vip 200: node1.test.com
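For reference, the same preference can also be expressed with an explicit rule (a sketch, not from the original article; the score of 200 and the node name are illustrative):
crm(live)configure# location prefer-node1 vip \
    rule 200: #uname eq node1.test.com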
That completes the basic configuration of the highly available Web cluster. Next, let's add an NFS resource.
8. Configuring an NFS resource with crmsh
(1). Configure the NFS server
[root@nfs ~]# mkdir -pv /web
mkdir: created directory '/web'
[root@nfs ~]# vim /etc/exports
/web 192.168.18.0/24(ro,async)
[root@nfs /]# echo '<h1>Cluster NFS Server</h1>' > /web/index.html
[root@nfs ~]# /etc/init.d/rpcbind start
Starting rpcbind: [ OK ]
[root@nfs /]# /etc/init.d/nfs start
Starting NFS services: [ OK ]
Starting NFS quotas: [ OK ]
Starting NFS daemon: [ OK ]
Starting NFS mountd: [ OK ]
[root@nfs /]# showmount -e 192.168.18.208
Export list for 192.168.18.208:
/web 192.168.18.0/24
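If you change /etc/exports again later, the export list can be refreshed without restarting the NFS service (an optional step, not in the original article; exportfs ships with nfs-utils):
[root@nfs ~]# exportfs -arv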
(2). Test-mount the export on the nodes
node1:
[root@node1 ~]# mount -t nfs 192.168.18.208:/web /mnt
[root@node1 ~]# cd /mnt/
[root@node1 mnt]# ll
total 4
-rw-r--r-- 1 root root 28 08-07 17:41 index.html
[root@node1 mnt]# mount
/dev/sda2 on / type ext3 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
/dev/sda3 on /data type ext3 (rw)
/dev/sda1 on /boot type ext3 (rw)
tmpfs on /dev/shm type tmpfs (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw)
192.168.18.208:/web on /mnt type nfs (rw,addr=192.168.18.208)
[root@node1 ~]# umount /mnt
[root@node1 ~]# mount
/dev/sda2 on / type ext3 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
/dev/sda3 on /data type ext3 (rw)
/dev/sda1 on /boot type ext3 (rw)
tmpfs on /dev/shm type tmpfs (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw)
node2:
[root@node2 ~]# mount -t nfs 192.168.18.208:/web /mnt
[root@node2 ~]# cd /mnt
[root@node2 mnt]# ll
total 4
-rw-r--r-- 1 root root 28 08-07 17:41 index.html
[root@node2 mnt]# mount
/dev/sda2 on / type ext3 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
/dev/sda3 on /data type ext3 (rw)
/dev/sda1 on /boot type ext3 (rw)
tmpfs on /dev/shm type tmpfs (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw)
192.168.18.208:/web on /mnt type nfs (rw,addr=192.168.18.208)
[root@node2 ~]# umount /mnt
[root@node2 ~]# mount
/dev/sda2 on / type ext3 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
/dev/sda3 on /data type ext3 (rw)
/dev/sda1 on /boot type ext3 (rw)
tmpfs on /dev/shm type tmpfs (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw)
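Since the cluster manager will be responsible for starting httpd, it is usually a good idea to make sure the init system does not start it on its own on either node (a precaution assumed here, not shown in the original article):
[root@node1 ~]# service httpd stop; chkconfig httpd off
[root@node2 ~]# service httpd stop; chkconfig httpd off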
(3). Configure the vip, httpd, and nfs resources
crm(live)# configure
crm(live)configure# show
node node1.test.com
node node2.test.com
property $id="cib-bootstrap-options" \
dc-version="1.1.8-7.el6-394e906" \
cluster-infrastructure="classic openais (with plugin)" \
expected-quorum-votes="2" \
stonith-enabled="false" \
no-quorum-policy="ignore" \
last-lrm-refresh="1376555949"
rsc_defaults $id="rsc-options" \
resource-stickiness="100"
crm(live)configure# primitive vip ocf:heartbeat:IPaddr params ip=192.168.18.200 nic=eth0 cidr_netmask=24
crm(live)configure# primitive httpd lsb:httpd
crm(live)configure# primitive nfs ocf:heartbeat:Filesystem params device=192.168.18.208:/web directory=/var/www/html fstype=nfs
crm(live)configure# verify
WARNING: nfs: default timeout 20s for start is smaller than the advised 60
WARNING: nfs: default timeout 20s for stop is smaller than the advised 60
crm(live)configure# show
node node1.test.com
node node2.test.com
primitive httpd lsb:httpd
primitive nfs ocf:heartbeat:Filesystem \
params device="192.168.18.208:/web" directory="/var/www/html" fstype="nfs"
primitive vip ocf:heartbeat:IPaddr \
params ip="192.168.18.200" nic="eth0" cidr_netmask="24"
property $id="cib-bootstrap-options" \
dc-version="1.1.8-7.el6-394e906" \
cluster-infrastructure="classic openais (with plugin)" \
expected-quorum-votes="2" \
stonith-enabled="false" \
no-quorum-policy="ignore" \
last-lrm-refresh="1376555949"
rsc_defaults $id="rsc-options" \
resource-stickiness="100"
crm(live)configure# commit
WARNING: nfs: default timeout 20s for start is smaller than the advised 60
WARNING: nfs: default timeout 20s for stop is smaller than the advised 60
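The two warnings are only advisory: the Filesystem agent recommends 60-second start/stop timeouts. If you want to get rid of them, the nfs primitive can be defined with explicit operation timeouts instead (a sketch with assumed values, not part of the original configuration):
crm(live)configure# primitive nfs ocf:heartbeat:Filesystem \
    params device="192.168.18.208:/web" directory="/var/www/html" fstype="nfs" \
    op start timeout=60s op stop timeout=60s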
Looking at the three resources we just defined, you can see that they are not all on the same node. Next we define a resource group so that all three run on the same node.
[root@node2 ~]# crm_mon
Last updated: Thu Aug 15 17:00:17 2013
Last change: Thu Aug 15 16:58:44 2013 via cibadmin on node1.test.com
Stack: classic openais (with plugin)
Current DC: node2.test.com - partition with quorum
Version: 1.1.8-7.el6-394e906
2 Nodes configured, 2 expected votes
3 Resources configured.
Online: [ node1.test.com node2.test.com ]
vip (ocf::heartbeat:IPaddr): Started node1.test.com
httpd (lsb:httpd): Started node2.test.com
nfs (ocf::heartbeat:Filesystem): Started node1.test.com
(4). Define a resource group
crm(live)# configure
crm(live)configure# group webservice vip nfs httpd
crm(live)configure# verify
crm(live)configure# show
node node1.test.com
node node2.test.com
primitive httpd lsb:httpd
primitive nfs ocf:heartbeat:Filesystem \
params device="192.168.18.208:/web" directory="/var/www/html" fstype="nfs"
primitive vip ocf:heartbeat:IPaddr \
params ip="192.168.18.200" nic="eth0" cidr_netmask="24"
group webservice vip nfs httpd
property $id="cib-bootstrap-options" \
dc-version="1.1.8-7.el6-394e906" \
cluster-infrastructure="classic openais (with plugin)" \
expected-quorum-votes="2" \
stonith-enabled="false" \
no-quorum-policy="ignore" \
last-lrm-refresh="1376555949"
rsc_defaults $id="rsc-options" \
resource-stickiness="100"
crm(live)configure# commit
Checking the resource status, all of the resources are now on node1. Let's test it.
[root@node2 ~]# crm_mon
Last updated: Thu Aug 15 17:03:20 2013
Last change: Thu Aug 15 17:02:44 2013 via cibadmin on node1.test.com
Stack: classic openais (with plugin)
Current DC: node2.test.com - partition with quorum
Version: 1.1.8-7.el6-394e906
2 Nodes configured, 2 expected votes
3 Resources configured.
Online: [ node1.test.com node2.test.com ]
Resource Group: webservice
vip (ocf::heartbeat:IPaddr): Started node1.test.com
nfs (ocf::heartbeat:Filesystem): Started node1.test.com
httpd (lsb:httpd): Started node1.test.com
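A request to the VIP should now return the index.html served from the NFS export, i.e. the "Cluster NFS Server" page created earlier (checked here with curl from node2; this test step is assumed rather than shown in the original article):
[root@node2 ~]# curl http://192.168.18.200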
(5). Finally, let's simulate a resource failure
crm(live)# node
crm(live)node# standby
crm(live)node# show
node1.test.com: normal
standby: on
node2.test.com: normal
[root@node2 ~]# crm_mon
Last updated: Thu Aug 15 17:05:52 2013
Last change: Thu Aug 15 17:05:42 2013 via crm_attribute on node1.test.com
Stack: classic openais (with plugin)
Current DC: node2.test.com - partition with quorum
Version: 1.1.8-7.el6-394e906
2 Nodes configured, 2 expected votes
3 Resources configured.
Node node1.test.com: standby
Online: [ node2.test.com ]
Resource Group: webservice
vip (ocf::heartbeat:IPaddr): Started node2.test.com
nfs (ocf::heartbeat:Filesystem): Started node2.test.com
httpd (lsb:httpd): Started node2.test.com
When node1 fails, all of the resources move to node2. Let's access the service once more.
As you can see, the site is still accessible. That's it for this post; in the next one we will focus on DRBD. ^_^