十:corosync+pacemaker的安装和配置
node1: [root@node1 ~]# yum localinstall *.rpm -y --nogpgcheck
node2: [root@node2 ~]# yum localinstall *.rpm -y --nogpgcheck
十一:对各个节点进行相应的配置
node1:
1:切换到主配置文件的目录
[root@node1 ~]# cd /etc/corosync/                      
[root@node1 corosync]# cp corosync.conf.example corosync.conf
[root@node1 corosync]# vim corosync.conf 
compatibility: whitetank
totem {   //这是用来传递心跳时的相关协议的信息 
        version: 2 
        secauth: off 
        threads: 0 
        interface { 
                ringnumber: 0 
                bindnetaddr: 192.168.2.0 //我们只改动这里就行啦 
                mcastaddr: 226.94.1.1 
                mcastport: 5405 
        } 
}
logging { 
        fileline: off 
        to_stderr: no  //是否发送标准出错 
        to_logfile: yes  //日志 
        to_syslog: yes   //系统日志 (建议关掉一个),会降低性能 
        logfile: /var/log/cluster/corosync.log  //需要手动创建目录cluster 
        debug: off // 排错时可以打开 
        timestamp: on //日志中是否记录时间
//******以下是openais的东西,可以不用打开*****// 
        logger_subsys { 
                subsys: AMF 
                debug: off 
        }   
}
amf { 
        mode: disabled 
} 
//*********补充一些东西,前面只是底层的东西,因为要用pacemaker ******//
service { 
        ver: 0 
        name: pacemaker
use_mgmtd: yes 
} 
//******虽然用不到openais ,但是会用到一些子选项 ********//
aisexec { 
        user: root 
        group: root 
}
2:创建cluster目录
[root@node1 corosync]# mkdir /var/log/cluster
3:为了便面其他主机加入该集群,需要认证,生成一authkey
[root@node1 corosync]# corosync-keygen
[root@node1 corosync]# ll 
-rw-r--r-- 1 root root 5384 Jul 28  2010 amf.conf.example 
-r-------- 1 root root  128 May  8 14:09 authkey 
-rw-r--r-- 1 root root  538 May  8 14:08 corosync.conf 
-rw-r--r-- 1 root root  436 Jul 28  2010 corosync.conf.example 
drwxr-xr-x 2 root root 4096 Jul 28  2010 service.d 
drwxr-xr-x 2 root root 4096 Jul 28  2010 uidgid.d
4:将node1节点上的文件拷贝到节点node2上面(记住要带-p)
[root@node1 corosync]# scp -p authkey  corosync.conf node2:/etc/corosync/ 
authkey                                          100%  128     0.1KB/s   00:00    
corosync.conf                                    100%  513     0.5KB/s   00:00
[root@node1 corosync]# ssh node2 'mkdir /var/log/cluster'
5:在node1和node2节点上面启动 corosync 的服务
[root@node1 corosync]# service corosync start
6:验证corosync引擎是否正常启动了
[root@node1 corosync]# grep -i  -e "corosync cluster engine" -e "configuration file" /var/log/messages  
Oct 18 23:24:02 node1 smartd[2832]: Opened configuration file /etc/smartd.conf 
Oct 18 23:24:02 node1 smartd[2832]: Configuration file /etc/smartd.conf was parsed, found DEVICESCAN, scanning devices 
May  7 14:00:29 node1 smartd[2787]: Opened configuration file /etc/smartd.conf 
May  7 14:00:29 node1 smartd[2787]: Configuration file /etc/smartd.conf was parsed, found DEVICESCAN, scanning devices 
May  7 16:24:36 node1 corosync[686]:   [MAIN  ] Corosync Cluster Engine ('1.2.7'): started and ready to provide service. 
May  7 16:24:36 node1 corosync[686]:   [MAIN  ] Successfully read main configuration file '/etc/corosync/corosync.conf'.
7:查看初始化成员节点通知是否发出
[root@node1 corosync]# grep -i totem /var/log/messages 
May  7 16:24:36 node1 corosync[686]:   [TOTEM ] Initializing transport (UDP/IP). 
May  7 16:24:36 node1 corosync[686]:   [TOTEM ] Initializing transmit/receive security: libtomcrypt SOBER128/SHA1HMAC (mode 0). 
May  7 16:24:36 node1 corosync[686]:   [TOTEM ] The network interface is down. 
May  7 16:24:37 node1 corosync[686]:   [TOTEM ] A processor joined or left the membership and a new membership was formed. 
May  7 16:38:30 node1 corosync[754]:   [TOTEM ] Initializing transport (UDP/IP). 
May  7 16:38:30 node1 corosync[754]:   [TOTEM ] Initializing transmit/receive security: libtomcrypt SOBER128/SHA1HMAC (mode 0). 
May  7 16:38:30 node1 corosync[754]:   [TOTEM ] The network interface [192.168.2.10] is now up. 
May  7 16:38:31 node1 corosync[754]:   [TOTEM ] Process pause detected for 603 ms, flushing membership messages. 
May  7 16:38:31 node1 corosync[754]:   [TOTEM ] A processor joined or left the membership and a new membership was formed.
8:检查过程中是否有错误产生
[root@node1 corosync]# grep -i error: /var/log/messages |grep -v unpack_resources (避免stonith的错误)
9:检查pacemaker是否已经启动了
[root@node1 corosync]# grep -i pcmk_startup /var/log/messages 
May  7 16:24:36 node1 corosync[686]:   [pcmk  ] info: pcmk_startup: CRM: Initialized 
May  7 16:24:36 node1 corosync[686]:   [pcmk  ] Logging: Initialized pcmk_startup 
May  7 16:24:36 node1 corosync[686]:   [pcmk  ] info: pcmk_startup: Maximum core file size is: 4294967295 
May  7 16:24:36 node1 corosync[686]:   [pcmk  ] info: pcmk_startup: Service: 9 
May  7 16:24:36 node1 corosync[686]:   [pcmk  ] info: pcmk_startup: Local hostname: node1.a.com 
May  7 16:38:31 node1 corosync[754]:   [pcmk  ] info: pcmk_startup: CRM: Initialized 
May  7 16:38:31 node1 corosync[754]:   [pcmk  ] Logging: Initialized pcmk_startup
May  7 16:38:31 node1 corosync[754]:   [pcmk  ] info: pcmk_startup: Maximum core file size is: 4294967295 
May  7 16:38:31 node1 corosync[754]:   [pcmk  ] info: pcmk_startup: Service: 9 
May  7 16:38:31 node1 corosync[754]:   [pcmk  ] info: pcmk_startup: Local hostname: node1.a.com
node2:重复上面的5--9步骤
10:在node1上查看群集的状态
[root@node1 ~]# crm status
Last updated: Wed May 9 18:28:57 2012
Stack: openais
Current DC: node1.linuxidc.com - partition with quorum
Version: 1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f
2 Nodes configured, 2 expected votes
0 Resources configured.
============
Online: [ node1.linuxidc.com node2.linuxidc.com ]
