Hadoop 1.0.0 and HBase 0.92.0 Three-Node Installation

Versions: Hadoop-1.0.0, hbase-0.92.0

Install the Hadoop cluster on nodes 195, 196, and 197.
Resulting layout: 195 -> namenode
                  196 -> jobtracker, datanode
                  197 -> SecondaryNameNode, datanode
1. Create the hadoop user
groupadd hadoop  
useradd hadoop -g hadoop  
 
passwd hadoop  
 
mkdir -p /opt/hadoop  
mkdir -p /opt/data/hadoop1  
chown hadoop:hadoop /opt/hadoop  
chown hadoop:hadoop /opt/data/hadoop1  
 
2. Edit /etc/hosts
vi /etc/hosts  
10.10.249.195   master  
10.10.249.196   slaver1  
10.10.249.197   slaver2  
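
To sanity-check name resolution on each node (plain OS tools, nothing Hadoop-specific):
getent hosts master slaver1 slaver2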
 
3. Generate a key pair (as hadoop, on each of the three nodes)
su - hadoop  
 
ssh-keygen -t rsa  
 
cd /home/hadoop/.ssh  
 
cat id_rsa.pub > authorized_keys  
chmod 700 /home/hadoop/.ssh
chmod 600 *  
chmod 644 authorized_keys  
 
4. Configure passwordless SSH login
master:  
cd /home/hadoop/.ssh  
scp id_rsa.pub hadoop@10.10.249.196:~/.ssh/pubkey_master
scp id_rsa.pub hadoop@10.10.249.197:~/.ssh/pubkey_master
slaver1:  
cd /home/hadoop/.ssh  
cat pubkey_master  >> authorized_keys   
 
scp id_rsa.pub hadoop@master:~/.ssh/pubkey_slaver1
scp id_rsa.pub hadoop@slaver2:~/.ssh/pubkey_slaver1
slaver2:  
cd /home/hadoop/.ssh  
cat pubkey_master  >> authorized_keys   
cat pubkey_slaver1  >> authorized_keys   
scp id_rsa.pub hadoop@master:~/.ssh/pubkey_slaver2
 
master:  
cat pubkey_slaver1 >> authorized_keys   
cat pubkey_slaver2 >> authorized_keys   
 
# As root, edit /etc/ssh/sshd_config and set: PasswordAuthentication no
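
To verify, each node should now reach the others without a password prompt, e.g. from master:
ssh slaver1 hostname
ssh slaver2 hostname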
 
5. Install Hadoop
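
A minimal sketch of unpacking the release, assuming the tarball was downloaded to /opt/hadoop and using the JAVA_HOME path this guide later sets for HBase:
cd /opt/hadoop
tar -xzf hadoop-1.0.0.tar.gz
echo "export JAVA_HOME=/export/servers/jdk1.6.0_25" >> hadoop-1.0.0/conf/hadoop-env.sh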
 
conf/core-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>  
<property>  
  <name>fs.default.name</name>  
  <value>hdfs://master:54310</value>  
</property>  
<property>  
  <name>hadoop.tmp.dir</name>  
  <value>/home/hadoop/hadoop_home/var</value>  
</property>  
</configuration>  
 
conf/mapred-site.xml  
<?xml version="1.0"?>  
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>  
<property>  
  <name>mapred.job.tracker</name>  
  <value>slaver1:54311</value>  
</property>  
<property>  
  <name>mapred.local.dir</name>  
  <value>/home/hadoop/hadoop_home/var</value>  
</property>  
</configuration>  
 
conf/hdfs-site.xml  
<?xml version="1.0"?>  
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>  
<property>  
  <name>dfs.replication</name>  
  <value>3</value>  
</property>  
</configuration>  
 
 
echo slaver2 > masters    # the masters file lists the SecondaryNameNode host, not the NameNode
echo slaver1 > slaves
echo slaver2 >> slaves
 
Apply the same configuration on 196 and 197.
 
master:  
 
cd /opt/hadoop/hadoop-1.0.0/bin  
./hadoop namenode -format  
 
./start-all.sh
# Note: start-mapred.sh launches the JobTracker on the host it runs on; since mapred.job.tracker
# points at slaver1, run bin/start-mapred.sh on slaver1 (and bin/start-dfs.sh on master)
# rather than start-all.sh on master alone.

cd /opt/hadoop/hadoop-1.0.0
./bin/hadoop dfs -copyFromLocal conf/ /user/hadoop/bookdata
./bin/hadoop jar hadoop-examples-1.0.0.jar wordcount /user/hadoop/bookdata /user/hadoop/book_data_out
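
Once the job completes, the counts can be read back from HDFS (reducer output lands in the usual part-* files):
./bin/hadoop fs -cat /user/hadoop/book_data_out/part-* | head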
 
Default web UI ports:
JobTracker         50030
TaskTracker        50060
NameNode           50070
DataNode           50075
SecondaryNameNode  50090
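
A quick liveness check from the shell, assuming the layout above (NameNode UI on master, JobTracker UI on slaver1):
curl -s -o /dev/null -w "%{http_code}\n" http://master:50070/
curl -s -o /dev/null -w "%{http_code}\n" http://slaver1:50030/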
 
hadoop fs -mkdir /user/test  
hadoop fs -chown test:test /user/test  
hadoop dfsadmin -setSpaceQuota 1t /user/test  
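
fs -count -q prints the quota and remaining-quota columns, so the new limit can be confirmed:
hadoop fs -count -q /user/test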
 
 
HBase installation steps
 
master:195 
RegionServer:196,197 
 
 
1. On all three nodes, add the following to Hadoop's hdfs-site.xml:
   
<property>
  <name>dfs.support.append</name>
  <value>true</value>
</property>
<property>
  <name>dfs.datanode.max.xcievers</name>
  <value>4096</value>
</property>
 
Restart HDFS.
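
A minimal restart sequence, run on master:
cd /opt/hadoop/hadoop-1.0.0/bin
./stop-dfs.sh
./start-dfs.sh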
 
# Replace the hadoop jar under HBase's lib/ with the hadoop jar used by the HDFS cluster
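
A sketch of the jar swap, assuming the paths used in this guide; the exact name of the jar bundled in HBase 0.92.0's lib/ may differ:
cd /opt/hadoop/hbase-0.92.0
rm lib/hadoop-core-*.jar
cp /opt/hadoop/hadoop-1.0.0/hadoop-core-1.0.0.jar lib/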
 
 
2. Edit hbase-site.xml
 
<configuration>   
  <property>  
    <name>hbase.rootdir</name>  
    <value>hdfs://master:54310/hbase</value>  
    <description>The directory shared by RegionServers.  
    </description>  
  </property>  
  <property>  
    <name>hbase.cluster.distributed</name>  
    <value>true</value>  
    <description>The mode the cluster will be in. Possible values are  
      false: standalone and pseudo-distributed setups with managed Zookeeper  
      true: fully-distributed with unmanaged Zookeeper Quorum (see hbase-env.sh)  
    </description>  
  </property>  
  <property>
    <name>dfs.support.append</name>
    <value>true</value>
  </property>
   
</configuration>  
 
3. Edit the regionservers file (listing master here means 195 also runs a RegionServer):
echo master > regionservers  
echo slaver1 >> regionservers  
echo slaver2 >> regionservers  
 
4. Edit hbase-env.sh:
 export HBASE_MANAGES_ZK=false   # a separately installed ZooKeeper cluster is used
   
 export JAVA_HOME=/export/servers/jdk1.6.0_25  
5. Copy the ZooKeeper cluster's zoo.cfg into HBase's conf directory.
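
For example, assuming a hypothetical ZooKeeper install location of /opt/zookeeper:
cp /opt/zookeeper/conf/zoo.cfg /opt/hadoop/hbase-0.92.0/conf/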
 
 
6. Copy the configured HBase to /opt/hadoop on slaver1 and slaver2.
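
For example:
scp -r /opt/hadoop/hbase-0.92.0 hadoop@slaver1:/opt/hadoop/
scp -r /opt/hadoop/hbase-0.92.0 hadoop@slaver2:/opt/hadoop/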
 
7. Raise the open-file-handle limit; run on all three machines.
su root  
echo -ne "  
* soft nofile 65536 
* hard nofile 65536 
" >>/etc/security/limits.conf  
 
 
Installation complete. Start HBase:
cd  /opt/hadoop/hbase-0.92.0/bin  
 
./start-hbase.sh    
 
The HBase master web UI listens on port 60010 by default; the RegionServer web UI on 60030.
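
A quick smoke test from the HBase shell:
./hbase shell
hbase> status
hbase> create 't1', 'f1'
hbase> list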
