5. Copy the JDK to slave1 and slave2 with scp
[root@master ~]# scp -r /usr/local/jdk1.8.0_131/ root@slave1:/usr/local/
[root@master ~]# scp -r /usr/local/jdk1.8.0_131/ root@slave2:/usr/local/
6. Configure the environment variables on slave1 and slave2 (same as step 3); when done, verify with java -version. A sketch is shown below.
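A minimal sketch for slave1 (repeat on slave2), assuming step 3 appended JAVA_HOME and PATH entries to /etc/profile; the exact lines should match what was added on master:
[root@slave1 ~]# vim /etc/profile
[root@slave1 ~]# tail -3 /etc/profile
#java
export JAVA_HOME=/usr/local/jdk1.8.0_131
export PATH="$PATH:$JAVA_HOME/bin"
[root@slave1 ~]# source /etc/profile
[root@slave1 ~]# java -version    # should report 1.8.0_131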
IV. Configure the Hadoop environment
1. Extract Hadoop and move it to /usr/local
[root@master ~]# cd /home/hadoop/
[root@master hadoop]# ls
hadoop-2.7.3.tar.gz jdk-8u131-linux-x64.tar.gz
[root@master hadoop]# tar -zxf hadoop-2.7.3.tar.gz
[root@master hadoop]# mv hadoop-2.7.3 /usr/local/hadoop
[root@master hadoop]# ls /usr/local/
bin etc games hadoop include jdk1.8.0_131 lib lib64 libexec sbin share src
2. Change the owner of the Hadoop files
[root@master ~]# cd /usr/local
[root@master local]# chown -R hadoop:hadoop /usr/local/hadoop
[root@master local]# ll
drwxr-xr-x 9 hadoop hadoop 149 Aug 17 2016 hadoop
[root@master local]#
3. Configure the Hadoop environment variables
[root@master local]# vim /etc/profile
[root@master local]# tail -4 /etc/profile
#hadoop
export HADOOP_HOME=/usr/local/hadoop #mind the path
export PATH="$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin"
[root@master local]#
[root@master local]# source /etc/profile #make the changes take effect
4. Test
[root@master local]# hadoop version
Hadoop 2.7.3
Subversion https://git-wip-us.apache.org/repos/asf/hadoop.git -r baa91f7c6bc9cb92be5982de4719c1c8af91ccff
Compiled by root on 2016-08-18T01:41Z
Compiled with protoc 2.5.0
From source with checksum 2e4ce5f957ea4db193bce3734ff29ff4
This command was run using /usr/local/hadoop/share/hadoop/common/hadoop-common-2.7.3.jar
[root@master local]#
5. Configure hadoop-env.sh
[root@master local]# cd $HADOOP_HOME/etc/hadoop
[root@master hadoop]# pwd
/usr/local/hadoop/etc/hadoop
[root@master hadoop]#
[root@master hadoop]# vim hadoop-env.sh
[root@master hadoop]# tail -1 hadoop-env.sh
export JAVA_HOME=/usr/local/jdk1.8.0_131 #add at the end of the file
[root@master hadoop]#
6. Configure core-site.xml
<configuration>
    <!-- Specify the HDFS nameservice (NameNode address) -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://master:9000</value>
    </property>
</configuration>
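fs.defaultFS is the address clients use to reach the NameNode, so the hostname master must resolve on every node. A quick check, assuming master was already mapped in /etc/hosts during the earlier host setup:
[root@master hadoop]# getent hosts master    # should print the master node's IP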
7. Configure hdfs-site.xml
<configuration>
    <!-- Replication factor -->
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <!-- NameNode data directory; if it does not exist, create it manually and change its owner to hadoop (see the commands after this block) -->
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>/usr/local/hadoop/dfs/name</value>
    </property>
    <!-- DataNode data directory; if it does not exist, create it manually and change its owner to hadoop -->
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>/usr/local/hadoop/dfs/data</value>
    </property>
</configuration>
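As noted in the comments above, these directories are not created automatically; a sketch of creating them and handing ownership to the hadoop user:
[root@master hadoop]# mkdir -p /usr/local/hadoop/dfs/name /usr/local/hadoop/dfs/data
[root@master hadoop]# chown -R hadoop:hadoop /usr/local/hadoop/dfs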
8. Configure yarn-site.xml
<configuration>
    <!-- Address (hostname) of the YARN ResourceManager -->
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>master</value>
    </property>
    <!-- How reducers fetch data -->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
</configuration>
9. Configure mapred-site.xml
[root@master hadoop]# cp mapred-site.xml.template mapred-site.xml
[root@master hadoop]# vim mapred-site.xml
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
</configuration>
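With the four XML files edited, a quick well-formedness check can catch typos before going any further (assuming xmllint from libxml2 is installed; it is not part of the Hadoop tarball):
[root@master hadoop]# xmllint --noout core-site.xml hdfs-site.xml yarn-site.xml mapred-site.xml    # no output means every file parses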
10. Configure slaves
[root@master hadoop]# vim slaves
[root@master hadoop]# cat slaves
slave1
slave2
[root@master hadoop]#