<property>
  <name>yarn.resourcemanager.store.class</name>
  <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
<property>
  <name>yarn.resourcemanager.zk-address</name>
  <value>dnzk1:2181,dnzk2:2181,dnzk3:2181</value>
</property>
<property>
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce_shuffle</value>
</property>
root@nn1:/opt/hadoop# vim /opt/hadoop/etc/hadoop/slaves
Add:
dnzk1
dnzk2
dnzk3
Install Spark
root@nn1:/opt/spark/conf# vim spark-env.sh
Add:
export SPARK_MASTER_IP=nn1
export SPARK_WORKER_MEMORY=256m
export JAVA_HOME=/usr/lib/jvm/java-8-sun
export SCALA_HOME=/opt/scala
export SPARK_HOME=/opt/spark
export HADOOP_CONF_DIR=/opt/hadoop/etc/hadoop
export SPARK_LIBRARY_PATH=$SPARK_HOME/lib
export SCALA_LIBRARY_PATH=$SPARK_LIBRARY_PATH
export SPARK_WORKER_CORES=1
export SPARK_WORKER_INSTANCES=1
export SPARK_MASTER_PORT=7077
root@nn1:/opt/spark/conf# vim slaves
Add the worker hostnames (the same three data nodes as in the Hadoop slaves file):
dnzk1
dnzk2
dnzk3
Install ZooKeeper
Create the directory /opt/zookeeper/tmp
Create the file /opt/zookeeper/tmp/myid:
echo 1 > /opt/zookeeper/tmp/myid
root@nn1:/opt/zookeeper/conf# vim zoo.cfg
Modify:
dataDir=/opt/zookeeper/tmp
server.1=dnzk1:2888:3888
server.2=dnzk2:2888:3888
server.3=dnzk3:2888:3888
Generate the SSH key
ssh-keygen -t dsa
Append id_dsa.pub to the host machine's /home/docker/config/authorized_keys file:
root@nn1:/opt/hadoop# cat ~/.ssh/id_dsa.pub
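For example, copy the key that the cat command prints and append it on the host (the key string below is a placeholder; substitute your own output):
echo "ssh-dss AAAA... root@nn1" >> /home/docker/config/authorized_keys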
Run:
sudo docker commit -m "namenode1" installspark ubuntu:ns1
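To confirm the commit produced the new image:
sudo docker images | grep ns1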
Edit the local host machine's /home/docker/config/hosts file.
Add:
172.17.0.11 nn1
172.17.0.12 nn2
172.17.0.13 rm1
172.17.0.14 rm2
172.17.0.15 dnzk1
172.17.0.16 dnzk2
172.17.0.17 dnzk3
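Since pipework assigns these IPs only after the containers start, one way to propagate the file (a sketch, assuming the -v /home/docker/config/:/config mount used in the docker run commands below) is to append it from inside each container:
cat /config/hosts >> /etc/hosts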
Start the Docker containers
sudo docker run --name dnzk1 -h dnzk1 --net=none -p 2185:2181 -p 50075:50070 -p 9005:9000 -p 8485:8485 -p 7075:7077 -p 2885:2888 -v /home/docker/config/:/config -it spark1_7-hadoop2_7_1-scala1_1:basic
sudo docker run --name dnzk2 -h dnzk2 --net=none -p 2186:2181 -p 50076:50070 -p 9006:9000 -p 8486:8485 -p 7076:7077 -p 2886:2888 -v /home/docker/config/:/config -it spark1_7-hadoop2_7_1-scala1_1:basic
sudo docker run --name dnzk3 -h dnzk3 --net=none -p 2187:2181 -p 50077:50070 -p 9007:9000 -p 8487:8485 -p 7077:7077 -p 2887:2888 -v /home/docker/config/:/config -it spark1_7-hadoop2_7_1-scala1_1:basic
sudo docker run --name nn1 -h nn1 --net=none -p 2181:2181 -p 50071:50070 -p 9001:9000 -p 8481:8485 -p 7071:7077 -p 2881:2888 -v /home/docker/config/:/config -it spark1_7-hadoop2_7_1-scala1_1:basic
sudo docker run --name nn2 -h nn2 --net=none -p 2182:2181 -p 50072:50070 -p 9002:9000 -p 8482:8485 -p 7072:7077 -p 2882:2888 -v /home/docker/config/:/config -it spark1_7-hadoop2_7_1-scala1_1:basic
sudo docker run --name rm1 -h rm1 --net=none -p 2183:2181 -p 50073:50070 -p 9003:9000 -p 8483:8485 -p 7073:7077 -p 2883:2888 -v /home/docker/config/:/config -it spark1_7-hadoop2_7_1-scala1_1:basic
sudo docker run --name rm2 -h rm2 --net=none -p 2184:2181 -p 50074:50070 -p 9004:9000 -p 8484:8485 -p 7074:7077 -p 2884:2888 -v /home/docker/config/:/config -it spark1_7-hadoop2_7_1-scala1_1:basic
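Confirm all seven containers are running:
sudo docker ps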
On dnzk2, run echo 2 > /opt/zookeeper/tmp/myid; on dnzk3, run echo 3 > /opt/zookeeper/tmp/myid.
Configure the network
sudo pipework docker0 -i eth0 nn1 172.17.0.11/16
sudo pipework docker0 -i eth0 nn2 172.17.0.12/16
sudo pipework docker0 -i eth0 rm1 172.17.0.13/16
sudo pipework docker0 -i eth0 rm2 172.17.0.14/16
sudo pipework docker0 -i eth0 dnzk1 172.17.0.15/16
sudo pipework docker0 -i eth0 dnzk2 172.17.0.16/16
sudo pipework docker0 -i eth0 dnzk3 172.17.0.17/16
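To sanity-check the network, from inside any container (e.g. nn1) verify eth0 and ping a peer by name (this assumes the hosts entries above have been applied to /etc/hosts):
ip addr show eth0
ping -c 3 dnzk1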
Start the Hadoop cluster
On dnzk1/dnzk2/dnzk3, start ZooKeeper and the Hadoop JournalNode:
/opt/zookeeper/bin/zkServer.sh start
/opt/hadoop/sbin/hadoop-daemon.sh start journalnode
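On each of the three nodes you can confirm the quorum and the JournalNode with:
/opt/zookeeper/bin/zkServer.sh status
jps
zkServer.sh status should report one leader and two followers, and jps should list QuorumPeerMain and JournalNode.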
On nn1, format HDFS and ZooKeeper, then start HDFS:
/opt/hadoop/bin/hdfs namenode -format
scp -r /opt/hadoop/namenode/ nn2:/opt/hadoop/
or, on nn2:
/opt/hadoop/bin/hdfs namenode -bootstrapStandby
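Note: -bootstrapStandby requires the freshly formatted NameNode on nn1 to be running; if it is not yet up, start it on nn1 first:
/opt/hadoop/sbin/hadoop-daemon.sh start namenode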
/opt/hadoop/bin/hdfs zkfc -formatZK
/opt/hadoop/sbin/start-dfs.sh
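Once HDFS is up, the HA state can be checked with the standard haadmin command (the service IDs nn1/nn2 are assumed to match the earlier hdfs-site.xml configuration):
/opt/hadoop/bin/hdfs haadmin -getServiceState nn1
/opt/hadoop/bin/hdfs haadmin -getServiceState nn2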
On rm1, start YARN:
/opt/hadoop/sbin/start-yarn.sh
On rm2, start the standby ResourceManager:
/opt/hadoop/sbin/yarn-daemon.sh start resourcemanager
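Likewise, YARN HA state can be verified (assuming the ResourceManager IDs rm1/rm2 from yarn-site.xml):
/opt/hadoop/bin/yarn rmadmin -getServiceState rm1
/opt/hadoop/bin/yarn rmadmin -getServiceState rm2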
Start Spark
/opt/spark/sbin/start-all.sh
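As a quick smoke test, attach a shell to the standalone master on the port set in spark-env.sh above:
/opt/spark/bin/spark-shell --master spark://nn1:7077
You can also open the master web UI on nn1:8080 to see the registered workers.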
Check the NameNode web UIs:
http://nn1:50070 (active)
http://nn2:50070 (standby)
Cluster services after startup:
nn1    172.17.0.11  jdk, hadoop             NameNode, DFSZKFailoverController (zkfc)
nn2    172.17.0.12  jdk, hadoop             NameNode, DFSZKFailoverController (zkfc)
rm1    172.17.0.13  jdk, hadoop             ResourceManager
rm2    172.17.0.14  jdk, hadoop             ResourceManager
dnzk1  172.17.0.15  jdk, hadoop, zookeeper  DataNode, NodeManager, JournalNode, QuorumPeerMain
dnzk2  172.17.0.16  jdk, hadoop, zookeeper  DataNode, NodeManager, JournalNode, QuorumPeerMain
dnzk3  172.17.0.17  jdk, hadoop, zookeeper  DataNode, NodeManager, JournalNode, QuorumPeerMain
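To verify this layout, run jps inside each container and compare the Java processes against the table; on dnzk1, for example, you should see DataNode, NodeManager, JournalNode, and QuorumPeerMain:
jps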