zookeeper-kafka-hdfs-hbase-storm

—————————————————————————————————————————————————————————————————————————

存储库:
Sqlserver
Oracle
Redis

集群:
jdk --1.8
Kafka

Zookeeper https://archive.apache.org/dist/zookeeper/stable/ --3.4.12
Hdfs --2.9.0 source
Hbase --1.2.6.1
JStorm

——————————————————————————————— 安装: ——————————————————————————————— 

********************* 1.免密登录 *********************
ssh hostname

********************* 2.时间同步 *********************
2.1 ntp
2.2 date -s

********************* 3.Zookeeper *********************
tar -vxf zookeeper-3.4.12.tar.gz
mv zookeeper-3.4.12 /data/install/zookeeper3.4.12
cd /data/install/zookeeper3.4.12
mkdir zkData
[root@Node7 zookeeper3.4.12]# cd conf/
[root@Node7 conf]# mv zoo_sample.cfg zoo.cfg
[root@Node7 conf]# vim zoo.cfg
dataDir=/data/install/zookeeper3.4.12/zkData

server.1=Node7:2888:3888
server.2=Node8:2888:3888
server.3=Node9:2888:3888

[root@Node7 conf]# cd /data/install/zookeeper3.4.12/zkData
[root@Node7 conf]# vim myid
1

[root@Node7 data]# scp -r install root@Node8:/data/.
[root@Node7 data]# scp -r install root@Node9:/data/.

[root@Node8 conf]# cd /data/install/zookeeper3.4.12/zkData
[root@Node8 conf]# vim myid
2

[root@Node9 conf]# cd /data/install/zookeeper3.4.12/zkData
[root@Node9 conf]# vim myid
3

[root@Node7 bin]# ./zkServer.sh start
[root@Node8 bin]# ./zkServer.sh start
[root@Node9 bin]# ./zkServer.sh start

[root@Node7 bin]# ./zkServer.sh status
jps


********************* 4.Hadoop-Hdfs *********************

[root@Node7 softwares]# tar -vxf hadoop-2.9.0.tar.gz
[root@Node7 hadoop]# vim hadoop-env.sh
export JAVA_HOME=/opt/jdk1.8.0_171

[root@Node7 hadoop]# vim core-site.xml

<property>
<name>hadoop.tmp.dir</name>
<value>/data/install/hadoop-2.9.0/tmp</value>
</property>
<property>
<!-- fs.default.name 已废弃, 规范写法为 fs.defaultFS -->
<name>fs.defaultFS</name>
<value>hdfs://Node7:9000</value>
</property>


[root@Node7 hadoop]# vim hdfs-site.xml

<property>
<!-- dfs.name.dir / dfs.data.dir 已废弃, 规范写法如下 -->
<name>dfs.namenode.name.dir</name>
<value>/data/install/hadoop-2.9.0/namenodedata</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/data/install/hadoop-2.9.0/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>

[root@Node7 hadoop]# vim slaves
Node7
Node8
Node9

[root@Node7 install]# scp -r hadoop-2.9.0 root@Node8:/data/install/.
[root@Node7 install]# scp -r hadoop-2.9.0 root@Node9:/data/install/.

[root@Node7 bin]# ./hdfs namenode -format

[root@Node7 sbin]# ./start-dfs.sh

jps
NameNode
SecondaryNameNode
DataNode


********************* 5.Hbase *********************

[root@Node7 conf]# vim hbase-env.sh

export JAVA_HOME=/opt/jdk1.8.0_171/
export HBASE_MANAGES_ZK=false

[root@Node7 conf]# vim hbase-site.xml

<property>
<name>hbase.rootdir</name>
<value>hdfs://Node7:9000/hbase</value>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>Node7,Node8,Node9</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>/data/install/zookeeper3.4.12/zkData</value>
</property>
<property>
<name>hbase.master.info.port</name>
<value>60010</value> <!-- web UI 端口 -->
</property>


[root@Node7 conf]# vim regionservers
Node7
Node8
Node9

[root@Node7 conf]# cat /etc/profile
export PATH=$PATH:/data/install/hbase-1.2.6.1/bin

jps
HMaster
HRegionServer
--HQuorumPeer 使用自带zk


********************* HBase

=======>>> 建表预分区

create 't1', 'f1', SPLITS => ['10', '20', '30', '40']
-- 指定文件
create 'split_table_test', 'cf', {SPLITS_FILE => 'region_split_info.txt'}
-- 对表SNAPPY压缩
create 'split_table_test',{NAME =>'cf', COMPRESSION => 'SNAPPY'}, {SPLITS_FILE => '/data/hbase-1.2.6.1/bin/region_split_info.txt'}

create 'GpsData',{NAME =>'cf', COMPRESSION => 'SNAPPY'}, {SPLITS_FILE => '/data/install/hbase-1.2.6.1/conf/VeArea_region_split_infoJS.txt'}

拷贝文件夹 Linux-amd64-64 否则不能压缩。
export HBASE_LIBRARY_PATH=/data/install/hbase-1.2.6.1/lib/Linux-amd64-64


********************* 6.Jstorm *********************
6.1 配置 storm.yaml
6.2 启动进程
bin/jstorm nimbus &          # 在 nimbus 节点执行
bin/jstorm supervisor &      # 在每台 supervisor 节点各执行一次

--pom.xml -- provided

../bin/jstorm jar storm_kafka_hdfs-4.0-GpsRealtime.jar StormHbase.StormHbaseTopo gpstopo --exclude-jars slf4j-log4j

../bin/jstorm kill gpstopo

内容版权声明:除非注明,否则皆为本站原创文章。

转载注明出处:https://www.heiqu.com/zzdgsw.html