
Hands-on: a Hadoop 2.6.3 + ZooKeeper 3.4.6 + HBase 1.0.2 high-availability cluster

I. Preparation before installation

1. Environment: five servers

(Screenshot of the five-server environment: http://s4.51cto.com/wyfs02/M00/85/8A/wKioL1eoFfKBytUEAABpzvGSCME119.png-wh_500x0-wm_3-wmp_4-s_970805712.png)

2. Edit the hosts file

[root@hadoop01 ~]# cat /etc/hosts

192.168.10.201 hadoop01
192.168.10.202 hadoop02
192.168.10.203 hadoop03
192.168.10.204 hadoop04
192.168.10.205 hadoop05

3. Passwordless SSH login

Run on every node:

[root@hadoop01 ~]# mkdir ~/.ssh

[root@hadoop01 ~]# chmod 700 ~/.ssh

[root@hadoop01 ~]# cd ~/.ssh/

[root@hadoop01 .ssh]# ssh-keygen -t rsa

After all five nodes are done, build the combined public-key file on hadoop01:

[root@hadoop01 .ssh]# ssh hadoop02 cat /root/.ssh/id_rsa.pub >> authorized_keys

[root@hadoop01 .ssh]# ssh hadoop03 cat /root/.ssh/id_rsa.pub >> authorized_keys

[root@hadoop01 .ssh]# ssh hadoop04 cat /root/.ssh/id_rsa.pub >> authorized_keys

[root@hadoop01 .ssh]# ssh hadoop05 cat /root/.ssh/id_rsa.pub >> authorized_keys

[root@hadoop01 .ssh]# ssh hadoop01 cat /root/.ssh/id_rsa.pub >> authorized_keys

[root@hadoop01 .ssh]# chmod 600 authorized_keys

[root@hadoop01 .ssh]# scp authorized_keys hadoop02:/root/.ssh/

[root@hadoop01 .ssh]# scp authorized_keys hadoop03:/root/.ssh/

[root@hadoop01 .ssh]# scp authorized_keys hadoop04:/root/.ssh/

[root@hadoop01 .ssh]# scp authorized_keys hadoop05:/root/.ssh/

Test the SSH trust:

[root@hadoop01 .ssh]# ssh hadoop02 date

Mon Aug  8 11:07:23 CST 2016

[root@hadoop01 .ssh]# ssh hadoop03 date

Mon Aug  8 11:07:26 CST 2016

[root@hadoop01 .ssh]# ssh hadoop04 date

Mon Aug  8 11:07:29 CST 2016

[root@hadoop01 .ssh]# ssh hadoop05 date

4. Time synchronization (all five nodes)

yum -y install ntpdate

[root@hadoop01 .ssh]# crontab -l

0 * * * * /usr/sbin/ntpdate 0.rhel.pool.ntp.org && /sbin/clock -w

Other time-synchronization schemes can be used instead; one option is sketched below.
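For example, on a RHEL/CentOS 6-era system a persistent ntpd service could replace the hourly cron job (a sketch; the package and service names assume that platform):

yum -y install ntp

chkconfig ntpd on

service ntpd start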

5. Raise the open-file and process limits (all five nodes)

[root@hadoop01 ~]# vi /etc/security/limits.conf

root soft nofile 65535

root hard nofile 65535

root soft nproc 32000

root hard nproc 32000

[root@hadoop01 ~]# vi /etc/pam.d/login

session  required        pam_limits.so

Reboot the system after making these changes.
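After the reboot, the new limits can be verified; ulimit should report the values set in limits.conf above (65535 open files, 32000 processes):

[root@hadoop01 ~]# ulimit -n

[root@hadoop01 ~]# ulimit -u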

II. Install Hadoop + ZooKeeper HA

1. Install the JDK (all five nodes)

Unpack the JDK:

[root@hadoop01 ~]# cd /opt

[root@hadoop01 opt]# tar zxvf jdk-7u21-linux-x64.tar.gz

[root@hadoop01 opt]# mv jdk1.7.0_21 jdk

Add it to the environment variables in /etc/profile:

[root@hadoop01 opt]# vi /etc/profile

#java

JAVA_HOME=/opt/jdk

PATH=$JAVA_HOME/bin:$PATH

CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export JAVA_HOME

export PATH

export CLASSPATH

Make the profile take effect:

[root@hadoop01 opt]# source /etc/profile 

[root@hadoop01 opt]# java -version

java version "1.7.0_21"

Java(TM) SE Runtime Environment (build 1.7.0_21-b11)

Java HotSpot(TM) 64-Bit Server VM (build 23.21-b01, mixed mode)

The output above confirms the JDK is in effect.

2. Unpack Hadoop and update the environment variables

[root@hadoop01 ~]# tar zxvf hadoop-2.6.3.tar.gz

[root@hadoop01 ~]# mkdir /data

[root@hadoop01 ~]# mv hadoop-2.6.3 /data/hadoop

[root@hadoop01 data]# vi /etc/profile

##hadoop

export HADOOP_HOME=/data/hadoop

export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

[root@hadoop01 data]# source /etc/profile

3. Edit the Hadoop configuration files

[root@hadoop01 data]# cd /data/hadoop/etc/hadoop/

[root@hadoop01 hadoop]# vi slaves

hadoop01

hadoop02

hadoop03

hadoop04

hadoop05

hadoop01 and hadoop02 are listed here as well so that their disk space is used too; include them only if you do not mind running DataNodes on the NameNode hosts.

[root@hadoop01 hadoop]# vi hadoop-env.sh

(Screenshot of the hadoop-env.sh edit, typically setting export JAVA_HOME=/opt/jdk: http://s2.51cto.com/wyfs02/M01/85/8A/wKiom1eoFqOBSHdOAAA2BFMOv5w357.png-wh_500x0-wm_3-wmp_4-s_4223428174.png)

[root@hadoop01 hadoop]# vi yarn-env.sh

Edit core-site.xml:

[root@hadoop01 hadoop]# vi core-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
        <property>
                <name>fs.defaultFS</name>
                <value>hdfs://cluster</value>
                <description>The name of the default file system.</description>
                <final>true</final>
        </property>
        <property>
                <name>hadoop.tmp.dir</name>
                <value>/data/hadoop/tmp</value>
        </property>
        <property>
                <name>ha.zookeeper.quorum</name>
                <value>hadoop01:2190,hadoop02:2190,hadoop03:2190,hadoop04:2190,hadoop05:2190</value>
        </property>
        <property>
                <name>io.file.buffer.size</name>
                <value>2048</value>
        </property>
        <property>
                <name>dfs.ha.fencing.methods</name>
                <value>sshfence</value>
        </property>
        <property>
                <name>dfs.ha.fencing.ssh.private-key-files</name>
                <value>/root/.ssh/id_rsa</value>
        </property>
</configuration>

Edit hdfs-site.xml:

[root@hadoop01 hadoop]# vi hdfs-site.xml

<configuration>
        <property>
                <name>dfs.nameservices</name>
                <value>cluster</value>
        </property>
        <property>
                <name>dfs.ha.namenodes.cluster</name>
                <value>nn1,nn2</value>
        </property>
        <property>
                <name>dfs.namenode.rpc-address.cluster.nn1</name>
                <value>hadoop01:8020</value>
        </property>
        <property>
                <name>dfs.namenode.rpc-address.cluster.nn2</name>
                <value>hadoop02:8020</value>
        </property>
        <property>
                <name>dfs.namenode.http-address.cluster.nn1</name>
                <value>hadoop01:50070</value>
        </property>
        <property>
                <name>dfs.namenode.http-address.cluster.nn2</name>
                <value>hadoop02:50070</value>
        </property>
        <property>
                <name>dfs.namenode.servicerpc-address.cluster.nn1</name>
                <value>hadoop01:53333</value>
        </property>
        <property>
                <name>dfs.namenode.servicerpc-address.cluster.nn2</name>
                <value>hadoop02:53333</value>
        </property>
        <property>
                <name>dfs.namenode.shared.edits.dir</name>
                <value>qjournal://hadoop01:8485;hadoop02:8485;hadoop03:8485;hadoop04:8485;hadoop05:8485/cluster</value>
        </property>
        <property>
                <name>dfs.client.failover.proxy.provider.cluster</name>
                <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
        </property>
        <property>
                <name>dfs.journalnode.edits.dir</name>
                <value>/data/hadoop/mydata/journal</value>
        </property>
        <property>
                <name>dfs.replication</name>
                <value>3</value>
        </property>
        <property>
                <name>dfs.namenode.name.dir</name>
                <value>file:/data/hadoop/mydata/name</value>
        </property>
        <property>
                <name>dfs.datanode.data.dir</name>
                <value>file:/data/hadoop/mydata/data</value>
        </property>
        <property>
                <name>dfs.ha.automatic-failover.enabled</name>
                <value>true</value>
        </property>
        <property>
                <name>dfs.webhdfs.enabled</name>
                <value>true</value>
        </property>
        <property>
                <name>dfs.journalnode.http-address</name>
                <value>0.0.0.0:8480</value>
        </property>
        <property>
                <name>dfs.journalnode.rpc-address</name>
                <value>0.0.0.0:8485</value>
        </property>
        <property>
                <name>dfs.permissions</name>
                <value>false</value>
        </property>
</configuration>

Edit mapred-site.xml:

[root@hadoop01 hadoop]# vi mapred-site.xml

<?xml version="1.0"?>
<configuration>
        <property>
                <name>mapreduce.framework.name</name>
                <value>yarn</value>
        </property>
        <property>
                <name>mapreduce.cluster.temp.dir</name>
                <value>/data/hadoop/mydata/mr_temp</value>
        </property>
        <property>
                <name>mapreduce.jobhistory.address</name>
                <value>hadoop01:10020</value>
        </property>
        <property>
                <name>mapreduce.jobhistory.webapp.address</name>
                <value>hadoop01:19888</value>
        </property>
</configuration>

Edit yarn-site.xml:

[root@hadoop01 hadoop]# vi yarn-site.xml

<?xml version="1.0"?>
<configuration>

<!-- Site specific YARN configuration properties -->

        <property>
                <name>yarn.resourcemanager.connect.retry-interval.ms</name>
                <value>60000</value>
        </property>
        <property>
                <name>yarn.resourcemanager.ha.enabled</name>
                <value>true</value>
        </property>
        <property>
                <name>yarn.resourcemanager.cluster-id</name>
                <value>rm-cluster</value>
        </property>
        <property>
                <name>yarn.resourcemanager.ha.rm-ids</name>
                <value>rm1,rm2</value>
        </property>
        <property>
                <name>yarn.resourcemanager.ha.id</name>
                <value>rm1</value>
        </property>
        <property>
                <name>yarn.resourcemanager.hostname.rm1</name>
                <value>hadoop01</value>
        </property>
        <property>
                <name>yarn.resourcemanager.hostname.rm2</name>
                <value>hadoop02</value>
        </property>
        <property>
                <name>yarn.resourcemanager.recovery.enabled</name>
                <value>true</value>
        </property>
        <property>
                <name>yarn.resourcemanager.store.class</name>
                <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
        </property>
        <property>
                <name>yarn.resourcemanager.zk-address</name>
                <value>hadoop01:2190,hadoop02:2190,hadoop03:2190,hadoop04:2190,hadoop05:2190</value>
        </property>
        <property>
                <name>yarn.resourcemanager.address.rm1</name>
                <value>${yarn.resourcemanager.hostname.rm1}:23140</value>
        </property>
        <property>
                <name>yarn.resourcemanager.scheduler.address.rm1</name>
                <value>${yarn.resourcemanager.hostname.rm1}:23130</value>
        </property>
        <property>
                <name>yarn.resourcemanager.webapp.https.address.rm1</name>
                <value>${yarn.resourcemanager.hostname.rm1}:23189</value>
        </property>
        <property>
                <name>yarn.resourcemanager.webapp.address.rm1</name>
                <value>${yarn.resourcemanager.hostname.rm1}:23188</value>
        </property>
        <property>
                <name>yarn.resourcemanager.resource-tracker.address.rm1</name>
                <value>${yarn.resourcemanager.hostname.rm1}:23125</value>
        </property>
        <property>
                <name>yarn.resourcemanager.admin.address.rm1</name>
                <value>${yarn.resourcemanager.hostname.rm1}:23141</value>
        </property>
        <property>
                <name>yarn.resourcemanager.address.rm2</name>
                <value>${yarn.resourcemanager.hostname.rm2}:23140</value>
        </property>
        <property>
                <name>yarn.resourcemanager.scheduler.address.rm2</name>
                <value>${yarn.resourcemanager.hostname.rm2}:23130</value>
        </property>
        <property>
                <name>yarn.resourcemanager.webapp.https.address.rm2</name>
                <value>${yarn.resourcemanager.hostname.rm2}:23189</value>
        </property>
        <property>
                <name>yarn.resourcemanager.webapp.address.rm2</name>
                <value>${yarn.resourcemanager.hostname.rm2}:23188</value>
        </property>
        <property>
                <name>yarn.resourcemanager.resource-tracker.address.rm2</name>
                <value>${yarn.resourcemanager.hostname.rm2}:23125</value>
        </property>
        <property>
                <name>yarn.resourcemanager.admin.address.rm2</name>
                <value>${yarn.resourcemanager.hostname.rm2}:23141</value>
        </property>
        <property>
                <name>yarn.resourcemanager.scheduler.class</name>
                <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
        </property>
        <property>
                <name>yarn.scheduler.fair.allocation.file</name>
                <value>${yarn.home.dir}/etc/hadoop/fairscheduler.xml</value>
        </property>
        <property>
                <name>yarn.nodemanager.local-dirs</name>
                <value>/data/hadoop/mydata/yarn_local</value>
        </property>
        <property>
                <name>yarn.nodemanager.log-dirs</name>
                <value>/data/hadoop/mydata/yarn_log</value>
        </property>
        <property>
                <name>yarn.nodemanager.remote-app-log-dir</name>
                <value>/data/hadoop/mydata/yarn_remotelog</value>
        </property>
        <property>
                <name>yarn.app.mapreduce.am.staging-dir</name>
                <value>/data/hadoop/mydata/yarn_userstag</value>
        </property>
        <property>
                <name>mapreduce.jobhistory.intermediate-done-dir</name>
                <value>/data/hadoop/mydata/yarn_intermediatedone</value>
        </property>
        <property>
                <name>mapreduce.jobhistory.done-dir</name>
                <value>/data/hadoop/mydata/yarn_done</value>
        </property>
        <property>
                <name>yarn.log-aggregation-enable</name>
                <value>true</value>
        </property>
        <property>
                <name>yarn.nodemanager.resource.memory-mb</name>
                <value>2048</value>
        </property>
        <property>
                <name>yarn.nodemanager.vmem-pmem-ratio</name>
                <value>4.2</value>
        </property>
        <property>
                <name>yarn.nodemanager.resource.cpu-vcores</name>
                <value>2</value>
        </property>
        <property>
                <name>yarn.nodemanager.aux-services</name>
                <value>mapreduce_shuffle</value>
        </property>
        <property>
                <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
                <value>org.apache.hadoop.mapred.ShuffleHandler</value>
        </property>
        <property>
                <description>Classpath for typical applications.</description>
                <name>yarn.application.classpath</name>
                <value>
                        $HADOOP_HOME/etc/hadoop,
                        $HADOOP_HOME/share/hadoop/common/*,
                        $HADOOP_HOME/share/hadoop/common/lib/*,
                        $HADOOP_HOME/share/hadoop/hdfs/*,
                        $HADOOP_HOME/share/hadoop/hdfs/lib/*,
                        $HADOOP_HOME/share/hadoop/mapreduce/*,
                        $HADOOP_HOME/share/hadoop/mapreduce/lib/*,
                        $HADOOP_HOME/share/hadoop/yarn/*,
                        $HADOOP_HOME/share/hadoop/yarn/lib/*
                </value>
        </property>
</configuration>

Edit fairscheduler.xml:

[root@hadoop01 hadoop]# vi fairscheduler.xml

<allocations>
        <queue name="news">
                <minResources>1024 mb, 1 vcores</minResources>
                <maxResources>1536 mb, 1 vcores</maxResources>
                <maxRunningApps>5</maxRunningApps>
                <minSharePreemptionTimeout>300</minSharePreemptionTimeout>
                <weight>1.0</weight>
                <aclSubmitApps>root,yarn,search,hdfs</aclSubmitApps>
        </queue>
        <queue name="crawler">
                <minResources>1024 mb, 1 vcores</minResources>
                <maxResources>1536 mb, 1 vcores</maxResources>
        </queue>
        <queue name="map">
        </queue>
</allocations>

Create the directories referenced in the configuration files:

mkdir -p /data/hadoop/mydata/yarn
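The remaining local directories named in hdfs-site.xml, mapred-site.xml and yarn-site.xml can be pre-created on every node as well (a convenience sketch; Hadoop also creates most of them on first start):

mkdir -p /data/hadoop/mydata/{journal,name,data,mr_temp,yarn_local,yarn_log}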

4. Unpack ZooKeeper and update the environment variables

[root@hadoop01 ~]# tar zxvf zookeeper-3.4.6.tar.gz

[root@hadoop01 ~]# mv zookeeper-3.4.6 /data/zookeeper

[root@hadoop01 ~]# vi /etc/profile

##zookeeper

export ZOOKEEPER_HOME=/data/zookeeper

export PATH=$PATH:$ZOOKEEPER_HOME/bin:$ZOOKEEPER_HOME/conf

[root@hadoop01 ~]# source /etc/profile

5. Edit the ZooKeeper configuration file

[root@hadoop01 ~]# cd /data/zookeeper/conf/

[root@hadoop01 conf]# cp zoo_sample.cfg zoo.cfg

[root@hadoop01 conf]# vi zoo.cfg

# The number of milliseconds of each tick

tickTime=2000

# The number of ticks that the initial

# synchronization phase can take

initLimit=10

# The number of ticks that can pass between

# sending a request and getting an acknowledgement

syncLimit=5

# the directory where the snapshot is stored.

# do not use /tmp for storage, /tmp here is just

# example sakes.

dataDir=/data/hadoop/mydata/zookeeper

dataLogDir=/data/hadoop/mydata/zookeeperlog

# the port at which the clients will connect

clientPort=2190

server.1=hadoop01:2888:3888

server.2=hadoop02:2888:3888

server.3=hadoop03:2888:3888

server.4=hadoop04:2888:3888

server.5=hadoop05:2888:3888

# the maximum number of client connections.

# increase this if you need to handle more clients

#maxClientCnxns=60

#

# Be sure to read the maintenance section of the

# administrator guide before turning on autopurge.

# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance

# The number of snapshots to retain in dataDir

#autopurge.snapRetainCount=3

# Purge task interval in hours

# Set to "0" to disable auto purge feature

#autopurge.purgeInterval=1

Create the data directories:

mkdir /data/hadoop/mydata/zookeeper

mkdir /data/hadoop/mydata/zookeeperlog

6. Copy the configured hadoop and zookeeper directories to the other four nodes

[root@hadoop01 ~]# scp -r /data/hadoop hadoop02:/data/

[root@hadoop01 ~]# scp -r /data/hadoop hadoop03:/data/

[root@hadoop01 ~]# scp -r /data/hadoop hadoop04:/data/

[root@hadoop01 ~]# scp -r /data/hadoop hadoop05:/data/

[root@hadoop01 ~]# scp -r /data/zookeeper hadoop02:/data/

[root@hadoop01 ~]# scp -r /data/zookeeper hadoop03:/data/

[root@hadoop01 ~]# scp -r /data/zookeeper hadoop04:/data/

[root@hadoop01 ~]# scp -r /data/zookeeper hadoop05:/data/
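The same copying can be written as a loop. Note also that the /etc/profile additions (JAVA_HOME, HADOOP_HOME, ZOOKEEPER_HOME and their PATH entries) must be made on hadoop02 through hadoop05 as well, since commands such as zkServer.sh are run locally on every node. A sketch, assuming the same /data layout on every host:

for h in hadoop02 hadoop03 hadoop04 hadoop05; do scp -r /data/hadoop /data/zookeeper $h:/data/; done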

On hadoop02, edit yarn-site.xml:

[root@hadoop02 hadoop]# cd /data/hadoop/etc/hadoop/

Change the value of yarn.resourcemanager.ha.id from rm1 to rm2 (only this property; the rm1/rm2 suffixes in the other property names stay unchanged):

[root@hadoop02 hadoop]# vi yarn-site.xml

<value>rm2</value>

Create the ZooKeeper myid file on each node, matching the server.N entries in zoo.cfg:

[root@hadoop01 ~]# vi /data/hadoop/mydata/zookeeper/myid

1

[root@hadoop02 ~]# vi /data/hadoop/mydata/zookeeper/myid

2

[root@hadoop03 ~]# vi /data/hadoop/mydata/zookeeper/myid

3

[root@hadoop04 ~]# vi /data/hadoop/mydata/zookeeper/myid

4

[root@hadoop05 ~]# vi /data/hadoop/mydata/zookeeper/myid

5

7. Start ZooKeeper

Run zkServer.sh start on all five nodes:

[root@hadoop01 ~]# zkServer.sh start

 [root@hadoop01 ~]# zkServer.sh status

ZooKeeper JMX enabled by default

Using config: /data/zookeeper/bin/../conf/zoo.cfg

Mode: follower

[root@hadoop03 ~]# zkServer.sh status

Mode: leader


Normally only one node is in the leader state; the others are followers.

8. Initialize the HA state in ZooKeeper

Run on hadoop01:

[root@hadoop01 ~]# hdfs zkfc -formatZK
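To confirm the HA znode was created, connect to any ZooKeeper member with the CLI (port 2190 as set in zoo.cfg); the root listing should now include hadoop-ha:

[root@hadoop01 ~]# zkCli.sh -server hadoop01:2190

[zk: hadoop01:2190(CONNECTED) 0] ls /

[hadoop-ha, zookeeper]

[zk: hadoop01:2190(CONNECTED) 1] quit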

9. Start the JournalNode processes

Start on every node (all five):

[root@hadoop01 ~]# cd /data/hadoop/sbin/

[root@hadoop01 sbin]# ./hadoop-daemon.sh start journalnode

10. Format the NameNode

Run on hadoop01:

[root@hadoop01 ~]# hdfs namenode -format

11. Start the NameNode

Run on hadoop01:

[root@hadoop01 sbin]# ./hadoop-daemon.sh start  namenode

12. Sync the newly formatted NameNode metadata to the standby NameNode

Run this on the standby NameNode (hadoop02):

[root@hadoop02 ~]# hdfs namenode -bootstrapStandby

13. Start the NameNode on hadoop02

[root@hadoop02 ~]# cd /data/hadoop/sbin/

[root@hadoop02  sbin]# ./hadoop-daemon.sh start  namenode

14. Start all DataNodes

Run on every node; the hosts that run DataNodes are the ones listed in the slaves file (a one-step alternative is shown after the command):

[root@hadoop01 sbin]# ./hadoop-daemon.sh start  datanode
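Alternatively, since the slaves file already lists all five hosts, the DataNodes can be launched from hadoop01 in one step with the plural helper script (this relies on the passwordless SSH configured earlier):

[root@hadoop01 sbin]# ./hadoop-daemons.sh start datanode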

15. Start YARN

[root@hadoop01 ~]# cd /data/hadoop/sbin/

[root@hadoop01 sbin]# ./start-yarn.sh
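Note that start-yarn.sh starts a ResourceManager only on the local node. If the standby ResourceManager (rm2) should also be running, it presumably needs to be started on hadoop02 separately, for example:

[root@hadoop02 sbin]# ./yarn-daemon.sh start resourcemanager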

16. Start the ZKFC

Start on both hadoop01 and hadoop02:

[root@hadoop01 sbin]# ./hadoop-daemon.sh start zkfc

17. Results of a successful start

(Screenshots of the startup results: http://s1.51cto.com/wyfs02/M02/85/8A/wKioL1eoGDLDu0HfAAHAGNBDhVs350.png-wh_500x0-wm_3-wmp_4-s_3000818196.png and http://s3.51cto.com/wyfs02/M00/85/8A/wKiom1eoGD6xchrGAAHnuOGZ_jI101.png-wh_500x0-wm_3-wmp_4-s_1001019603.png)
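The HA state can also be checked from the command line (nn1/nn2 and rm1/rm2 are the IDs defined above; one NameNode should report active and the other standby, though which is which may vary):

[root@hadoop01 ~]# hdfs haadmin -getServiceState nn1

[root@hadoop01 ~]# hdfs haadmin -getServiceState nn2

[root@hadoop01 ~]# yarn rmadmin -getServiceState rm1

[root@hadoop01 ~]# hdfs dfsadmin -report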

III. Install HBase HA

1. Unpack HBase and edit its configuration files

[root@hadoop01 ~]# tar zxvf hbase-1.0.2-bin.tar.gz

[root@hadoop01 ~]# mv hbase-1.0.2 /data/hbase

Configure the environment variables in /etc/profile:

##hbase

export HBASE_HOME=/data/hbase

export PATH=$PATH:$HBASE_HOME/bin

[root@hadoop01 ~]# cd /data/hbase/conf/

[root@hadoop01 conf]# vi hbase-env.sh

# The java implementation to use.  Java 1.7+ required.

export JAVA_HOME="/opt/jdk"

# Extra Java CLASSPATH elements.  Optional.

# Be sure to set this; otherwise HMaster will fail to start

export HBASE_CLASSPATH=/data/hadoop/etc/hadoop

# Where log files are stored.  $HBASE_HOME/logs by default.

export HBASE_LOG_DIR=/data/hbase/logs

# Tell HBase whether it should manage it's own instance of Zookeeper or not.

export HBASE_MANAGES_ZK=false

Edit hbase-site.xml:

[root@hadoop01 conf]# vi hbase-site.xml

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-->
<configuration>
        <property>
                <name>hbase.rootdir</name>
                <value>hdfs://cluster/hbase</value>
        </property>
        <property>
                <name>hbase.cluster.distributed</name>
                <value>true</value>
        </property>
        <property>
                <name>hbase.tmp.dir</name>
                <value>/data/hbase/tmp</value>
        </property>
        <property>
                <name>hbase.master.port</name>
                <value>60000</value>
        </property>
        <property>
                <name>hbase.zookeeper.property.dataDir</name>
                <value>/data/hadoop/mydata/zookeeper</value>
        </property>
        <property>
                <name>hbase.zookeeper.quorum</name>
                <value>hadoop01,hadoop02,hadoop03,hadoop04,hadoop05</value>
        </property>
        <property>
                <name>hbase.zookeeper.property.clientPort</name>
                <value>2190</value>
        </property>
        <property>
                <name>zookeeper.session.timeout</name>
                <value>120000</value>
        </property>
        <property>
                <name>hbase.regionserver.restart.on.zk.expire</name>
                <value>true</value>
        </property>
</configuration>

[root@hadoop01 conf]# vi regionservers

List the hosts that will run an HRegionServer, one hostname per line (here the same five hosts as in the slaves file).

Create the directory:

mkdir /data/hbase/tmp

Add a backup master:

[root@hadoop01 conf]# vi backup-masters

Put the hostname of the standby HMaster in this file (hadoop02, for example).

That completes the configuration.

2. Copy the files to the other servers

[root@hadoop01 conf]# scp -r /data/hbase hadoop02:/data/

[root@hadoop01 conf]# scp -r /data/hbase hadoop03:/data/

[root@hadoop01 conf]# scp -r /data/hbase hadoop04:/data/

[root@hadoop01 conf]# scp -r /data/hbase hadoop05:/data/

3. Start HBase

[root@hadoop01 conf]# start-hbase.sh

4. Startup results

Check with jps:

[root@hadoop01 conf]# jps

2540 NodeManager

1686 QuorumPeerMain

2134 JournalNode

2342 DFSZKFailoverController

3041 HMaster

1933 DataNode

3189 HRegionServer

2438 ResourceManager

7848 Jps

1827 NameNode
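A quick smoke test from the HBase shell confirms that the HMaster and region servers are serving requests (the table name 'test' below is just an example):

[root@hadoop01 ~]# hbase shell

hbase(main):001:0> status

hbase(main):002:0> create 'test', 'cf'

hbase(main):003:0> put 'test', 'row1', 'cf:a', 'value1'

hbase(main):004:0> scan 'test'

hbase(main):005:0> exit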

Subsequent startups

First start ZooKeeper on every node (all five):

zkServer.sh start

Then, on hadoop01:

[root@hadoop01 sbin]# ./start-dfs.sh

[root@hadoop01 sbin]# ./start-yarn.sh

Finally, start HBase:

[root@hadoop01 sbin]# start-hbase.sh

Shutdown procedure

Stop HBase first:

stop-hbase.sh

Then stop YARN and HDFS on hadoop01:

[root@hadoop01 sbin]# ./stop-yarn.sh

[root@hadoop01 sbin]# ./stop-dfs.sh
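Finally, ZooKeeper can be stopped on each of the five nodes:

zkServer.sh stop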

Attachment: http://down.51cto.com/data/2368030

Copyright notice: this is original work; please credit the source when reposting, otherwise legal responsibility will be pursued.

This article is reposted from the 51CTO blog of jxzhfei; original link: http://blog.51cto.com/jxzhfei/1835598