1. 在根目錄建立zookeeper檔案夾(service1、service2、service3都建立)
[root@localhost /]# mkdir zookeeper
通過xshell上傳檔案到service1伺服器:上傳zookeeper-3.4.6.tar.gz到/software檔案夾
2.遠端copy將service1下的/software/zookeeper-3.4.6.tar.gz到service2、service3
[root@localhost software]# scp -r /software/zookeeper-3.4.6.tar.gz [email protected]:/software/
[root@localhost software]# scp -r /software/zookeeper-3.4.6.tar.gz [email protected]:/software/
3.copy /software/zookeeper-3.4.6.tar.gz到/zookeeper/目錄(service1、service2、service3都執行)
[root@localhost software]# cp /software/zookeeper-3.4.6.tar.gz /zookeeper/
4.安裝解壓zookeeper-3.4.6.tar.gz(service1、service2、service3都執行)
[root@localhost /]# cd /zookeeper/
[root@localhost zookeeper]# tar -zxvf zookeeper-3.4.6.tar.gz
5.在/zookeeper建立兩個目錄:zkdata、zkdatalog(service1、service2、service3都建立)
[root@localhost zookeeper]# mkdir zkdata
[root@localhost zookeeper]# mkdir zkdatalog
6.進入/zookeeper/zookeeper-3.4.6/conf/目錄
[root@localhost zookeeper]# cd /zookeeper/zookeeper-3.4.6/conf/
[root@localhost conf]# ls
configuration.xsl log4j.properties zoo.cfg zoo_sample.cfg
7. 修改zoo.cfg檔案
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/zookeeper/zkdata
dataLogDir=/zookeeper/zkdatalog
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
server.1=192.168.2.211:12888:13888
server.2=192.168.2.212:12888:13888
server.3=192.168.2.213:12888:13888
8. 同步修改service2、service3的zoo.cfg配置
9. myid檔案寫入(進入/zookeeper/zkdata目錄下)
[root@localhost /]# cd /zookeeper/zkdata
[root@localhost zkdata]# echo 1 > myid
10. myid檔案寫入service2、service3
echo 2 > myid
echo 3 > myid
11.檢視zk指令:
[root@localhost ~]# cd /zookeeper/zookeeper-3.4.6/bin/
[root@localhost bin]# ls
README.txt zkCleanup.sh zkCli.cmd zkCli.sh zkEnv.cmd zkEnv.sh zkServer.cmd zkServer.sh zookeeper.out
12.執行zkServer.sh檢視詳細指令:
[root@localhost bin]# ./zkServer.sh
JMX enabled by default
Using config: /zookeeper/zookeeper-3.4.6/bin/../conf/zoo.cfg
Usage: ./zkServer.sh {start|start-foreground|stop|restart|status|upgrade|print-cmd}
13. 在service1、service2、service3分别啟動zk服務
[root@localhost bin]# ./zkServer.sh start
14. jps檢視zk程序
[root@localhost bin]# jps
31483 QuorumPeerMain
31664 Jps
15. 分别在service1、service2、service3檢視zk狀态(可以看到leader和follower節點)
[root@localhost bin]# ./zkServer.sh status
Mode: follower
Mode: leader
16. 看到leader和follower節點已經安裝成功