部署kafka
- 部署jdk环境
- 部署zookeeper
- 部署kafka
- topic
部署jdk环境
192.168.3.1 node1
192.168.3.3 node2
192.168.3.10 node3
ntpdate ntp1.aliyun.com
useradd appUser
echo Abc123 | passwd --stdin appUser
tar xf jdk1.8.0_221.tar.gz -C /usr/local/
vim /etc/profile
export JAVA_HOME=/usr/local/jdk1.8.0_221
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH
source /etc/profile
[root@node3 kafka]# java -version
java version "1.8.0_221"
Java(TM) SE Runtime Environment (build 1.8.0_221-b11)
Java HotSpot(TM) 64-Bit Server VM (build 25.221-b11, mixed mode)
mkdir -p /export/packages/{kafka,zookeeper,java}
mkdir -p /export/data/{kafka,zookeeper}
mkdir -p /export/logs/{kafkalog,zkdatalog}
mkdir -p /export/servers
部署zookeeper
tar xf zookeeper-3.4.13.tar -C /export/packages/zookeeper/
cd /export/packages/zookeeper/zookeeper-3.4.13/conf
cp zoo_sample.cfg zoo.cfg
vim zoo.cfg
tickTime=2000 #通信心跳时间,Zookeeper服务器与客户端心跳时间,单位毫秒
initLimit=10 #Leader和Follower初始连接时能容忍的最多心跳数(tickTime的数量),这里表示为10*2s
syncLimit=5 #Leader和Follower之间同步通信的超时时间,这里表示如果超过5*2s,Leader认为Follwer死掉,并从服务器列表中删除Follwer
dataDir=/export/packages/zookeeper/zookeeper-3.4.13/data ●修改,指定保存Zookeeper中的数据的目录,目录需要单独创建
dataLogDir=/export/packages/zookeeper/zookeeper-3.4.13/logs ●添加,指定存放日志的目录,目录需要单独创建
clientPort=2181 #客户端连接端口
#添加集群信息
server.1=192.168.3.1:3188:3288
server.2=192.168.3.3:3188:3288
server.3=192.168.3.10:3188:3288
--->wq
[root@node3 kafka]# grep -Ev "^#|^$" /export/packages/zookeeper/zookeeper-3.4.13/conf/zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/export/packages/zookeeper/zookeeper-3.4.13/data
dataLogDir=/export/packages/zookeeper/zookeeper-3.4.13/logs
clientPort=2181
server.1=192.168.3.1:3188:3288
server.2=192.168.3.3:3188:3288
server.3=192.168.3.10:3188:3288
mkdir /export/packages/zookeeper/zookeeper-3.4.13/{data,logs}
scp /export/packages/zookeeper/zookeeper-3.4.13/conf/zoo.cfg 192.168.3.3:/export/packages/zookeeper/zookeeper-3.4.13/conf
scp /export/packages/zookeeper/zookeeper-3.4.13/conf/zoo.cfg 192.168.3.10:/export/packages/zookeeper/zookeeper-3.4.13/conf
- 在"每个"节点的dataDir指定的目录下创建一个 myid 的文件
echo 1 > /export/packages/zookeeper/zookeeper-3.4.13/data/myid
echo 2 > /export/packages/zookeeper/zookeeper-3.4.13/data/myid
echo 3 > /export/packages/zookeeper/zookeeper-3.4.13/data/myid
vim /etc/init.d/zookeeper
#!/bin/bash
# chkconfig: 2345 20 90
# description: Zookeeper Service Control script
#
# SysV init wrapper around zkServer.sh so ZooKeeper can be managed with
# `service zookeeper {start|stop|restart|status}` and registered via chkconfig.
# NOTE: chkconfig requires the header lines above to be written exactly as
# "# chkconfig: ..." / "# description: ..." (with the spaces) to be parsed.

# Installation root of this ZooKeeper instance (no trailing slash, so
# "${ZK_HOME}/bin" does not produce a double slash).
ZK_HOME='/export/packages/zookeeper/zookeeper-3.4.13'

case "$1" in
start)
    echo "---------- zookeeper 启动 ------------"
    "${ZK_HOME}/bin/zkServer.sh" start
    ;;
stop)
    echo "---------- zookeeper 停止 ------------"
    "${ZK_HOME}/bin/zkServer.sh" stop
    ;;
restart)
    echo "---------- zookeeper 重启 ------------"
    "${ZK_HOME}/bin/zkServer.sh" restart
    ;;
status)
    echo "---------- zookeeper 状态 ------------"
    "${ZK_HOME}/bin/zkServer.sh" status
    ;;
*)
    echo "Usage: $0 {start|stop|restart|status}"
    ;;
esac
chmod +x /etc/init.d/zookeeper
chkconfig --add zookeeper
service zookeeper start
[root@node3 kafka]# netstat -antp | grep 2181
tcp6 0 0 :::2181 :::* LISTEN 16453/java
tcp6 0 0 192.168.3.10:2181 192.168.3.10:46452 ESTABLISHED 16453/java
tcp6 0 0 192.168.3.10:46452 192.168.3.10:2181 ESTABLISHED 16901/java
[root@node3 kafka]# service zookeeper status
---------- zookeeper 状态 ------------
ZooKeeper JMX enabled by default
Using config: /export/packages/zookeeper/zookeeper-3.4.13/bin/../conf/zoo.cfg
Mode: leader
ln -s /export/packages/zookeeper/zookeeper-3.4.13/bin/* /usr/local/bin
ln -s /export/packages/zookeeper/zookeeper-3.4.13 /export/servers/zookeeper
部署kafka
cd /export/servers/kafka/config/
cp -r server.properties server.properties.back
vim /export/servers/kafka/config/server.properties
所有节点修改对应配置文件,如下:
#21 broker.id=0 broker的全局唯一编号,每个broker不能重复
#31 listeners=PLAINTEXT://192.168.3.1:9092 指定监听的IP和端口,如果修改每个broker的IP需区分开
#60 log.dirs=/export/logs/kafkalog kafka运行日志存放的路径,也是数据存放的路径
#123 zookeeper.connect=192.168.3.1:2181,192.168.3.3:2181,192.168.3.10:2181 配置连接Zookeeper集群地址
---->wq
PS:broker.id和listeners不相同,其他都相同
[root@node3 kafka]# grep -Ev "^#|^$" /export/servers/kafka/config/server.properties
broker.id=3
listeners=PLAINTEXT://192.168.3.10:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/export/logs/kafkalog
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=192.168.3.1:2181,192.168.3.3:2181,192.168.3.10:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
chown -R appUser:appUser /export/*
[root@node3 kafka]# cat /etc/init.d/kafka
#!/bin/bash
# chkconfig: 2345 22 88
# description: Kafka Service Control script
#
# SysV init wrapper for the Kafka broker:
# `service kafka {start|stop|restart|status}`.
# NOTE: chkconfig needs the header lines written exactly as
# "# chkconfig: ..." / "# description: ..." (with spaces) to be parsed.

# Installation root of this Kafka instance (no trailing slash).
KAFKA_HOME='/export/servers/kafka'

case "$1" in
start)
    echo "---------- Kafka 启动 ------------"
    "${KAFKA_HOME}/bin/kafka-server-start.sh" -daemon "${KAFKA_HOME}/config/server.properties"
    ;;
stop)
    echo "---------- Kafka 停止 ------------"
    "${KAFKA_HOME}/bin/kafka-server-stop.sh"
    ;;
restart)
    "$0" stop
    "$0" start
    ;;
status)
    echo "---------- Kafka 状态 ------------"
    # Match the broker JVM by its main class (kafka.Kafka) instead of
    # `ps -ef | grep kafka`, which also matches this very init script
    # (its path contains "kafka") and needed fragile grep-exclusion.
    if pgrep -f 'kafka\.Kafka' >/dev/null 2>&1; then
        echo "kafka is running"
    else
        echo "kafka is not running"
    fi
    ;;
*)
    echo "Usage: $0 {start|stop|restart|status}"
    ;;
esac
su appUser
[root@node1 kafka]# service kafka start
---------- Kafka 启动 ------------
[root@node1 kafka]# netstat -antp | grep 9092
tcp6 0 0 192.168.3.1:9092 :::* LISTEN 15770/java
tcp6 0 0 192.168.3.1:9092 192.168.3.3:59904 ESTABLISHED 15770/java
tcp6 0 0 192.168.3.1:43628 192.168.3.10:9092 ESTABLISHED 15770/java
topic
[appUser@node1 kafkalog]$ /export/servers/kafka/bin/kafka-topics.sh --create --zookeeper 192.168.3.1:2181 --replication-factor 2 --partitions 1 --topic ky11
Created topic ky11.
[root@node1 kafka]# /export/servers/kafka/bin/kafka-console-producer.sh --broker-list 192.168.3.1:9092 --topic test
>qw
>sas
>ww
[root@node3 kafka]# /export/servers/kafka/bin/kafka-console-producer.sh --broker-list 192.168.3.10:9092 --topic test
>er
>ewr
[appUser@node2 bin]$ /export/servers/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.3.3:9092 --topic test --from-beginning
test
;
exit
qi
qw
sas
ww
er
ewr
[appUser@node1 kafka]$ /export/servers/kafka/bin/kafka-topics.sh --list --zookeeper 192.168.3.1:2181 //查看topic
__consumer_offsets
ky11
qwe
test
[appUser@node1 kafka]$ /export/servers/kafka/bin/kafka-topics.sh --describe --zookeeper 192.168.3.1:2181 --topic test
Topic:test PartitionCount:1 ReplicationFactor:2 Configs:
Topic: test Partition: 0 Leader: 3 Replicas: 3,1 Isr: 3,1
含义:分区为为1。复制因子为2。他的test的分区为0。Replicas: 3,1。 复制的为3,1