意见箱
恒创运营部门将仔细参阅您的意见和建议,必要时将通过预留邮箱与您保持联络。感谢您的支持!
意见/建议
提交建议

kafka-2.11集群的搭建步骤

来源:恒创科技 编辑:恒创科技编辑部
2023-12-01 10:17:59

这篇文章主要介绍“kafka-2.11集群的搭建步骤”,在日常操作中,相信很多人在kafka-2.11集群的搭建步骤问题上存在疑惑,小编查阅了各式资料,整理出简单好用的操作方法,希望对大家解答“kafka-2.11集群的搭建步骤”的疑惑有所帮助!接下来,请跟着小编一起来学习吧!

producer:消息生产者,向kafka broker发消息的客户端

consumer:消息消费者,向kafka broker取消息的客户端


kafka-2.11集群的搭建步骤

Topic:发布到Kafka集群消息的一个类别

broker:一台kafka服务器就是一个broker,一个集群由多个broker组成,一个broker可以容纳多个topic

1.下载安装zookeeper(必须先安装zookeeper和jdk)

[root@node1 ~]# wget http://mirror.bit.edu.cn/apache/zookeeper/stable/zookeeper-3.4.13.tar.gz

[root@node1 ~]# tar xvf zookeeper-3.4.13.tar.gz -C /opt/

[root@node1 ~]# cd /opt/zookeeper-3.4.13/conf/

[root@node1 conf]# vim zoo.cfg

tickTime=2000

dataDir=/opt/zookeeper-3.4.13/data

clientPort=2181

initLimit=5

syncLimit=2

server.1=node1:2888:3888

server.2=node2:2888:3888

server.3=node3:2888:3888

[root@node1 conf]# mkdir /opt/zookeeper-3.4.13/data

[root@node1 conf]# cd /opt/zookeeper-3.4.13/data --先用 echo 1 > myid 创建该文件;myid必须要在data目录下面,否则会报错

[root@node1 data]# cat myid

1

[root@node1 zookeeper-3.4.13]# cd ..

[root@node1 opt]# scp -r zookeeper-3.4.13 node2:/opt/

[root@node1 opt]# scp -r zookeeper-3.4.13 node3:/opt/

2.在node2修改myid文件

[root@node2 opt]# cat /opt/zookeeper-3.4.13/data/myid

2

[root@node2 opt]#

3.在node3修改myid文件

[root@node3 ~]# cat /opt/zookeeper-3.4.13/data/myid

3

[root@node3 ~]# zkServer.sh start --每个节点都要启动zookeeper服务

ZooKeeper JMX enabled by default

Using config: /opt/zookeeper-3.4.13/bin/../conf/zoo.cfg

Starting zookeeper ... STARTED

[root@node3 opt]# zkCli.sh --使用客户端登录

3.下载安装kafka(三个节点一样)

[root@node1 ~]# wget http://mirror.bit.edu.cn/apache/kafka/2.2.0/kafka_2.11-2.2.0.tgz

[root@node1 ~]# tar xvf kafka_2.11-2.2.0.tgz -C /opt/

[root@node1 ~]# cd /opt/kafka_2.11-2.2.0/

[root@node1 kafka_2.11-2.2.0]# cd config/

[root@node1 config]# vim server.properties

broker.id=0 --每个id不一样

zookeeper.connect=172.16.8.23:2181,172.16.8.24:2181,172.16.8.178:2181 --zookeeper集群IP地址

[root@node1 config]# cd /opt/

[root@node1 opt]# scp -r kafka_2.11-2.2.0/ node2:/opt/

[root@node1 opt]# scp -r kafka_2.11-2.2.0/ node3:/opt/

[root@node1 opt]# cd kafka_2.11-2.2.0/bin/

[root@node1 bin]# ./kafka-server-start.sh ../config/server.properties & --三台kafka都要后台启动服务

4.查看kafka服务是否启动正常

[root@node1 bin]# jps

30851 Kafka

3605 HMaster

12728 QuorumPeerMain

12712 DFSZKFailoverController

31656 Jps

3929 DataNode

15707 JournalNode

32188 NameNode

14335 ResourceManager

[root@node1 bin]# netstat -antulp | grep 30851

tcp6 0 0 :::9092 :::* LISTEN 30851/java

tcp6 0 0 :::37161 :::* LISTEN 30851/java

tcp6 0 0 172.16.8.23:40754 172.16.8.178:9092 ESTABLISHED 30851/java

tcp6 0 0 172.16.8.23:9092 172.16.8.23:39704 ESTABLISHED 30851/java

tcp6 0 0 172.16.8.23:45480 172.16.8.24:9092 ESTABLISHED 30851/java

tcp6 0 0 172.16.8.23:45294 172.16.8.178:2181 ESTABLISHED 30851/java

tcp6 0 0 172.16.8.23:39704 172.16.8.23:9092 ESTABLISHED 30851/java

[root@node1 bin]#

5.使用命令接口

[root@node1 bin]# ./kafka-topics.sh --create --zookeeper node1:2181 --topic tongcheng --replication-factor 3 --partitions 3 --创建topic

Created topic tongcheng.

[root@node1 bin]# ./kafka-topics.sh --list --zookeeper node1:2181 --查看topic

tongcheng

[root@node1 bin]# ./kafka-topics.sh --delete --zookeeper node1:2181 --topic tongcheng --删除topic

Topic tongcheng is marked for deletion.

Note: This will have no impact if delete.topic.enable is not set to true.

[root@node1 bin]# ./kafka-topics.sh --list --zookeeper node1:2181

[root@node1 bin]#

6.发送消息/接收消息

[root@node1 bin]# ./kafka-console-producer.sh --broker-list node2:9092 --topic ttt

>tongcheng is goods;

>tong is goods;

>cheng is goods!

>

--------接收端-------------

[root@node2 bin]# ./kafka-console-consumer.sh --topic ttt --bootstrap-server node1:9092,node2:9092,node3:9092 --from-beginning

tongcheng is goods;

tong is goods;

cheng is goods!

[root@node2 bin]# ./kafka-topics.sh --describe --zookeeper node1:2181 --topic ttt --查看分区数和副本数

Topic:ttt	PartitionCount:1	ReplicationFactor:1	Configs:

Topic: ttt	Partition: 0	Leader: 0	Replicas: 0	Isr: 0

[root@node2 bin]#

7.查看zookeeper数据

[root@node1 bin]# ./zkCli.sh

Connecting to localhost:2181

[zk: localhost:2181(CONNECTED) 0] ls /

[cluster, controller, brokers, zookeeper, hadoop-ha, admin, isr_change_notification, log_dir_event_notification, controller_epoch, consumers, latest_producer_id_block, config, hbase]

[zk: localhost:2181(CONNECTED) 1]

8.接收组消息(当生产者发送消息时,组中只有一个消费者能接收到消息)

[root@node1 bin]# ./kafka-console-producer.sh --broker-list node1:9092 --topic tong --在node1节点发送消息

>

------启动两台消费者-----------

[root@node2 bin]# vim ../config/consumer.properties --两台消费者都要修改

group.id=wuhan

[root@node2 bin]# ./kafka-console-consumer.sh --topic tong --bootstrap-server node1:9092 --consumer.config ../config/consumer.properties

[2019-04-05 20:52:09,152] WARN [Consumer clientId=consumer-1, groupId=wuhan] Error while fetching metadata with correlation id 2 :

9.在发送端发送消息,接收端组接收消息

[root@node1 bin]# ./kafka-console-producer.sh --broker-list node1:9092 --topic tong

>[2019-04-05 20:51:31,094] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)

[2019-04-05 20:52:09,114] INFO Creating topic tong with configuration {} and initial partition assignment Map(0 -> ArrayBuffer(2)) (kafka.zk.AdminZkClient)

[2019-04-05 20:52:09,124] INFO [KafkaApi-0] Auto creation of topic tong with 1 partitions and replication factor 1 is successful (kafka.server.KafkaApis)

>hello ttt;

>

-----------接收端--------------

[root@node2 bin]# ./kafka-console-consumer.sh --topic tong --bootstrap-server node1:9092 --consumer.config ../config/consumer.properties --在node2节点接收到消息

[2019-04-05 20:52:09,152] WARN [Consumer clientId=consumer-1, groupId=wuhan] Error while fetching metadata with correlation id 2 : {tong=LEADER_NOT_AVAILABLE}

(org.apache.kafka.clients.NetworkClient)

hello ttt;

到此,关于“kafka-2.11集群的搭建步骤”的学习就结束了,希望能够解决大家的疑惑。理论与实践的搭配能更好的帮助大家学习,快去试试吧!若想继续学习更多相关知识,请继续关注恒创网站,小编会继续努力为大家带来更多实用的文章!

上一篇: mysql直接拷贝data目录下数据库源文件还原数据库方法 下一篇: JavaScript单线程和任务队列是什么