# 天天看點 — Kafka 配置筆記 (Kafka configuration notes)
#
# Kafka broker configuration (server.properties)

############################# Server Basics #############################
# Unique id of this broker; must be different for every broker in the cluster.
broker.id=0
############################# Socket Server Settings #############################
# Allow topics to be deleted via the admin tools.
delete.topic.enable=true
# Address and port the broker binds to for client and inter-broker traffic.
listeners = PLAINTEXT://192.168.229.147:9092
# Hostname/port advertised to producers and consumers; must be reachable by clients.
advertised.listeners=PLAINTEXT://192.168.229.147:9092
# Number of threads handling network requests.
num.network.threads=3
# Number of threads performing disk I/O.
num.io.threads=8
# Send buffer (SO_SNDBUF) used by the socket server.
socket.send.buffer.bytes=102400
# Receive buffer (SO_RCVBUF) used by the socket server.
socket.receive.buffer.bytes=102400
# Maximum size of a single request the socket server will accept.
socket.request.max.bytes=104857600
############################# Log Basics #############################
# Directory (or comma-separated list of directories) where log segments are stored.
log.dirs=/tmp/kafka-logs
# Default number of log partitions per topic. More partitions allow greater
# consumption parallelism, but also result in more files on the broker.
# (Note: the "set to 3 to avoid single points of failure" advice applies to the
# replication-factor settings below, not to the partition count.)
num.partitions=1
# Threads per data directory used for log recovery at startup and flushing at shutdown.
num.recovery.threads.per.data.dir=1
############################# Internal Topic Settings  #############################
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1 is recommended to ensure availability, such as 3.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
############################# Log Flush Policy #############################
# Number of messages to accept before forcing a flush of data to disk.
#log.flush.interval.messages=10000
# Maximum amount of time a message can sit in the log before a flush is forced.
#log.flush.interval.ms=1000
############################# Log Retention Policy #############################
# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.
# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours=168
# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
#log.retention.bytes=1073741824
# Maximum size of a single log segment file; a new segment is rolled once this size is reached.
log.segment.bytes=1073741824
# Interval at which log segments are checked to see whether they can be
# deleted according to the retention policies.
log.retention.check.interval.ms=300000
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=localhost:2181
# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=6000
############################# Group Coordinator Settings #############################
# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
# The default value for this is 3 seconds.
# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
group.initial.rebalance.delay.ms=0