Deploying a ZooKeeper + Kafka Cluster with Docker
Pull the Docker image
docker pull zookeeper:3.6.2
Cluster service plan

| Node IP | Data directory | Client port | Quorum port | Election port |
| --- | --- | --- | --- | --- |
| 3.1.101.33 | /data/zookeeper/{data,conf,datalog} | 2181 | 2888 | 3888 |
| 3.1.101.34 | /data/zookeeper/{data,conf,datalog} | 2181 | 2888 | 3888 |
| 3.1.101.35 | /data/zookeeper/{data,conf,datalog} | 2181 | 2888 | 3888 |
Create the data directories

mkdir -pv /data/zookeeper/{data,conf,datalog}
Create the configuration file
cat > /data/zookeeper/conf/zoo.cfg << 'EOF'
tickTime=2000
dataDir=/data
dataLogDir=/datalog
clientPort=2181
initLimit=5
syncLimit=2
server.0=3.1.101.33:2888:3888
server.1=3.1.101.34:2888:3888
server.2=3.1.101.35:2888:3888
EOF
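The same directory layout and configuration are needed on all three nodes. A minimal sketch for pushing them out from the first host, assuming password-less SSH as root to the other two nodes:

# Replicate the ZooKeeper directories and config to the remaining nodes
for host in 3.1.101.34 3.1.101.35; do
  ssh "$host" 'mkdir -pv /data/zookeeper/{data,conf,datalog}'
  scp /data/zookeeper/conf/zoo.cfg "$host":/data/zookeeper/conf/zoo.cfg
done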
Docker Compose orchestration

Compose file:
version: "3"
services:
  zookeeper:
    container_name: zookeeper
    image: zookeeper:3.6.2
    network_mode: host
    restart: always
    volumes:
      - /etc/localtime:/etc/localtime
      - /data/zookeeper/conf/zoo.cfg:/conf/zoo.cfg
      - /data/zookeeper/data:/data
      - /data/zookeeper/datalog:/datalog
    environment:
      ZOO_MY_ID: 0
Note: ZOO_MY_ID must be changed on each node to match that server's id (the N in server.N of zoo.cfg).
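A minimal sketch for starting the service with the correct id on each node, assuming the compose file above is saved as /data/zookeeper/docker-compose.yml (a hypothetical path):

# On node 3.1.101.34, which is server.1 in zoo.cfg:
sed -i 's/ZOO_MY_ID: 0/ZOO_MY_ID: 1/' /data/zookeeper/docker-compose.yml
docker-compose -f /data/zookeeper/docker-compose.yml up -d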
Two pieces of state are persisted to the host: the snapshot directory (/data) and the transaction log directory (/datalog), each via its own bind mount.

Check each node's role (one node should report leader, the others follower):
zkServer.sh status
Connect with the CLI client:
zkCli.sh -server 3.1.101.33:2181,3.1.101.34:2181,3.1.101.35:2181
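Because the containers use host networking, the role check can be run across all nodes in one pass; a small sketch, again assuming password-less SSH:

# Print each node's role; expect one "Mode: leader" and two "Mode: follower"
for host in 3.1.101.33 3.1.101.34 3.1.101.35; do
  echo "== $host =="
  ssh "$host" 'docker exec zookeeper zkServer.sh status'
done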
Deploying Kafka

Pull the Docker image
docker pull wurstmeister/kafka:2.13-2.7.0
Cluster service plan

| Node IP | Data directory | Port |
| --- | --- | --- |
| 3.1.101.33 | /data/kafka | 9092 |
| 3.1.101.34 | /data/kafka | 9092 |
| 3.1.101.35 | /data/kafka | 9092 |
Create the data directories

mkdir -pv /data/kafka/{data,conf,logs}
Create the configuration file
cat > /data/kafka/conf/server.properties << 'EOF'
############################# System #############################
broker.id=0
listeners=PLAINTEXT://3.1.101.33:9092
advertised.listeners=PLAINTEXT://3.1.101.33:9092
advertised.port=9092
port=9092
group.initial.rebalance.delay.ms=0
############################# Replication configurations #############################
num.replica.fetchers=1
replica.fetch.max.bytes=1048576
replica.fetch.wait.max.ms=500
replica.high.watermark.checkpoint.interval.ms=5000
replica.socket.timeout.ms=30000
replica.socket.receive.buffer.bytes=65536
replica.lag.time.max.ms=10000
replica.lag.max.messages=4000
compression.codec=none
controller.socket.timeout.ms=30000
controller.message.queue.size=10
controlled.shutdown.enable=true
default.replication.factor=2
############################# Topic configuration #############################
num.partitions=1
num.recovery.threads.per.data.dir=1
message.max.bytes=1000000
auto.create.topics.enable=true
auto.leader.rebalance.enable=true
offsets.topic.replication.factor=1
############################# Log configuration #############################
log.dirs=/kafka
log.index.interval.bytes=4096
log.index.size.max.bytes=10485760
# retain logs for 7 days
log.retention.hours=168
# force a flush to disk every 10 s
log.flush.interval.ms=10000
# force a flush every 20000 messages
log.flush.interval.messages=20000
log.flush.scheduler.interval.ms=2000
log.roll.hours=72
log.retention.check.interval.ms=300000
# roll a new segment file in the data dir (log.dirs) at 1 GiB
log.segment.bytes=1073741824
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
############################# ZK configuration #############################
zookeeper.connect=3.1.101.33:2181,3.1.101.34:2181,3.1.101.35:2181
zookeeper.connection.timeout.ms=6000
zookeeper.sync.time.ms=2000
############################# Socket server configuration #############################
# suggested value: number of CPU cores + 1
num.io.threads=9
# suggested value: 2x the number of CPU cores, at most 3x
num.network.threads=8
socket.request.max.bytes=104857600
socket.receive.buffer.bytes=1048576
socket.send.buffer.bytes=1048576
queued.max.requests=500
fetch.purgatory.purge.interval.requests=1000
producer.purgatory.purge.interval.requests=1000
EOF
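broker.id and the listener addresses must be unique per node. A sketch for adapting the file on the other hosts, following the plan above:

# On node 3.1.101.34: give the broker a unique id and advertise its own IP
sed -i -e 's/^broker.id=0/broker.id=1/' \
       -e 's/3\.1\.101\.33:9092/3.1.101.34:9092/g' \
       /data/kafka/conf/server.properties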
Docker Compose orchestration

Example compose file:
version: "3"
services:
  kafka:
    container_name: kafka
    image: wurstmeister/kafka:2.13-2.7.0
    network_mode: host
    restart: always
    volumes:
      - /etc/localtime:/etc/localtime
      - /data/kafka/conf/server.properties:/opt/kafka/config/server.properties
      - /data/kafka/data:/kafka
      - /data/kafka/logs:/opt/kafka/logs
    environment:
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_ZOOKEEPER_CONNECT: 3.1.101.33:2181,3.1.101.34:2181,3.1.101.35:2181
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://3.1.101.33:9092
      KAFKA_LISTENERS: PLAINTEXT://3.1.101.33:9092
      KAFKA_HEAP_OPTS: "-Xmx1G -Xms1G"
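With the configuration in place, start the broker on each node and confirm the container stays up. Note that KAFKA_ADVERTISED_LISTENERS / KAFKA_LISTENERS must carry each node's own IP, just like server.properties:

docker-compose up -d
docker ps --filter name=kafka
docker logs --tail 20 kafka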
Verification
Create a topic:
kafka-topics.sh --create --topic tnt --replication-factor 3 --partitions 2 --zookeeper 3.1.101.33:2181

Describe the topic:
kafka-topics.sh --describe --zookeeper 3.1.101.33:2181 --topic tnt

List all topics:
kafka-topics.sh --list --zookeeper 3.1.101.34:2181

Produce messages (run on one node):
kafka-console-producer.sh --broker-list 3.1.101.33:9092 --topic tnt

Consume messages (run on another node, from the beginning):
kafka-console-consumer.sh --bootstrap-server 3.1.101.35:9092 --topic tnt --from-beginning
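A quick end-to-end smoke test as a sketch: publish a single message through one broker and read it back through another (the Kafka scripts are on the container's PATH in the wurstmeister image):

echo "hello kafka" | docker exec -i kafka \
  kafka-console-producer.sh --broker-list 3.1.101.33:9092 --topic tnt
docker exec kafka kafka-console-consumer.sh \
  --bootstrap-server 3.1.101.35:9092 --topic tnt \
  --from-beginning --max-messages 1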
Kafka configuration file reference
############################# System #############################
# Unique, non-negative ID of this broker; must not collide within the cluster.
broker.id=0
# Port the broker listens on; the default is 9092.
port=9092
# Hostname the broker binds to; if unset, it binds to all interfaces.
host.name=debugo01
# Number of threads handling network requests.
num.network.threads=2
# Number of threads performing disk I/O.
num.io.threads=8
# Number of background threads for housekeeping tasks such as file deletion.
background.threads = 4
# Maximum number of requests that may queue up while the I/O threads are busy;
# once the queue is full, the network threads stop accepting new requests.
queued.max.requests = 500
# Socket send buffer (SO_SNDBUF).
socket.send.buffer.bytes=1048576
# Socket receive buffer (SO_RCVBUF).
socket.receive.buffer.bytes=1048576
# Maximum size of a socket request; protects the broker against OOM and must
# be larger than message.max.bytes.
socket.request.max.bytes = 104857600
############################# Topic #############################
# Default number of partitions per topic; each partition is stored as a set of segment files.
num.partitions=2
# Whether to auto-create a topic on first use; if false, topics must be created explicitly.
auto.create.topics.enable =true
# Default replication factor for auto-created topics; must not exceed the number of brokers.
default.replication.factor =1
# Maximum size of a single message the broker will accept.
message.max.bytes = 1000000
############################# ZooKeeper #############################
# ZooKeeper quorum connection string.
zookeeper.connect=debugo01:2181,debugo02,debugo03
# ZooKeeper connection timeout.
zookeeper.connection.timeout.ms=1000000
# How far a ZooKeeper follower may lag behind its leader.
zookeeper.sync.time.ms = 2000
############################# Log #############################
# Directories in which log data is stored; multiple comma-separated paths are allowed.
log.dirs=/var/log/kafka
# Number of messages accumulated before a forced flush to disk. Default 10000.
#log.flush.interval.messages=10000
# Maximum time (ms) a message may sit unflushed; whichever of interval.ms and
# interval.messages is reached first triggers the flush. Default 3000 ms.
#log.flush.interval.ms=1000
# How often the flusher checks whether any log needs flushing.
log.flush.scheduler.interval.ms = 3000
# Log cleanup policy (delete|compact).
log.cleanup.policy = delete
# Retention time (hours|minutes); default 7 days (168 hours). Used by the delete
# policy; data is removed when either the time or the bytes limit is reached.
log.retention.hours=168
# Retention size limit, also used by the delete policy.
#log.retention.bytes=1073741824
# Maximum size of a single segment file; a new segment is rolled beyond it (-1 = no limit).
log.segment.bytes=536870912
# Roll a new segment after this many hours even if the size limit has not been reached.
log.roll.hours = 24*7
# How often to check whether segments must be deleted (per log.retention.hours / log.retention.bytes).
log.retention.check.interval.ms=60000
# Whether to enable log compaction.
log.cleaner.enable=false
# How long delete tombstones are retained in compacted logs.
log.cleaner.delete.retention.ms = 1 day
# Maximum size of a segment's index file.
log.index.size.max.bytes = 10 * 1024 * 1024
# Interval (bytes) between offset index entries; a denser index makes lookups faster at the cost of space.
log.index.interval.bytes = 4096
############################# replica #############################
# Socket timeout used by the controller when managing partitions and replicas.
controller.socket.timeout.ms = 30000
# Queue size for controller-to-broker channels.
controller.message.queue.size=10
# If a replica has not fetched from the leader within this time, the leader
# considers it dead and removes it from the in-sync replica set.
replica.lag.time.max.ms = 10000
# If true, a shutting-down broker first migrates its leaderships to other
# brokers, reducing the unavailability window.
controlled.shutdown.enable = false
# Number of retries for a controlled shutdown.
controlled.shutdown.max.retries = 3
# Backoff between controlled-shutdown retries.
controlled.shutdown.retry.backoff.ms = 5000
# If a replica falls more than this many messages behind the leader, the leader
# considers it failed and removes it from the in-sync set. This usually means
# the follower is hung or cannot keep up; in clusters with few brokers or a
# weak network, consider raising this value.
replica.lag.max.messages = 4000
# Socket timeout used by followers when fetching from the leader.
replica.socket.timeout.ms= 30 * 1000
# Socket receive buffer used when fetching from the leader.
replica.socket.receive.buffer.bytes=64 * 1024
# Maximum bytes a replica fetches per request.
replica.fetch.max.bytes = 1024 * 1024
# Maximum time a replica's fetch request waits at the leader for new data.
replica.fetch.wait.max.ms = 500
# Minimum bytes a fetch must return; if the leader has less, the request waits.
replica.fetch.min.bytes =1
# Number of fetcher threads used to replicate from a leader; more threads
# increase the follower's I/O parallelism.
num.replica.fetchers = 1
# How often each replica checkpoints its high watermark to disk.
replica.high.watermark.checkpoint.interval.ms = 5000
# Whether partition leadership is automatically rebalanced across brokers.
auto.leader.rebalance.enable = false
# Allowed leadership imbalance per broker (percent); a rebalance is triggered above it.
leader.imbalance.per.broker.percentage = 10
# How often to check for leadership imbalance.
leader.imbalance.check.interval.seconds = 300
# Maximum size of the metadata stored with an offset commit.
offset.metadata.max.bytes = 1024
############################# Consumer #############################
# The essential consumer settings are group.id and zookeeper.connect.
# Consumer group ID: by setting the same group id, multiple processes indicate
# that they are all part of the same consumer group.
group.id
# Consumer ID; auto-generated if not set.
consumer.id
# User-defined client ID attached to requests for tracing; defaults to the group.id value.
client.id =
# ZooKeeper connection string; must point at the same ensemble the brokers use.
zookeeper.connect=debugo01:2182,debugo02:2182,debugo03:2182
# ZooKeeper session timeout; if no heartbeat arrives within this window the
# consumer is considered dead and a rebalance is triggered.
zookeeper.session.timeout.ms = 6000
# ZooKeeper connection timeout.
zookeeper.connection.timeout.ms = 6000
# How far a ZooKeeper follower may lag behind its leader.
zookeeper.sync.time.ms = 2000
# What to do when ZooKeeper holds no initial offset, or the offset is out of range:
# smallest: reset to the smallest (oldest) offset
# largest: reset to the largest (latest) offset
# anything else: throw an exception to the consumer
auto.offset.reset = largest
# Socket timeout for network requests; the effective timeout is max.fetch.wait + socket.timeout.ms.
socket.timeout.ms= 30 * 1000
# Socket receive buffer.
socket.receive.buffer.bytes=64 * 1024
# Maximum bytes fetched from each topic partition per request.
fetch.message.max.bytes = 1024 * 1024
# If true, the consumer periodically commits its offsets to ZooKeeper, so a
# restarted consumer resumes from the last committed offset.
auto.commit.enable = true
# Interval between automatic offset commits.
auto.commit.interval.ms = 60 * 1000
# Maximum number of message chunks buffered for consumption; each chunk can be
# up to fetch.message.max.bytes.
queued.max.message.chunks = 10
# When a new consumer joins the group, a rebalance redistributes partitions to
# consumers; if membership keeps changing, it is retried up to this many times.
rebalance.max.retries = 4
# Backoff between rebalance retries.
rebalance.backoff.ms = 2000
# Backoff before refreshing partition leadership after a leader is lost.
refresh.leader.backoff.ms
# Minimum bytes the server returns for a fetch request; default 1.
fetch.min.bytes = 1
# Maximum time the server blocks when fetch.min.bytes is not yet satisfied.
fetch.wait.max.ms = 100
# Throw a timeout to the consumer if no message arrives within this interval; -1 blocks indefinitely.
consumer.timeout.ms = -1
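As a sketch of how these settings fit together, a minimal consumer.properties for this old ZooKeeper-based consumer could look as follows (values are illustrative, not recommendations):

cat > consumer.properties << 'EOF'
# Illustrative old-consumer configuration
group.id=example-group
zookeeper.connect=debugo01:2182,debugo02:2182,debugo03:2182
auto.offset.reset=largest
auto.commit.enable=true
auto.commit.interval.ms=60000
EOF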
############################# Producer #############################
# The essential producer settings are:
# metadata.broker.list
# request.required.acks
# producer.type
# serializer.class
# Brokers used to bootstrap cluster metadata (topics, partitions and replicas);
# format host1:port1,host2:port2. A VIP may also be used.
metadata.broker.list
# Acknowledgement level for sends:
# 0: never wait for an ack; lowest latency but weakest durability (messages are
#    lost if the server fails; delivery is guaranteed only at the TCP level)
# 1: wait until the leader has written the message to its local log
# -1: wait until all in-sync replicas have the message; strongest durability
request.required.acks = 0
# Timeout waiting for an acknowledgement.
request.timeout.ms = 10000
# Socket send buffer.
send.buffer.bytes=100*1024
# Serializer for message keys; defaults to serializer.class when unset.
key.serializer.class
# Partitioner used to route messages to partitions; the default hashes the key.
partitioner.class=kafka.producer.DefaultPartitioner
# Compression codec: none, gzip or snappy.
compression.codec = none
# Restrict compression to specific topics; null compresses all topics once a codec is set.
compressed.topics=null
# Number of retries after a failed send.
message.send.max.retries = 3
# Backoff before each retry, giving the cluster time to elect new leaders.
retry.backoff.ms = 100
# How often to refresh topic metadata; 0 refreshes after every message (not
# recommended). Metadata is also refreshed after every send failure.
topic.metadata.refresh.interval.ms = 600 * 1000
# User-defined client ID attached to requests for tracing.
client.id=""
# In async mode, how long messages are buffered before being sent as one batch;
# e.g. 100 means batches are sent every 100 ms, improving throughput at the
# cost of up to 100 ms of extra latency.
queue.buffering.max.ms = 5000
# In async mode, the maximum number of messages buffered before the producer
# must block or drop.
queue.buffering.max.messages = 10000
# In async mode, how long enqueue blocks when the buffer is full: 0 drops the
# message immediately, -1 blocks indefinitely.
queue.enqueue.timeout.ms = -1
# In async mode, the number of messages sent per batch; a batch is also sent
# when queue.buffering.max.messages or queue.buffering.max.ms is reached.
batch.num.messages=200
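Correspondingly, a minimal producer.properties for the old Scala producer might look like this sketch (values are illustrative):

cat > producer.properties << 'EOF'
# Illustrative old-producer configuration
metadata.broker.list=3.1.101.33:9092,3.1.101.34:9092,3.1.101.35:9092
request.required.acks=1
producer.type=async
serializer.class=kafka.serializer.StringEncoder
compression.codec=none
batch.num.messages=200
EOF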
The broker-side parameters once more, in more detail:

broker.id =0
Unique, non-negative identifier of each broker in the cluster. If the server's IP address changes but broker.id stays the same, consumers are unaffected.
log.dirs=/data/kafka-logs
Directories in which Kafka stores data; multiple comma-separated paths are allowed, e.g. /data/kafka-logs-1,/data/kafka-logs-2.
port =9092
Port on which the broker server accepts client connections.
message.max.bytes =6525000
Maximum size of a message body, in bytes.
num.network.threads =4
Number of network threads handling broker requests; usually does not need changing.
num.io.threads =8
Number of I/O threads; should be at least the number of log directories.
background.threads =4
Number of background threads for housekeeping such as file deletion; usually does not need changing.
queued.max.requests =500
Maximum number of requests queued while waiting for the I/O threads; when the queue is full, the network threads stop accepting new requests.
host.name
Hostname of the broker. If set, the broker binds only to this address; if unset, it binds to all interfaces and publishes one of them to ZooKeeper.
socket.send.buffer.bytes=100*1024
Socket send buffer (SO_SNDBUF).
socket.receive.buffer.bytes =100*1024
Socket receive buffer (SO_RCVBUF).
socket.request.max.bytes =100*1024*1024
Maximum size of a socket request, protecting the server against OOM; message.max.bytes must be smaller than socket.request.max.bytes, and the limit can be overridden per topic at creation time.
log.segment.bytes =1024*1024*1024
Size of a single segment file in a partition's log; when a segment exceeds this size a new one is rolled. Per-topic override possible.
log.roll.hours =24*7
Roll a new segment after this many hours even if log.segment.bytes has not been reached. Per-topic override possible.
log.cleanup.policy = delete
Cleanup policy: delete or compact. Per-topic override possible.
log.retention.minutes=60*24
Retention time in minutes for the delete policy (here one day). Whichever of log.retention.bytes and log.retention.minutes is reached first triggers deletion. Per-topic override possible.
log.retention.bytes=-1
Size limit per partition; a topic's total limit is the partition count times log.retention.bytes. -1 means no size limit. Whichever of log.retention.bytes and log.retention.minutes is reached first triggers deletion. Per-topic override possible.
log.retention.check.interval.ms=5minutes
How often to scan segments and apply the cleanup policy (log.cleanup.policy).
log.cleaner.enable=false
Whether log compaction is enabled.
log.cleaner.threads = 2
Number of threads running log compaction.
log.cleaner.io.max.bytes.per.second=None
Throttle on compaction I/O; None means unlimited.
log.cleaner.dedupe.buffer.size=500*1024*1024
Total memory for the deduplication buffer used during compaction; larger is better.
log.cleaner.io.buffer.size=512*1024
I/O buffer size used during compaction; usually does not need changing.
log.cleaner.io.buffer.load.factor =0.9
Load factor of the compaction hash table; usually does not need changing.
log.cleaner.backoff.ms =15000
How often the cleaner checks whether any log needs compacting.
log.cleaner.min.cleanable.ratio=0.5
Minimum ratio of dirty to total log before compaction runs; a larger value means less frequent but more efficient cleaning. Per-topic override possible.
log.cleaner.delete.retention.ms =1day
How long compacted records (and delete tombstones) are retained, and thus the longest time clients have to consume them; unlike log.retention.minutes, this governs compacted rather than uncompacted data. Per-topic override possible.
log.index.size.max.bytes =10*1024*1024
Maximum size of a segment's index file. Per-topic override possible.
log.index.interval.bytes =4096
Interval (bytes) between offset index entries; a denser index makes locating an offset after a fetch faster but uses more space. Usually left at the default.
log.flush.interval.messages=None
Number of messages accumulated before the log is fsynced to disk. Disk I/O is slow but necessary for durability, so this is a trade-off between reliability and performance: too large and each fsync blocks I/O for a long time; too small and fsyncs become frequent, adding latency to client requests. Messages not yet fsynced are lost if the physical server fails.
log.flush.scheduler.interval.ms =3000
How often the flusher checks whether any log needs to be fsynced to disk.
log.flush.interval.ms = None
Maximum time a message may remain unflushed before an fsync is forced, regardless of message count; the same reliability/performance trade-off as above applies.
log.delete.delay.ms =60000
Delay before a deleted segment file is physically removed from the filesystem.
log.flush.offset.checkpoint.interval.ms =60000
How often to checkpoint the last flush point as the log recovery point; usually does not need changing.
auto.create.topics.enable =true
Whether to auto-create a topic when a client references one that does not exist; if false, topics must be created explicitly.
default.replication.factor =1
Default replication factor for auto-created topics.
num.partitions =1
Default number of partitions for auto-created topics; can be overridden at topic creation time.
The following parameters govern leaders and replicas in Kafka:

controller.socket.timeout.ms =30000
Socket timeout used by the controller when coordinating partition leaders and replicas.
controller.message.queue.size=10
Message queue size for controller-to-replica communication.
replica.lag.time.max.ms =10000
If a replica has not fetched from the partition leader within this time, the leader removes it from the ISR (in-sync replicas) and treats it as dead.
replica.lag.max.messages =4000
If a follower falls more than this many messages behind the leader, the follower (that is, the partition replica) is considered failed.
## In normal operation, network latency or broken connections always cause some replication lag.
## If the lag grows too large, the leader assumes the follower is too slow or network-bound and removes it from the in-sync set.
## The removed follower keeps fetching and rejoins once it has caught up.
## In environments with few brokers or limited network capacity, raise this value.
replica.socket.timeout.ms=30*1000
Socket timeout used by followers when communicating with the leader.
replica.socket.receive.buffer.bytes=64*1024
Socket receive buffer used when fetching from the leader.
replica.fetch.max.bytes =1024*1024
Maximum bytes a replica fetches per request.
replica.fetch.wait.max.ms =500
Maximum time a replica's fetch request waits at the leader for new data.
replica.fetch.min.bytes =1
Minimum bytes a fetch must return; if the leader has less, the request blocks until enough data arrives or the wait time expires.
num.replica.fetchers=1
Number of threads a follower uses to fetch from a leader; more threads increase the follower's I/O parallelism.
replica.high.watermark.checkpoint.interval.ms =5000
How often each replica checkpoints its high watermark to disk.
controlled.shutdown.enable =false
Whether controlled shutdown is allowed: if true, a stopping broker first migrates its leaderships to other brokers, shortening the unavailability window.
controlled.shutdown.max.retries =3
Number of attempts at a controlled shutdown.
controlled.shutdown.retry.backoff.ms =5000
Backoff between controlled-shutdown attempts.
leader.imbalance.per.broker.percentage =10
Allowed percentage of leader imbalance per broker; above this ratio, partition leadership is rebalanced.
leader.imbalance.check.interval.seconds =300
How often to check for leader imbalance.
offset.metadata.max.bytes
Maximum metadata size a client may store with an offset commit.
ZooKeeper-related parameters:

zookeeper.connect = localhost:2181
ZooKeeper connection string; multiple addresses are comma-separated: hostname1:port1,hostname2:port2,hostname3:port3.
zookeeper.session.timeout.ms=6000
ZooKeeper session timeout; if the broker misses heartbeats within this window it is declared dead. Too small a value risks false positives under load; too large a value delays failure detection.
zookeeper.connection.timeout.ms =6000
Timeout for establishing the ZooKeeper connection.
zookeeper.sync.time.ms =2000
How far a ZooKeeper follower may lag behind its leader.
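To see which values a running broker actually uses, the 2.7 tooling can dump the effective broker configuration; a sketch against the first node of the plan above (the --all flag, which includes static defaults, is assumed to be available in this Kafka version):

docker exec kafka kafka-configs.sh \
  --bootstrap-server 3.1.101.33:9092 \
  --entity-type brokers --entity-name 0 \
  --describe --all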