Docker-Compose配置zookeeper+KaFka+CMAK简单集群
基于Ubuntu 24.04 LTS稳定版进行测试安装,仅供参考学习。
1. 本地DNS解析管理
# 编辑hosts文件
sudo nano /etc/hosts
# 添加以下三个主机IP
192.168.186.77 zoo1 k1
192.168.186.18 zoo2 k2
192.168.186.216 zoo3 k3
注:zoo1是192.168.186.77的别名,zoo2是192.168.186.18的别名,zoo3是192.168.186.216的别名,IP自行修改即可,其他配置可以直接使用。
2. 项目结构
注:本文将宿主机的当前目录下的data目录挂载到容器的/data目录,data目录是需要自己创建且跟docker-compose.yml是平级关系,读者可以根据需求自行调整配置文件。
3. docker-compose.yml (Zookeeper集群)
3.1 主机1 (IP:192.168.186.77)
version: '3.8'  # Compose file format version
services:
  zookeeper:  # one ZooKeeper node of a 3-node ensemble
    image: "zookeeper:latest"  # official ZooKeeper image from Docker Hub
    hostname: zoo1  # hostname inside the container
    container_name: zookeeper1  # Docker container name
    ports:
      - "2181:2181"  # client connection port
      - "2888:2888"  # follower <-> leader data-sync port
      - "3888:3888"  # leader-election port
    environment:
      ZOO_MY_ID: "1"  # unique server id of this node (quoted so YAML keeps it a string)
      ZOO_SERVERS: "server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181"  # ensemble membership
      # NOTE: the official zookeeper image does not read a ZOOKEEPER_CONF variable.
      # In the original file these settings were placed in a `ZOOKEEPER_CONF: |` block
      # scalar, where the inline "# ..." comments also become part of the literal value.
      # The image's supported ZOO_* variables are used instead:
      ZOO_TICK_TIME: "2000"  # base time unit in ms
      ZOO_INIT_LIMIT: "10"  # ticks a follower may take to sync with the leader at startup
      ZOO_SYNC_LIMIT: "5"  # ticks allowed between a follower request and the leader ack
      ZOO_AUTOPURGE_SNAPRETAINCOUNT: "3"  # number of snapshots kept by autopurge
      ZOO_AUTOPURGE_PURGEINTERVAL: "1"  # purge interval in hours
    volumes:
      - ./data:/data  # persist ZooKeeper data under ./data on the host
    restart: always  # always restart the container
3.2 主机2 (IP:192.168.186.18)
version: '3.8'  # Compose file format version
services:
  zookeeper:  # one ZooKeeper node of a 3-node ensemble
    image: "zookeeper:latest"
    hostname: zoo2
    container_name: zookeeper2
    ports:
      - "2181:2181"  # client connection port
      - "2888:2888"  # follower <-> leader data-sync port
      - "3888:3888"  # leader-election port
    environment:
      ZOO_MY_ID: "2"  # unique server id of this node (quoted so YAML keeps it a string)
      ZOO_SERVERS: "server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181"  # ensemble membership
      # NOTE: the official zookeeper image does not read a ZOOKEEPER_CONF variable;
      # the supported ZOO_* variables are used instead (the original block scalar was ignored).
      ZOO_TICK_TIME: "2000"  # base time unit in ms
      ZOO_INIT_LIMIT: "10"  # startup sync allowance (in ticks)
      ZOO_SYNC_LIMIT: "5"  # follower/leader heartbeat allowance (in ticks)
      ZOO_AUTOPURGE_SNAPRETAINCOUNT: "3"  # snapshots kept by autopurge
      ZOO_AUTOPURGE_PURGEINTERVAL: "1"  # purge interval in hours
    volumes:
      - ./data:/data  # persist ZooKeeper data under ./data on the host
    restart: always
3.3 主机3 (IP:192.168.186.216)
version: '3.8'  # Compose file format version
services:
  zookeeper:  # one ZooKeeper node of a 3-node ensemble
    image: "zookeeper:latest"
    hostname: zoo3
    container_name: zookeeper3
    ports:
      - "2181:2181"  # client connection port
      - "2888:2888"  # follower <-> leader data-sync port
      - "3888:3888"  # leader-election port
    environment:
      ZOO_MY_ID: "3"  # unique server id of this node (quoted so YAML keeps it a string)
      ZOO_SERVERS: "server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181"  # ensemble membership
      # NOTE: the official zookeeper image does not read a ZOOKEEPER_CONF variable;
      # the supported ZOO_* variables are used instead (the original block scalar was ignored).
      ZOO_TICK_TIME: "2000"  # base time unit in ms
      ZOO_INIT_LIMIT: "10"  # startup sync allowance (in ticks)
      ZOO_SYNC_LIMIT: "5"  # follower/leader heartbeat allowance (in ticks)
      ZOO_AUTOPURGE_SNAPRETAINCOUNT: "3"  # snapshots kept by autopurge
      ZOO_AUTOPURGE_PURGEINTERVAL: "1"  # purge interval in hours
    volumes:
      - ./data:/data  # persist ZooKeeper data under ./data on the host
    restart: always
3.4 运行结果
4.1 主机1
liber@liber-VMware-Virtual-Platform:/home/zookeeper$ docker-compose up -d
[+] Running 9/9
✔ zookeeper 8 layers [⣿⣿⣿⣿⣿⣿⣿⣿] 0B/0B Pulled.......
liber@liber-VMware-Virtual-Platform:/home/zookeeper$ docker-compose ps
NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
zookeeper3 zookeeper:latest "/docker-entrypoint.…" zookeeper 29 seconds ago Up 24 seconds 0.0.0.0:2181->2181/tcp, :::2181->2181/tcp, 0.0.0.0:2888->2888/tcp, :::2888->2888/tcp, 0.0.0.0:3888->3888/tcp, :::3888->3888/tcp, 8080/tcp
liber@liber-VMware-Virtual-Platform:/home/zookeeper$ docker-compose exec zookeeper zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower
4.2 主机2
liber@liber-VMware-Virtual-Platform:/home/zookeeper$ docker-compose up -d
[+] Running 0/1
[+] Running 9/9 Pulling .......
liber@liber-VMware-Virtual-Platform:/home/zookeeper$ docker-compose exec zookeeper zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: leader
4.3 主机3
liber@liber-VMware-Virtual-Platform:/home/zookeeper$ docker-compose up -d
[+] Running 9/9
✔ zookeeper 8 layers [⣿⣿⣿⣿⣿⣿⣿⣿] 0B/0B Pulled ......
liber@liber-VMware-Virtual-Platform:/home/zookeeper$ docker-compose ps
NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
zookeeper3 zookeeper:latest "/docker-entrypoint.…" zookeeper 29 seconds ago Up 24 seconds 0.0.0.0:2181->2181/tcp, :::2181->2181/tcp, 0.0.0.0:2888->2888/tcp, :::2888->2888/tcp, 0.0.0.0:3888->3888/tcp, :::3888->3888/tcp, 8080/tcp
liber@liber-VMware-Virtual-Platform:/home/zookeeper$ docker-compose logs
zookeeper3 | ZooKeeper JMX enabled by default
zookeeper3 | Using config: /conf/zoo.cfg
zookeeper3 | 2024-07-21 03:17:03,935 [myid:] - INFO [main:QuorumPeerConfig@174] - Reading configuration from: /conf/zoo.cfg
zookeeper3 | 2024-07-21 03:17:03,948 [myid:] - INFO [main:QuorumPeerConfig@435] - clientPort is not set
zookeeper3 | 2024-07-21 03:17:03,949 [myid:] - INFO [main:QuorumPeerConfig@448] - secureClientPort is not set
zookeeper3 | 2024-07-21 03:17:03,949 [myid:] - INFO [main:QuorumPeerConfig@464] - observerMasterPort is not set
zookeeper3 | 2024-07-21 03:17:03,953 [myid:] - INFO [main:QuorumPeerConfig@481] - metricsProvider.className is org.apache.zookeeper.metrics.impl.DefaultMetricsProvider
zookeeper3 | 2024-07-21 03:17:04,021 [myid:3] - INFO [main:DatadirCleanupManager@78] - autopurge.snapRetainCount set to 3
.......
liber@liber-VMware-Virtual-Platform:/home/zookeeper$ docker-compose exec zookeeper zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower
4.4 标红代码的作用
docker-compose up -d命令会启动docker-compose.yml文件中定义的所有服务。如果服务所依赖的镜像尚未被下载,Docker Compose会自动从Docker Hub或指定的镜像仓库拉取,-d
参数确保容器在后台运行。docker-compose logs查看正在运行的服务的日志。docker-compose ps查看容器的状态。docker-compose exec zookeeper zkServer.sh status获取 Zookeeper 实例的当前状态和角色信息
5.docker-compose.yml(zookeeper集群+Kafka集群)
5.1 主机1 (IP:192.168.186.77)
services:
  zookeeper:
    image: "zookeeper:latest"  # official ZooKeeper image from Docker Hub
    hostname: zoo1  # hostname inside the container
    container_name: zookeeper1
    ports:
      - "2181:2181"  # client connection port
      - "2888:2888"  # quorum peer communication
      - "3888:3888"  # leader election
    environment:
      ZOO_MY_ID: "1"  # unique ensemble id (quoted so YAML keeps it a string)
      ZOO_SERVERS: "server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181"  # ensemble membership
    volumes:
      - ./data:/data  # persist ZooKeeper data on the host
    restart: always

  kafka:
    image: "wurstmeister/kafka:latest"  # wurstmeister/kafka image from Docker Hub
    hostname: k1
    container_name: kafka1
    ports:
      - "9092:9092"  # Kafka broker listener port
    environment:
      KAFKA_BROKER_ID: "1"  # unique broker id (quoted, consistent with the other env strings)
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2181,zoo3:2181"  # ZooKeeper ensemble used for broker metadata
      KAFKA_LISTENERS: "PLAINTEXT://0.0.0.0:9092"  # bind address inside the container
      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://k1:9092"  # address advertised to clients; k1 must resolve (see /etc/hosts)
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"  # allow automatic topic creation
    volumes:
      # SECURITY NOTE: mounting the Docker socket gives this container root-equivalent
      # control of the host; keep it only if you rely on the image's docker-based features.
      - /var/run/docker.sock:/var/run/docker.sock
    depends_on:
      - zookeeper  # start order only; does not wait for ZooKeeper to be ready
    restart: always
5.2 主机2 (IP:192.168.186.18)
services:
  zookeeper:
    image: "zookeeper:latest"
    hostname: zoo2
    container_name: zookeeper2
    ports:
      - "2181:2181"  # client connection port
      - "2888:2888"  # quorum peer communication
      - "3888:3888"  # leader election
    environment:
      ZOO_MY_ID: "2"  # unique ensemble id (quoted so YAML keeps it a string)
      ZOO_SERVERS: "server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181"  # ensemble membership
    volumes:
      - ./data:/data  # persist ZooKeeper data on the host
    restart: always

  kafka:
    image: "wurstmeister/kafka:latest"
    hostname: k2
    container_name: kafka2
    ports:
      - "9092:9092"  # Kafka broker listener port
    environment:
      KAFKA_BROKER_ID: "2"  # unique broker id (quoted, consistent with the other env strings)
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2181,zoo3:2181"  # ZooKeeper ensemble used for broker metadata
      KAFKA_LISTENERS: "PLAINTEXT://0.0.0.0:9092"  # bind address inside the container
      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://k2:9092"  # address advertised to clients; k2 must resolve (see /etc/hosts)
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"  # allow automatic topic creation
    volumes:
      # SECURITY NOTE: mounting the Docker socket gives this container root-equivalent
      # control of the host; keep it only if you rely on the image's docker-based features.
      - /var/run/docker.sock:/var/run/docker.sock
    depends_on:
      - zookeeper  # start order only; does not wait for ZooKeeper to be ready
    restart: always
5.3 主机3 (IP:192.168.186.216)
services:
  zookeeper:
    image: "zookeeper:latest"
    hostname: zoo3
    container_name: zookeeper3
    ports:
      - "2181:2181"  # client connection port
      - "2888:2888"  # quorum peer communication
      - "3888:3888"  # leader election
    environment:
      ZOO_MY_ID: "3"  # unique ensemble id (quoted so YAML keeps it a string)
      ZOO_SERVERS: "server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181"  # ensemble membership
    volumes:
      - ./data:/data  # persist ZooKeeper data on the host
    restart: always

  kafka:
    image: "wurstmeister/kafka:latest"
    hostname: k3
    container_name: kafka3
    ports:
      - "9092:9092"  # Kafka broker listener port
    environment:
      KAFKA_BROKER_ID: "3"  # unique broker id (quoted, consistent with the other env strings)
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2181,zoo3:2181"  # ZooKeeper ensemble used for broker metadata
      KAFKA_LISTENERS: "PLAINTEXT://0.0.0.0:9092"  # bind address inside the container
      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://k3:9092"  # address advertised to clients; k3 must resolve (see /etc/hosts)
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"  # allow automatic topic creation
    volumes:
      # SECURITY NOTE: mounting the Docker socket gives this container root-equivalent
      # control of the host; keep it only if you rely on the image's docker-based features.
      - /var/run/docker.sock:/var/run/docker.sock
    depends_on:
      - zookeeper  # start order only; does not wait for ZooKeeper to be ready
    restart: always
5.4 验证kafka集群
5.4.1 进入Kafka容器
docker-compose exec kafka /bin/bash
5.4.2 查看集群中的 brokers
kafka-broker-api-versions.sh --bootstrap-server localhost:9092
liber@liber-VMware-Virtual-Platform:/home/zookeeper$ docker-compose exec kafka /bin/bash
bash-5.1# kafka-broker-api-versions.sh --bootstrap-server localhost:9092
k3:9092 (id: 3 rack: null) -> (
Produce(0): 0 to 9 [usable: 9],
......
OffsetDelete(47): 0 [usable: 0],
DescribeClientQuotas(48): 0 to 1 [usable: 1],
AlterClientQuotas(49): 0 to 1 [usable: 1],
DescribeUserScramCredentials(50): 0 [usable: 0],
AlterUserScramCredentials(51): 0 [usable: 0],
AlterIsr(56): 0 [usable: 0],
UpdateFeatures(57): 0 [usable: 0],
DescribeCluster(60): 0 [usable: 0],
DescribeProducers(61): 0 [usable: 0]
)
k2:9092 (id: 2 rack: null) -> (
Produce(0): 0 to 9 [usable: 9],
Fetch(1): 0 to 12 [usable: 12],
ListOffsets(2): 0 to 6 [usable: 6],
Metadata(3): 0 to 11 [usable: 11],
........
InitProducerId(22): 0 to 4 [usable: 4],
OffsetForLeaderEpoch(23): 0 to 4 [usable: 4],
AddPartitionsToTxn(24): 0 to 3 [usable: 3],
AddOffsetsToTxn(25): 0 to 3 [usable: 3],
EndTxn(26): 0 to 3 [usable: 3],
WriteTxnMarkers(27): 0 to 1 [usable: 1],
TxnOffsetCommit(28): 0 to 3 [usable: 3],
DescribeAcls(29): 0 to 2 [usable: 2],
CreateAcls(30): 0 to 2 [usable: 2],
DeleteAcls(31): 0 to 2 [usable: 2],
DescribeConfigs(32): 0 to 4 [usable: 4],
AlterConfigs(33): 0 to 2 [usable: 2],
AlterReplicaLogDirs(34): 0 to 2 [usable: 2],
DescribeLogDirs(35): 0 to 2 [usable: 2],
SaslAuthenticate(36): 0 to 2 [usable: 2],
CreatePartitions(37): 0 to 3 [usable: 3],
CreateDelegationToken(38): 0 to 2 [usable: 2],
RenewDelegationToken(39): 0 to 2 [usable: 2],
ExpireDelegationToken(40): 0 to 2 [usable: 2],
DescribeDelegationToken(41): 0 to 2 [usable: 2],
DeleteGroups(42): 0 to 2 [usable: 2],
ElectLeaders(43): 0 to 2 [usable: 2],
IncrementalAlterConfigs(44): 0 to 1 [usable: 1],
AlterPartitionReassignments(45): 0 [usable: 0],
ListPartitionReassignments(46): 0 [usable: 0],
OffsetDelete(47): 0 [usable: 0],
DescribeClientQuotas(48): 0 to 1 [usable: 1],
AlterClientQuotas(49): 0 to 1 [usable: 1],
DescribeUserScramCredentials(50): 0 [usable: 0],
AlterUserScramCredentials(51): 0 [usable: 0],
AlterIsr(56): 0 [usable: 0],
UpdateFeatures(57): 0 [usable: 0],
DescribeCluster(60): 0 [usable: 0],
DescribeProducers(61): 0 [usable: 0]
)
k1:9092 (id: 1 rack: null) -> (
Produce(0): 0 to 9 [usable: 9],
Fetch(1): 0 to 12 [usable: 12],
ListOffsets(2): 0 to 6 [usable: 6],
Metadata(3): 0 to 11 [usable: 11],
LeaderAndIsr(4): 0 to 5 [usable: 5],
StopReplica(5): 0 to 3 [usable: 3],
UpdateMetadata(6): 0 to 7 [usable: 7],
ControlledShutdown(7): 0 to 3 [usable: 3],
OffsetCommit(8): 0 to 8 [usable: 8],
.........
InitProducerId(22): 0 to 4 [usable: 4],
OffsetForLeaderEpoch(23): 0 to 4 [usable: 4],
.........
UpdateFeatures(57): 0 [usable: 0],
DescribeCluster(60): 0 [usable: 0],
DescribeProducers(61): 0 [usable: 0]
)
5.4.3 创建主题
kafka-topics.sh --create --topic test --partitions 1 --replication-factor 1 --bootstrap-server localhost:9092
5.4.4 查看主题详细信息
主机1创建:
liber@liber-VMware-Virtual-Platform:/home/zookeeper$ docker-compose exec kafka /bin/bash
bash-5.1# kafka-topics.sh --create --topic test --partitions 1 --replication-factor 1 --bootstrap-server localhost:9092
Error while executing topic command : Topic 'test' already exists.
[2024-07-21 07:06:13,536] ERROR org.apache.kafka.common.errors.TopicExistsException: Topic 'test' already exists.
(kafka.admin.TopicCommand$)
bash-5.1# kafka-topics.sh --describe --topic test --bootstrap-server localhost:9092
Topic: test TopicId: AIpDyj5rTlyuMXFTPmdQFg PartitionCount: 1 ReplicationFactor: 1 Configs: segment.bytes=1073741824
Topic: test Partition: 0 Leader: 3 Replicas: 3 Isr: 3
主机2查询:
liber@liber-VMware-Virtual-Platform:/home/zookeeper$ docker-compose exec kafka /bin/bash
bash-5.1# kafka-topics.sh --describe --topic test --bootstrap-server localhost:9092
Topic: test TopicId: AIpDyj5rTlyuMXFTPmdQFg PartitionCount: 1 ReplicationFactor: 1 Configs: segment.bytes=1073741824
Topic: test Partition: 0 Leader: 3 Replicas: 3 Isr: 3
5.4.5 其他验证方法
# 进入zookeeper容器
docker-compose exec zookeeper zkCli.sh -server localhost:2181
# 查询brokers所有节点id
ls /brokers/ids
liber@liber-VMware-Virtual-Platform:/home/zookeeper$ docker-compose exec zookeeper zkCli.sh -server localhost:2181
Connecting to localhost:2181
2024-07-21 07:22:47,180 [myid:] - INFO [main:Environment@98] - Client environment:zookeeper.version=3.7.0-e3704b390a6697bfdf4b0bef79e3da7a4f6bac4b, built on 2021-03-17 09:46 UTC
2024-07-21 07:22:47,189 [myid:] - INFO [main:Environment@98] - Client environment:host.name=zoo2
2024-07-21 07:22:47,191 [myid:] - INFO [main:Environment@98] - Client environment:java.version=11.0.13
2024-07-21 07:22:47,199 [myid:] - INFO [main:Environment@98] - Client environment:java.vendor=Oracle Corporation
2024-07-21 07:22:47,201 [myid:] - INFO [main:Environment@98] - Client environment:java.home=/usr/local/openjdk-11
2024-07-21 07:22:47,204 [myid:] - INFO [main:Environment@98] - Client environment:java.class.path=/apache-zookeeper-3.7.0-bin/bin/../zookeeper-server/target/classes:/apache-zookeeper-3.7.0-bin/bin/../build/classes:/apache-zookeeper-3.7.0-bin/bin/../zookeeper-server/target/lib/*.jar:/apache-zookeeper-3.7.0-bin/bin/../build/lib/*.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/zookeeper-prometheus-metrics-3.7.0.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/zookeeper-jute-3.7.0.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/zookeeper-3.7.0.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/snappy-java-1.1.7.7.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/slf4j-log4j12-1.7.30.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/slf4j-api-1.7.30.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/simpleclient_servlet-0.9.0.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/simpleclient_hotspot-0.9.0.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/simpleclient_common-0.9.0.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/simpleclient-0.9.0.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/netty-transport-native-unix-common-4.1.59.Final.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/netty-transport-native-epoll-4.1.59.Final.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/netty-transport-4.1.59.Final.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/netty-resolver-4.1.59.Final.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/netty-handler-4.1.59.Final.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/netty-common-4.1.59.Final.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/netty-codec-4.1.59.Final.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/netty-buffer-4.1.59.Final.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/metrics-core-4.1.12.1.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/log4j-1.2.17.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/jline-2.14.6.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/jetty-util-ajax-9.4.38.v20210224.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/jetty-util-9.4.38.v20210224.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/jetty-servlet-9.4.38.v20210224.jar:/apache-zookee
per-3.7.0-bin/bin/../lib/jetty-server-9.4.38.v20210224.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/jetty-security-9.4.38.v20210224.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/jetty-io-9.4.38.v20210224.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/jetty-http-9.4.38.v20210224.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/javax.servlet-api-3.1.0.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/jackson-databind-2.10.5.1.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/jackson-core-2.10.5.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/jackson-annotations-2.10.5.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/commons-cli-1.4.jar:/apache-zookeeper-3.7.0-bin/bin/../lib/audience-annotations-0.12.0.jar:/apache-zookeeper-3.7.0-bin/bin/../zookeeper-*.jar:/apache-zookeeper-3.7.0-bin/bin/../zookeeper-server/src/main/resources/lib/*.jar:/conf:
2024-07-21 07:22:47,207 [myid:] - INFO [main:Environment@98] - Client environment:java.library.path=/usr/java/packages/lib:/usr/lib64:/lib64:/lib:/usr/lib
2024-07-21 07:22:47,209 [myid:] - INFO [main:Environment@98] - Client environment:java.io.tmpdir=/tmp
2024-07-21 07:22:47,209 [myid:] - INFO [main:Environment@98] - Client environment:java.compiler=<NA>
2024-07-21 07:22:47,210 [myid:] - INFO [main:Environment@98] - Client environment:os.name=Linux
2024-07-21 07:22:47,211 [myid:] - INFO [main:Environment@98] - Client environment:os.arch=amd64
2024-07-21 07:22:47,211 [myid:] - INFO [main:Environment@98] - Client environment:os.version=6.8.0-31-generic
2024-07-21 07:22:47,213 [myid:] - INFO [main:Environment@98] - Client environment:user.name=root
2024-07-21 07:22:47,213 [myid:] - INFO [main:Environment@98] - Client environment:user.home=/root
2024-07-21 07:22:47,215 [myid:] - INFO [main:Environment@98] - Client environment:user.dir=/apache-zookeeper-3.7.0-bin
2024-07-21 07:22:47,215 [myid:] - INFO [main:Environment@98] - Client environment:os.memory.free=61MB
2024-07-21 07:22:47,223 [myid:] - INFO [main:Environment@98] - Client environment:os.memory.max=256MB
2024-07-21 07:22:47,224 [myid:] - INFO [main:Environment@98] - Client environment:os.memory.total=64MB
2024-07-21 07:22:47,234 [myid:] - INFO [main:ZooKeeper@637] - Initiating client connection, connectString=localhost:2181 sessionTimeout=30000 watcher=org.apache.zookeeper.ZooKeeperMain$MyWatcher@7946e1f4
2024-07-21 07:22:47,249 [myid:] - INFO [main:X509Util@77] - Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation
2024-07-21 07:22:47,266 [myid:] - INFO [main:ClientCnxnSocket@239] - jute.maxbuffer value is 1048575 Bytes
2024-07-21 07:22:47,292 [myid:] - INFO [main:ClientCnxn@1726] - zookeeper.request.timeout value is 0. feature enabled=false
Welcome to ZooKeeper!
JLine support is enabled
2024-07-21 07:22:47,384 [myid:localhost:2181] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@1171] - Opening socket connection to server localhost/0:0:0:0:0:0:0:1:2181.
2024-07-21 07:22:47,401 [myid:localhost:2181] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@1173] - SASL config status: Will not attempt to authenticate using SASL (unknown error)
2024-07-21 07:22:47,449 [myid:localhost:2181] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@1005] - Socket connection established, initiating session, client: /0:0:0:0:0:0:0:1:36038, server: localhost/0:0:0:0:0:0:0:1:2181
2024-07-21 07:22:47,499 [myid:localhost:2181] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@1438] - Session establishment complete on server localhost/0:0:0:0:0:0:0:1:2181, session id = 0x200023e46880000, negotiated timeout = 30000WATCHER::
WatchedEvent state:SyncConnected type:None path:null
[zk: localhost:2181(CONNECTED) 0] ls /brokers/ids
[1, 2, 3]
[zk: localhost:2181(CONNECTED) 1]
6. CMAK管理工具
6.1 主机1 (IP:192.168.186.77)
services:
  zookeeper:
    image: "zookeeper:latest"
    hostname: zoo1
    container_name: zookeeper1
    ports:
      - "2181:2181"  # client connection port
      - "2888:2888"  # quorum peer communication
      - "3888:3888"  # leader election
    environment:
      ZOO_MY_ID: "1"  # unique ensemble id (quoted so YAML keeps it a string)
      ZOO_SERVERS: "server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181"  # ensemble membership
    volumes:
      - ./data:/data  # persist ZooKeeper data on the host
    restart: always

  kafka:
    image: "wurstmeister/kafka:latest"
    hostname: k1
    container_name: kafka1
    ports:
      - "9092:9092"  # Kafka broker listener port
    environment:
      KAFKA_BROKER_ID: "1"  # unique broker id (quoted, consistent with the other env strings)
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2181,zoo3:2181"  # ZooKeeper ensemble used for broker metadata
      KAFKA_LISTENERS: "PLAINTEXT://0.0.0.0:9092"  # bind address inside the container
      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://k1:9092"  # address advertised to clients; k1 must resolve (see /etc/hosts)
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"  # allow automatic topic creation
    volumes:
      # SECURITY NOTE: mounting the Docker socket gives this container root-equivalent
      # control of the host; keep it only if you rely on the image's docker-based features.
      - /var/run/docker.sock:/var/run/docker.sock
    depends_on:
      - zookeeper  # start order only; does not wait for ZooKeeper to be ready
    restart: always

  cmak:
    image: hlebalbau/kafka-manager:latest  # CMAK (Cluster Manager for Apache Kafka)
    hostname: cmak
    container_name: cmak
    ports:
      - "9000:9000"  # CMAK web UI port
    environment:
      ZK_HOSTS: "zoo1:2181,zoo2:2181,zoo3:2181"  # ZooKeeper ensemble CMAK connects to
      APPLICATION_SECRET: "random-secret"  # application secret; replace with a real random value in production
    depends_on:
      - zookeeper  # start order only
    restart: always
注:只演示一个主机,如果需要自行修改配置文件。
6.2 浏览器访问
添加集群,主要是zookeeper的集群。
6.3 运行结果
7. 总结
基于Ubuntu 24.04 LTS稳定版进行测试安装,仅供参考学习。
更多推荐
所有评论(0)