Create a Kafka cluster for development with Docker Compose

[root@localhost ~]# mkdir kafka-cluster
[root@localhost ~]# cd kafka-cluster
[root@localhost kafka-cluster]# wget https://downloads.apache.org/kafka/3.3.1/kafka_2.13-3.3.1.tgz

kafka_2.13-3.3.1.tgz 100%[============================>] 100.19M 2.72MB/s in 45s

2022-11-15 10:22:59 (2.24 MB/s) - ‘kafka_2.13-3.3.1.tgz’ saved [105053134/105053134]

[root@localhost kafka-cluster]# tar -xf kafka_2.13-3.3.1.tgz
[root@localhost kafka-cluster]# ll
total 102592
drwxr-xr-x. 7 root root 105 Sep 30 02:06 kafka_2.13-3.3.1
-rw-r--r--. 1 root root 105053134 Oct 3 06:05 kafka_2.13-3.3.1.tgz
[root@localhost kafka-cluster]#
[root@localhost kafka-cluster]# mkdir configfiles dockerfiles
Create the ZooKeeper config at configfiles/zookeeper.properties (essentially the stock file shipped with Kafka, with clientPort changed from the default 2181 to 14000):

# the directory where the snapshot is stored.
dataDir=/tmp/zookeeper
# the port at which the clients will connect
clientPort=14000
# disable the per-ip limit on the number of connections since this is a non-production config
maxClientCnxns=0
# Disable the adminserver by default to avoid port conflicts.
# Set the port to something non-conflicting if choosing to enable this
admin.enableServer=false
# admin.serverPort=8080
dockerfiles/ZookeeperDockerfile copies the extracted Kafka distribution into the image and starts ZooKeeper:

FROM openjdk:20-slim-buster
WORKDIR /app
COPY . .
CMD ./bin/zookeeper-server-start.sh config/zookeeper.properties
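If you want to test the image outside Compose, it can also be built by hand (zk-test is an arbitrary tag; the extracted Kafka directory is the build context, matching the compose file below):

# build the ZooKeeper image directly
docker build -t zk-test -f dockerfiles/ZookeeperDockerfile kafka_2.13-3.3.1

Note that a container run from this image alone would use the stock config baked in by COPY; the custom zookeeper.properties is bind-mounted only by Compose.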
docker-compose.yml, with a single zookeeper service to start:

version: "3.9"

networks:
  default:
    name: kafka
    driver: bridge

volumes:
  zookeeperdata:

services:
  zookeeper:
    build:
      context: kafka_2.13-3.3.1
      dockerfile: ../dockerfiles/ZookeeperDockerfile
    volumes:
      - zookeeperdata:/tmp/zookeeper
      - ./configfiles/zookeeper.properties:/app/config/zookeeper.properties
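Before building anything, docker compose config is a cheap sanity check: it prints the fully resolved configuration and fails loudly on indentation or path mistakes:

# validate and print the resolved compose configuration
docker compose config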
[root@localhost kafka-cluster]# tree .
.
├── configfiles
│   └── zookeeper.properties
├── docker-compose.yml
├── dockerfiles
│   └── ZookeeperDockerfile
├── kafka_2.13-3.3.1
│   ├── bin
│   │   ├── connect-distributed.sh
│   │   ├── connect-mirror-maker.sh
│   │   ├── connect-standalone.sh
│   │   ├── kafka-acls.sh
│   │   ├── kafka-broker-api-versions.sh
│   │   ├── kafka-cluster.sh
│   │   ├── kafka-configs.sh
│   │   ├── kafka-console-consumer.sh
│   │   ├── kafka-console-producer.sh
│   │   ├── kafka-consumer-groups.sh
│   │   ├── kafka-consumer-perf-test.sh
│   │   ├── kafka-delegation-tokens.sh
│   │   ├── kafka-delete-records.sh
│   │   ├── kafka-dump-log.sh
│   │   ├── kafka-features.sh
│   │   ├── kafka-get-offsets.sh
[root@localhost kafka-cluster]# docker compose up -d --build zookeeper
[+] Building 3.0s (8/8) FINISHED
=> [internal] load build definition from ZookeeperDockerfile 0.1s
=> => transferring dockerfile: 100B 0.0s
=> [internal] load .dockerignore 0.1s
=> => transferring context: 2B 0.0s
=> [internal] load metadata for docker.io/library/openjdk:20-slim-buster 2.7s
=> [internal] load build context 0.1s
=> => transferring context: 23.22kB 0.0s
=> [1/3] FROM docker.io/library/openjdk:20-slim-buster@sha256:07c677b7e30d0e09a97fb6c882a0ce23cdc2c3ae12d2cee6d836a29122e4e903 0.0s
=> CACHED [2/3] WORKDIR /app 0.0s
=> CACHED [3/3] COPY . . 0.0s
=> exporting to image 0.1s
=> => exporting layers 0.0s
=> => writing image sha256:ef70a2dd46b916a12f54004e8e29439c86b539f41440affee757653d2eb80404 0.0s
=> => naming to docker.io/library/kafka-cluster-zookeeper 0.0s

Use 'docker scan' to run Snyk tests against images to find vulnerabilities and learn how to fix them
[+] Running 1/1
⠿ Container kafka-cluster-zookeeper-1 Started 0.4s
[root@localhost kafka-cluster]#
[root@localhost kafka-cluster]# docker compose ps
NAME                        COMMAND                  SERVICE     STATUS    PORTS
kafka-cluster-zookeeper-1   "/bin/sh -c './bin/z…"   zookeeper   running
[root@localhost kafka-cluster]# docker compose logs -f zookeeper
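The logs should end with ZooKeeper binding to port 14000. To confirm it actually answers, one option is the zookeeper-shell utility that ships in the Kafka bin directory (a quick sketch; the service name and port match the files above):

# open a one-off shell inside the container and list the root znodes
docker compose exec zookeeper ./bin/zookeeper-shell.sh localhost:14000 ls /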
The broker image is nearly identical; dockerfiles/KafkaDockerfile starts the Kafka server instead:

FROM openjdk:20-slim-buster
WORKDIR /app
COPY . .
CMD ./bin/kafka-server-start.sh config/server.properties
configfiles/leesin/server.properties, the config for the first broker:

broker.id=1
zookeeper.connect=zookeeper:14000
zookeeper.connection.timeout.ms=18000
sasl.enabled.mechanisms=PLAIN
sasl.mechanism.inter.broker.protocol=PLAIN
listener.name.internal.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
username="admin" \
password="admin-secret" \
user_admin="admin-secret";

listener.name.external.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
username="admin" \
password="admin-secret" \
user_admin="admin-secret";

listeners=INTERNAL://:9092,EXTERNAL://:4000
advertised.listeners=INTERNAL://leesin:9092,EXTERNAL://10.0.2.15:4000
inter.broker.listener.name=INTERNAL
listener.security.protocol.map=INTERNAL:SASL_PLAINTEXT,EXTERNAL:SASL_PLAINTEXT
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/tmp/kafka-logs
num.partitions=3
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.flush.interval.messages=10000
log.flush.interval.ms=1000
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
group.initial.rebalance.delay.ms=0
The two lines that matter most here are the listener definitions, repeated below. INTERNAL binds port 9092 for broker-to-broker traffic inside the Docker network; EXTERNAL is advertised with the host's IP (10.0.2.15) so clients outside the network can connect through the published port:

listeners=INTERNAL://:9092,EXTERNAL://:4000
advertised.listeners=INTERNAL://leesin:9092,EXTERNAL://10.0.2.15:4000
docker-compose.yml, now with the leesin broker service added:

version: "3.9"

networks:
  default:
    name: kafka
    driver: bridge

volumes:
  leesindata:
  zookeeperdata:

services:
  zookeeper:
    build:
      context: kafka_2.13-3.3.1
      dockerfile: ../dockerfiles/ZookeeperDockerfile
    volumes:
      - zookeeperdata:/tmp/zookeeper
      - ./configfiles/zookeeper.properties:/app/config/zookeeper.properties

  leesin:
    build:
      context: kafka_2.13-3.3.1
      dockerfile: ../dockerfiles/KafkaDockerfile
    volumes:
      - leesindata:/tmp/kafka-logs
      - ./configfiles/leesin/server.properties:/app/config/server.properties
    ports:
      - 4000:4000
The ports mapping publishes port 4000 on the host, opening incoming traffic to the Kafka broker from outside the Docker network.
[root@localhost kafka-cluster]# docker compose up -d leesin
[root@localhost kafka-cluster]# docker compose ps
NAME                        COMMAND                  SERVICE     STATUS    PORTS
kafka-cluster-leesin-1      "/bin/sh -c './bin/k…"   leesin      running   0.0.0.0:4000->4000/tcp, :::4000->4000/tcp
kafka-cluster-zookeeper-1   "/bin/sh -c './bin/z…"   zookeeper   running
[root@localhost kafka-cluster]#
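At this point the single broker can be smoke-tested from the host. Because the EXTERNAL listener speaks SASL_PLAINTEXT, the CLI tools need a client-side config; a minimal sketch (client.properties is a hypothetical file name, the credentials match the JAAS config above):

security.protocol=SASL_PLAINTEXT
sasl.mechanism=PLAIN
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
  username="admin" \
  password="admin-secret";

Then a topic can be created through the published port:

# create a test topic on the single-broker cluster
./kafka_2.13-3.3.1/bin/kafka-topics.sh --create --topic smoke-test \
  --bootstrap-server 10.0.2.15:4000 \
  --replication-factor 1 --partitions 3 \
  --command-config client.properties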
configfiles/garen/server.properties differs only in broker.id, the EXTERNAL port, and the advertised hostname:

broker.id=2
zookeeper.connect=zookeeper:14000
zookeeper.connection.timeout.ms=18000
sasl.enabled.mechanisms=PLAIN
sasl.mechanism.inter.broker.protocol=PLAIN
listener.name.internal.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
username="admin" \
password="admin-secret" \
user_admin="admin-secret";

listener.name.external.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
username="admin" \
password="admin-secret" \
user_admin="admin-secret";

listeners=INTERNAL://:9092,EXTERNAL://:5000
advertised.listeners=INTERNAL://garen:9092,EXTERNAL://10.0.2.15:5000
inter.broker.listener.name=INTERNAL
listener.security.protocol.map=INTERNAL:SASL_PLAINTEXT,EXTERNAL:SASL_PLAINTEXT
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/tmp/kafka-logs
num.partitions=3
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.flush.interval.messages=10000
log.flush.interval.ms=1000
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
group.initial.rebalance.delay.ms=0
configfiles/temo/server.properties follows the same pattern:

broker.id=3
zookeeper.connect=zookeeper:14000
zookeeper.connection.timeout.ms=18000
sasl.enabled.mechanisms=PLAIN
sasl.mechanism.inter.broker.protocol=PLAIN
listener.name.internal.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
username="admin" \
password="admin-secret" \
user_admin="admin-secret";

listener.name.external.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
username="admin" \
password="admin-secret" \
user_admin="admin-secret";

listeners=INTERNAL://:9092,EXTERNAL://:6000
advertised.listeners=INTERNAL://temo:9092,EXTERNAL://10.0.2.15:6000
inter.broker.listener.name=INTERNAL
listener.security.protocol.map=INTERNAL:SASL_PLAINTEXT,EXTERNAL:SASL_PLAINTEXT
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/tmp/kafka-logs
num.partitions=3
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.flush.interval.messages=10000
log.flush.interval.ms=1000
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
group.initial.rebalance.delay.ms=0
[root@localhost kafka-cluster]# tree
.
├── configfiles
│   ├── garen
│   │   └── server.properties
│   ├── leesin
│   │   └── server.properties
│   ├── temo
│   │   └── server.properties
│   └── zookeeper.properties
├── docker-compose.yml
├── dockerfiles
│   ├── KafkaDockerfile
│   └── ZookeeperDockerfile
├── kafka_2.13-3.3.1
│   ├── bin
│   │   ├── connect-distributed.sh
│   │   ├── connect-mirror-maker.sh

│   │   ├── kafka-server-start.sh
│   │   ├── zookeeper-security-migration.sh
│   │   ├── zookeeper-server-start.sh
│   │   ├── zookeeper-server-stop.sh
│   │   └── zookeeper-shell.sh
│   ├── config
│   │   ├── connect-console-sink.properties
│   │   ├── connect-console-source.properties
│   │   ├── connect-distributed.properties
│   │   ├── connect-file-sink.properties
docker-compose.yml with all three brokers:

version: "3.9"

networks:
  default:
    name: kafka
    driver: bridge

volumes:
  leesindata:
  garendata:
  temodata:
  zookeeperdata:

services:
  zookeeper:
    build:
      context: kafka_2.13-3.3.1
      dockerfile: ../dockerfiles/ZookeeperDockerfile
    volumes:
      - zookeeperdata:/tmp/zookeeper
      - ./configfiles/zookeeper.properties:/app/config/zookeeper.properties

  leesin:
    build:
      context: kafka_2.13-3.3.1
      dockerfile: ../dockerfiles/KafkaDockerfile
    volumes:
      - leesindata:/tmp/kafka-logs
      - ./configfiles/leesin/server.properties:/app/config/server.properties
    ports:
      - 4000:4000

  garen:
    build:
      context: kafka_2.13-3.3.1
      dockerfile: ../dockerfiles/KafkaDockerfile
    volumes:
      - garendata:/tmp/kafka-logs
      - ./configfiles/garen/server.properties:/app/config/server.properties
    ports:
      - 5000:5000

  temo:
    build:
      context: kafka_2.13-3.3.1
      dockerfile: ../dockerfiles/KafkaDockerfile
    volumes:
      - temodata:/tmp/kafka-logs
      - ./configfiles/temo/server.properties:/app/config/server.properties
    ports:
      - 6000:6000
[root@localhost kafka-cluster]# docker compose up -d garen temo
[root@localhost kafka-cluster]# docker compose ps
NAME                        COMMAND                  SERVICE     STATUS    PORTS
kafka-cluster-garen-1       "/bin/sh -c './bin/k…"   garen       running   0.0.0.0:5000->5000/tcp, :::5000->5000/tcp
kafka-cluster-leesin-1      "/bin/sh -c './bin/k…"   leesin      running   0.0.0.0:4000->4000/tcp, :::4000->4000/tcp
kafka-cluster-temo-1        "/bin/sh -c './bin/k…"   temo        running   0.0.0.0:6000->6000/tcp, :::6000->6000/tcp
kafka-cluster-zookeeper-1   "/bin/sh -c './bin/z…"   zookeeper   running
[root@localhost kafka-cluster]#
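With all three brokers up and registered in ZooKeeper, a replicated topic plus a produce/consume round trip verifies the cluster end to end. A sketch, reusing the hypothetical client.properties from the earlier smoke test:

# create a topic replicated across all three brokers
./kafka_2.13-3.3.1/bin/kafka-topics.sh --create --topic cluster-test \
  --bootstrap-server 10.0.2.15:4000 \
  --replication-factor 3 --partitions 3 \
  --command-config client.properties

# send a line or two, then Ctrl-C
./kafka_2.13-3.3.1/bin/kafka-console-producer.sh --topic cluster-test \
  --bootstrap-server 10.0.2.15:4000 \
  --producer.config client.properties

# read them back from the beginning
./kafka_2.13-3.3.1/bin/kafka-console-consumer.sh --topic cluster-test \
  --bootstrap-server 10.0.2.15:4000 --from-beginning \
  --consumer.config client.properties

A replication factor of 3 only succeeds when all three brokers are registered, so the --create call doubles as a cluster health check.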
Finally, a management UI. Add a ui service to docker-compose.yml, with its Kafka connection settings kept in an env file:

  ui:
    image: provectuslabs/kafka-ui:latest
    ports:
      - "8080:8080"
    env_file:
      - env-files/ui.env
env-files/ui.env points the UI at the cluster and ZooKeeper, using the same SASL credentials as the brokers:

KAFKA_CLUSTERS_0_NAME=kafka-cluster
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=10.0.2.15:4000
KAFKA_CLUSTERS_0_ZOOKEEPER=zookeeper:14000
KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL=SASL_PLAINTEXT
KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM=PLAIN
KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG=org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret";
[root@localhost kafka-cluster]# docker compose up ui
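Once the container is up, browse to http://localhost:8080 to reach the UI. If the image exposes Spring Boot's actuator endpoints (an assumption; recent provectuslabs/kafka-ui images use this endpoint in their documented healthchecks), a quick probe from the host looks like:

# check that the UI answers on the published port (assumes actuator is enabled)
curl http://localhost:8080/actuator/health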
[Screenshot: Kafka management UI]
