Development tool: IntelliJ IDEA

Add the Kafka dependency to pom.xml:

<!-- kafka -->
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.10</artifactId>
    <version>0.10.2.0</version>
</dependency>

Producer code:

package com.aliyun.iot.demo;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

public class kafka_demo {
    public static void main(String[] args){
        Properties properties = new Properties();
        // List of broker addresses; listing at least two is recommended so a single broker outage does not break the producer
        properties.put("bootstrap.servers", "120.70.243.70:9092");
        // acks specifies how many partition replicas must receive a message before the producer considers the write successful; it controls how likely it is that data will be lost
        // acks=0: the producer does not wait for any response from the server. There is no way to know whether a send succeeded, but messages can be sent as fast as the network allows, giving very high throughput.
        // acks=1: the producer receives a success response from the server as soon as the partition leader receives the message.
        // acks=all: the producer receives a success response only after all in-sync replicas have received the message. This is the safest mode, at the cost of higher latency.
        properties.put("acks", "all");
        // retries: how many times the producer retries a send when the error returned by the server may be transient
        properties.put("retries", 0);
        // batch.size: the amount of memory one batch may use, measured in bytes (not in number of messages)
        properties.put("batch.size", 16384);
        // linger.ms: how long the producer waits for more messages to join a batch before sending it; adds a little latency but improves throughput
        properties.put("linger.ms", 1);
        // buffer.memory: the size of the producer's memory buffer, used to hold messages waiting to be sent to the server
        properties.put("buffer.memory", 33554432);
        // Serializers for keys and values
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        Producer<String, String> producer = new KafkaProducer<String, String>(properties);
        try {
            for (int i = 0; i < 10; i++) {
                String values = "value" + i + "\t";
                producer.send(new ProducerRecord<String, String>("mykafka2",  "key"+ Integer.toString(i),values));
                Thread.sleep(500);
                System.out.println("Sent:" + values);
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            producer.close();
        }
    }
}

Execution result:
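
Note that producer.send() is asynchronous, so the loop above prints "Sent" without actually confirming delivery. Below is a minimal callback-based sketch (the class name kafka_demo_callback and the log wording are illustrative assumptions, not part of the original demo); the callback fires once the broker acknowledges or rejects each record, which pairs naturally with acks=all:

package com.aliyun.iot.demo;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.util.Properties;

public class kafka_demo_callback {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "120.70.243.70:9092");
        properties.put("acks", "all");
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        Producer<String, String> producer = new KafkaProducer<String, String>(properties);
        try {
            for (int i = 0; i < 10; i++) {
                final String key = "key" + i;
                final String value = "value" + i;
                // The second argument is a Callback invoked once the broker acks (or rejects) the record
                producer.send(new ProducerRecord<String, String>("mykafka2", key, value), new Callback() {
                    public void onCompletion(RecordMetadata metadata, Exception exception) {
                        if (exception != null) {
                            // Send failed (e.g., broker unreachable or record rejected)
                            exception.printStackTrace();
                        } else {
                            // Delivery confirmed at this partition/offset
                            System.out.println("Acked " + key + " -> partition=" + metadata.partition()
                                    + ", offset=" + metadata.offset());
                        }
                    }
                });
            }
        } finally {
            // close() blocks until buffered records are sent and their callbacks have run
            producer.close();
        }
    }
}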

Consumer code:

package com.aliyun.iot.demo;
import java.util.Properties;
import java.util.Arrays;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.ConsumerRecord;

public class kafka_consumer {
    public static void main(String[] args) throws Exception {
        String topicName = "mykafka2";
        Properties props = new Properties();
        props.put("bootstrap.servers", "120.70.243.70:9092");
        props.put("group.id", "test");
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");
        props.put("session.timeout.ms", "30000");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        //Kafka Consumer subscribes list of topics here.
        consumer.subscribe(Arrays.asList(topicName));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                // Print the offset, key, and value of each consumed record
                System.out.printf("offset = %d, key = %s, value = %s\n",
                        record.offset(), record.key(), record.value());
            }
        }
    }
}

Execution result:
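
The while (true) loop above never exits, so consumer.close() is never reached. Below is a minimal shutdown-safe sketch, assuming the same broker, topic, and group (the class name kafka_consumer_shutdown and the shutdown-hook structure are illustrative additions, not part of the original demo); KafkaConsumer.wakeup() is the one method that is safe to call from another thread, and it makes the blocked poll() throw WakeupException so the loop can end cleanly:

package com.aliyun.iot.demo;

import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;

public class kafka_consumer_shutdown {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "120.70.243.70:9092");
        props.put("group.id", "test");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        final KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        consumer.subscribe(Arrays.asList("mykafka2"));

        // On Ctrl+C, wakeup() makes the blocked poll() throw WakeupException so the loop can exit
        final Thread mainThread = Thread.currentThread();
        Runtime.getRuntime().addShutdownHook(new Thread() {
            public void run() {
                consumer.wakeup();
                try {
                    mainThread.join();
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        });

        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(100);
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset = %d, key = %s, value = %s\n",
                            record.offset(), record.key(), record.value());
                }
            }
        } catch (WakeupException e) {
            // Expected during shutdown; nothing to do
        } finally {
            // Leaves the consumer group cleanly so partitions are rebalanced promptly
            consumer.close();
        }
    }
}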
