The Kafka API and Custom Partitioning

1. Import the Kafka client dependency (Maven)

    <dependencies>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>0.11.0.1</version>
        </dependency>
    </dependencies>

2. Producer

package cn.itcast.kafka.producer;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

public class KafkaProducerStudy {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "node-1:9092,node-2:9092,node-3:9092");
        props.put("acks", "all");
        props.put("retries", 0);
        props.put("batch.size", 16384);
        props.put("linger.ms", 1);
        props.put("buffer.memory", 33554432);
        //custom partitioner class (see section 4)
        //props.put("partitioner.class", "cn.itcast.kafka.partitioner.MyOwnPartitioner");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        Producer<String, String> producer = new KafkaProducer<String, String>(props);
        for (int i = 0; i < 100; i++) {
            //partitioning strategy 1: specify the partition explicitly so the record goes
            //to the partition we choose (here partition 1; the topic needs at least 2 partitions)
            //producer.send(new ProducerRecord<String, String>("test_cheng",1,null, Integer.toString(i)));

            //partitioning strategy 2: the partition is computed by hashing the key modulo the
            //partition count. If you use this approach, the key must vary; a constant key sends
            //every record to the same partition, creating a hotspot (data skew)
            //producer.send(new ProducerRecord<String, String>("test_cheng","1", Integer.toString(i)));

            //partitioning strategy 3: with no key, records are sent to the partitions in turn
            //(round-robin)
            producer.send(new ProducerRecord<String, String>("test_cheng", Integer.toString(i)));
        }

        producer.close();

    }
}
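
To make the data-skew warning in strategy 2 concrete, here is a minimal sketch that varies the key so the hash spreads records across partitions. It reuses the topic and producer configuration above; the "user-" key pattern is just an illustration, not from the original code:

        //10 distinct keys, so records spread over up to 10 partitions;
        //a constant key would pin every record to a single partition
        for (int i = 0; i < 100; i++) {
            String key = "user-" + (i % 10);
            producer.send(new ProducerRecord<String, String>("test_cheng", key, Integer.toString(i)));
        }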

3. Consumer

package cn.itcast.kafka.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.util.Arrays;
import java.util.Properties;

public class KafkaConsumerStudy {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "node-1:9092,node-2:9092,node-3:9092");
        //specify which consumer group this consumer belongs to; any string will do
        props.put("group.id", "test");
        //enable automatic offset commits so the consumer's position is saved periodically;
        //note that auto-commit by itself cannot guarantee that records are neither lost nor
        //re-delivered, because offsets are committed on a timer regardless of processing progress
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String,String>(props);
        //subscribe to the topic we want to consume
        consumer.subscribe(Arrays.asList("test_cheng"));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records)
                System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
        }
    }
}
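
Because auto-commit runs on a timer, a crash at the wrong moment can skip or re-deliver records. For stronger guarantees, a common alternative is to disable auto-commit and commit manually after processing. A minimal sketch, assuming the same topic and properties as above with only the commit settings changed:

        props.put("enable.auto.commit", "false");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        consumer.subscribe(Arrays.asList("test_cheng"));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
            }
            //commit only after the whole batch has been processed
            consumer.commitSync();
        }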

4. Custom partitioner

package cn.itcast.kafka.partitioner;

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

import java.util.Map;

public class MyOwnPartitioner implements Partitioner {
    //this method decides which partition each record is sent to: the return value
    //is the target partition number, so returning 0 sends every record to partition 0
    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        System.out.println(topic);
        System.out.println(value.toString());
        return 0;
    }

    @Override
    public void close() {

    }

    @Override
    public void configure(Map<String, ?> configs) {

    }
}

To use it, add this line to the producer's properties:

props.put("partitioner.class", "cn.itcast.kafka.partitioner.MyOwnPartitioner");
