Kafka Distributed Message Queue Series: Core APIs and Examples
Reposted from: http://www.inter12.org/archives/834
I. Producer API
1. Creating a Producer requires a ProducerConfig:
public Producer(ProducerConfig config);
2. Sending messages, singly or in batches:
public void send(KeyedMessage<K,V> message);
public void send(List<KeyedMessage<K,V>> messages);
3. Closing the Producer's connections to all brokers:
public void close();
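As a minimal sketch of the batch variant (the broker address, topic name, and class name are placeholder assumptions, not from the original post):

import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class BatchSendSketch {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("metadata.broker.list", "localhost:9092");             // assumed local broker
        props.put("serializer.class", "kafka.serializer.StringEncoder");

        Producer<String, String> producer = new Producer<String, String>(new ProducerConfig(props));

        // Collect several messages and hand them to the broker in a single send(List) call
        List<KeyedMessage<String, String>> batch = new ArrayList<KeyedMessage<String, String>>();
        for (int i = 0; i < 10; i++) {
            batch.add(new KeyedMessage<String, String>("test-topic", "message-" + i));
        }
        producer.send(batch);

        producer.close(); // close connections to all brokers
    }
}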
II. Consumer High-Level API
The key classes are Consumer and ConsumerConnector; Consumer is a static factory for ConsumerConnector:
class Consumer {
    public static kafka.javaapi.consumer.ConsumerConnector createJavaConsumerConnector(ConsumerConfig config);
}
All actual message consumption goes through ConsumerConnector.
Create a message stream for each of the given topics, decoding keys and values with the supplied Decoders:
public <K,V> Map<String, List<KafkaStream<K,V>>>
createMessageStreams(Map<String, Integer> topicCountMap, Decoder<K> keyDecoder, Decoder<V> valueDecoder);
Create a message stream for each of the given topics, using the default Decoder:
public Map<String, List<KafkaStream<byte[], byte[]>>> createMessageStreams(Map<String, Integer> topicCountMap);
Create message streams for the topics matching a TopicFilter, decoding with the supplied Decoders:
public <K,V> List<KafkaStream<K,V>>
createMessageStreamsByFilter(TopicFilter topicFilter, int numStreams, Decoder<K> keyDecoder, Decoder<V> valueDecoder);
Create message streams for the topics matching a TopicFilter, using the default Decoder:
public List<KafkaStream<byte[], byte[]>> createMessageStreamsByFilter(TopicFilter topicFilter);
Commit the offsets of all topics consumed through this connector:
public void commitOffsets();
Shut down the consumer:
public void shutdown();
The most commonly used high-level API methods are public List<KafkaStream<byte[], byte[]>> createMessageStreamsByFilter(TopicFilter topicFilter); and public void commitOffsets();.
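For instance, a minimal sketch that consumes a single topic through createMessageStreams rather than a filter (the group id, topic name, class name, and stream count are illustrative assumptions):

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

public class StreamsPerTopicSketch {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zookeeper.connect", "127.0.0.1:2181"); // assumed local ZooKeeper
        props.put("group.id", "test-group");              // assumed consumer group

        ConsumerConnector connector = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));

        // The integer value controls how many streams (consuming threads) to create per topic
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put("test-topic", 1);

        Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(topicCountMap);

        // Read a handful of messages from the single stream, then commit and shut down
        ConsumerIterator<byte[], byte[]> it = streams.get("test-topic").get(0).iterator();
        for (int i = 0; i < 10 && it.hasNext(); i++) {
            System.out.println(new String(it.next().message()));
        }
        connector.commitOffsets(); // flush consumed offsets to ZooKeeper
        connector.shutdown();
    }
}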
III. Consumer Simple API: SimpleConsumer
Fetch a batch of messages:
public FetchResponse fetch(kafka.javaapi.FetchRequest request);
Fetch a topic's metadata:
public kafka.javaapi.TopicMetadataResponse send(kafka.javaapi.TopicMetadataRequest request);
Fetch the currently available offsets:
public kafka.javaapi.OffsetResponse getOffsetsBefore(OffsetRequest request);
Close the connection:
public void close();
For most applications the high-level API is sufficient, but when you need finer-grained control, use the simple API. For example, when a consumer restarts and you want to resume from the latest offset, SimpleConsumer is the right tool.
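A sketch of that restart scenario, looking up the latest offset of a single partition with SimpleConsumer (the host, port, topic, partition, class name, and client id are assumptions for illustration):

import java.util.HashMap;
import java.util.Map;

import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetRequest;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class LatestOffsetSketch {

    public static void main(String[] args) {
        // Connect directly to one broker; SimpleConsumer does no ZooKeeper coordination
        SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, "offset-lookup");

        TopicAndPartition tp = new TopicAndPartition("test-topic", 0);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
                new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        // LatestTime() (-1) asks for the newest offset; EarliestTime() (-2) would ask for the oldest
        requestInfo.put(tp, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));

        OffsetRequest request = new OffsetRequest(
                requestInfo, kafka.api.OffsetRequest.CurrentVersion(), "offset-lookup");
        OffsetResponse response = consumer.getOffsetsBefore(request);

        long[] offsets = response.offsets("test-topic", 0);
        System.out.println("latest offset: " + offsets[0]);
        consumer.close();
    }
}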
IV. Kafka Hadoop Consumer API
This provides a horizontally scalable solution for combining Kafka with Hadoop; see
https://github.com/linkedin/camus/tree/camus-kafka-0.8/
V. In Practice
Maven dependency:
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.10</artifactId>
    <version>0.8.0</version>
</dependency>
Producer code:
import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

import java.util.Properties;

/**
 * <pre>
 * Created by zhaoming on 14-5-4 3:23 PM
 * </pre>
 */
public class KafkaProductor {

    public static void main(String[] args) throws InterruptedException {
        Properties properties = new Properties();
        properties.put("zk.connect", "127.0.0.1:2181");
        properties.put("metadata.broker.list", "localhost:9092");
        properties.put("serializer.class", "kafka.serializer.StringEncoder");

        ProducerConfig producerConfig = new ProducerConfig(properties);
        Producer<String, String> producer = new Producer<String, String>(producerConfig);

        // Build the message body
        KeyedMessage<String, String> keyedMessage =
                new KeyedMessage<String, String>("test-topic", "test-message");
        producer.send(keyedMessage);

        Thread.sleep(1000);
        producer.close();
    }
}
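Assuming a local ZooKeeper on 127.0.0.1:2181 and a Kafka 0.8 broker on localhost:9092, running this class publishes a single message to test-topic; since auto.create.topics.enable defaults to true in 0.8, the topic is created automatically on first send.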
Consumer code:
import java.io.UnsupportedEncodingException;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.TimeUnit;

import kafka.consumer.*;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

import org.apache.commons.collections.CollectionUtils;

/**
 * <pre>
 * Created by zhaoming on 14-5-4 3:32 PM
 * </pre>
 */
public class kafkaConsumer {

    public static void main(String[] args) throws InterruptedException, UnsupportedEncodingException {
        Properties properties = new Properties();
        properties.put("zookeeper.connect", "127.0.0.1:2181");
        properties.put("auto.commit.enable", "true");
        properties.put("auto.commit.interval.ms", "60000");
        properties.put("group.id", "test-group");

        ConsumerConfig consumerConfig = new ConsumerConfig(properties);
        ConsumerConnector javaConsumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);

        // Topic filter
        Whitelist whitelist = new Whitelist("test-topic");
        List<KafkaStream<byte[], byte[]>> partitions = javaConsumerConnector.createMessageStreamsByFilter(whitelist);

        if (CollectionUtils.isEmpty(partitions)) {
            System.out.println("empty!");
            TimeUnit.SECONDS.sleep(1);
        }

        // Consume messages
        for (KafkaStream<byte[], byte[]> partition : partitions) {
            ConsumerIterator<byte[], byte[]> iterator = partition.iterator();
            while (iterator.hasNext()) {
                MessageAndMetadata<byte[], byte[]> next = iterator.next();
                System.out.println("partition:" + next.partition());
                System.out.println("offset:" + next.offset());
                System.out.println("message:" + new String(next.message(), "utf-8"));
            }
        }
    }
}
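Note that this consumer relies on auto.commit.enable=true, so offsets are flushed to ZooKeeper every 60 seconds (auto.commit.interval.ms); if the process dies between commits, messages consumed since the last commit are redelivered on restart, i.e. at-least-once delivery. For tighter control, set auto.commit.enable to false and call commitOffsets() after processing.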