Writing a Spark DataFrame to Kafka
The code is as follows:
// Imports needed by the snippet below
import java.util.Properties

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.Row
import org.json.JSONObject // requires the org.json dependency
import utils.kafka.KafkaSink

// Create the Kafka producer wrapper and broadcast it to the executors
val kafkaProducer: Broadcast[KafkaSink[String, String]] = {
  val kafkaProducerConfig = {
    val p = new Properties()
    p.setProperty("bootstrap.servers", "25.50.192.184:20092,25.50.192.185:20094,25.50.192.186:20093")
    p.setProperty("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    p.setProperty("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    p
  }
  // ss is the SparkSession; the KafkaSink helper class is listed further down
  ss.sparkContext.broadcast(KafkaSink[String, String](kafkaProducerConfig))
}
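// Note: KafkaProducer itself is not serializable and cannot be broadcast directly.
// KafkaSink only carries a factory function; the producer is created lazily, once per
// executor JVM, the first time send() is called (see the lazy val in the class below).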
// aMpowerCurve2 is a DataFrame
// Pull the DataFrame's column names into an array
val arrcolumn: Array[String] = aMpowerCurve2.schema.fieldNames
// Process every row of the DataFrame, one partition at a time
aMpowerCurve2.foreachPartition(rows => {
  rows.foreach((data: Row) => {
    val dataMap = new java.util.HashMap[String, Object]()
    for (a <- arrcolumn) {
      // Put each column name and its value into the map
      // (getAs[String] assumes every column is a String)
      dataMap.put(a, data.getAs[String](a))
    }
    // Convert the map to JSON (needs the org.json dependency and import org.json.JSONObject)
    val mesJson = new JSONObject(dataMap)
    // Send the record to the target topic
    val rm = kafkaProducer.value.send("cj_zhny_cons_load", mesJson.toString())
    // get() blocks until the broker acknowledges the record
    val recordMetadata = rm.get()
    val topicname = recordMetadata.topic()
    val partition = recordMetadata.partition()
    val offset = recordMetadata.offset()
    println("topic: " + topicname + ", partition: " + partition + ", offset: " + offset)
  })
})
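As an alternative, Spark 2.2+ ships a built-in kafka data source (the spark-sql-kafka-0-10 module) that can write a DataFrame to a topic without a hand-rolled producer loop. The following is only a sketch under that assumption, not part of the original job; it serializes every column to JSON with to_json and reuses the brokers and topic from the code above:

import org.apache.spark.sql.functions.{col, struct, to_json}

// Sketch only: requires the spark-sql-kafka-0-10 dependency on the classpath
aMpowerCurve2
  .select(to_json(struct(aMpowerCurve2.columns.map(col): _*)).alias("value"))
  .write
  .format("kafka")
  .option("kafka.bootstrap.servers", "25.50.192.184:20092,25.50.192.185:20094,25.50.192.186:20093")
  .option("topic", "cj_zhny_cons_load")
  .save()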
// KafkaSink helper class (written by a senior colleague at the company)
package utils.kafka

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord, RecordMetadata}

// Serializable wrapper around a lazily created KafkaProducer, safe to broadcast to executors
class KafkaSink[K, V](createProducer: () => KafkaProducer[K, V]) extends Serializable {
  // Built on first use inside each executor JVM; never serialized from the driver
  lazy val producer = createProducer()

  // Send a record with an explicit key
  def send(topic: String, key: K, value: V): java.util.concurrent.Future[RecordMetadata] =
    producer.send(new ProducerRecord[K, V](topic, key, value))

  // Send a record with a value only (Kafka chooses the partition)
  def send(topic: String, value: V): java.util.concurrent.Future[RecordMetadata] =
    producer.send(new ProducerRecord[K, V](topic, value))
}

object KafkaSink {
  import scala.collection.JavaConversions._

  def apply[K, V](config: Map[String, Object]): KafkaSink[K, V] = {
    val createProducerFunc = () => {
      val producer = new KafkaProducer[K, V](config)
      // Close the producer when the executor JVM shuts down
      sys.addShutdownHook {
        producer.close()
      }
      producer
    }
    new KafkaSink(createProducerFunc)
  }

  def apply[K, V](config: java.util.Properties): KafkaSink[K, V] = apply(config.toMap)
}
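Note that scala.collection.JavaConversions is deprecated from Scala 2.12 onward. If the project is on a newer Scala version, the Properties overload in object KafkaSink could be rewritten with explicit JavaConverters; this is only a sketch of an equivalent, not the original author's code:

import scala.collection.JavaConverters._

def apply[K, V](config: java.util.Properties): KafkaSink[K, V] =
  apply(config.asScala.toMap)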