栏目分类:
子分类:
返回
名师互学网用户登录
快速导航关闭
当前搜索
当前分类
子分类
实用工具
热门搜索
名师互学网 > IT > 前沿技术 > 大数据 > 大数据系统

Kafka的Java客户端

Kafka的Java客户端

Kafka的Java客户端-demo版 0. 订单类
package com.hao.entity;

/**
 * Simple order value object used as the Kafka message payload.
 * Serialized to JSON by the producer demos via fastjson.
 */
public class Order {

    // Order identifier; null until assigned when built via the no-arg constructor.
    private Long orderId;
    // Quantity of items in this order.
    private int count;

    /** No-arg constructor required by JSON deserialization frameworks. */
    public Order() {
    }

    /**
     * Creates a fully-initialized order.
     *
     * @param orderId the order identifier
     * @param count   the item quantity
     */
    public Order(Long orderId, int count) {
        this.orderId = orderId;
        this.count = count;
    }

    public Long getOrderId() {
        return orderId;
    }

    public void setOrderId(Long orderId) {
        this.orderId = orderId;
    }

    public int getCount() {
        return count;
    }

    public void setCount(int count) {
        this.count = count;
    }
}


1. 依赖


<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.aho</groupId>
    <artifactId>kafka-demo</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <maven.compiler.source>8</maven.compiler.source>
        <maven.compiler.target>8</maven.compiler.target>
    </properties>

    <dependencies>

        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.2.47</version>
        </dependency>

        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>2.4.1</version>
        </dependency>

    </dependencies>
</project>



2. 生产者发送消息 2.1 同步发送
package com.hao.kafka;

import com.alibaba.fastjson.JSON;
import com.hao.entity.Order;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;
import java.util.concurrent.ExecutionException;

/**
 * Demo: send a single message to Kafka synchronously.
 *
 * <p>Fixes over the original: {@code Recordmetadata} did not compile (the
 * kafka-clients class is {@link org.apache.kafka.clients.producer.RecordMetadata
 * RecordMetadata}); raw {@code Producer}/{@code ProducerRecord} types are now
 * parameterized; the producer is closed in a finally block so buffered messages
 * are flushed and network resources released.
 */
public class MyProducerDemo01 {
    // 1. Target topic name
    private static final String TOPIC_NAME = "my-replicated-topic";

    public static void main(String[] args) throws ExecutionException, InterruptedException {
        // 2. Producer configuration
        Properties properties = new Properties();
        // Kafka broker addresses (cluster of three brokers on one host)
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.100.108:9092,192.168.100.108:9093,192.168.100.108:9094");
        // Key/value serializers: both key and value are sent as strings
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // acks=0: fire-and-forget, no broker acknowledgement.
        // NOTE(review): with acks=0 the returned RecordMetadata has offset -1,
        // since the broker never confirms the write.
        properties.put(ProducerConfig.ACKS_CONFIG, "0");

        // 3. Create the producer with the configuration above
        Producer<String, String> producer = new KafkaProducer<>(properties);
        try {
            // 4. Build the message: key = order id, value = order serialized as JSON
            Order order = new Order(1001L, 2);
            ProducerRecord<String, String> producerRecord = new ProducerRecord<>(TOPIC_NAME,
                    String.valueOf(order.getOrderId()),
                    JSON.toJSONString(order));

            // Synchronous send: get() blocks until the send future completes
            RecordMetadata metadata = producer.send(producerRecord).get();
            System.out.println("同步方式发送消息结果:" + "topic-" + metadata.topic() + "|partition-"
                    + metadata.partition() + "|offset-" + metadata.offset());
        } finally {
            // Flush any buffered records and release sockets/threads
            producer.close();
        }
    }

}

2.2 异步发送
package com.hao.kafka;

import com.alibaba.fastjson.JSON;
import com.hao.entity.Order;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;
import java.util.concurrent.CountDownLatch;

/**
 * Demo: send five messages to Kafka asynchronously with a callback, using a
 * {@link CountDownLatch} to wait for all callbacks before exiting.
 *
 * <p>Fixes over the original: {@code Recordmetadata} did not compile (the
 * correct class is {@code RecordMetadata}); {@code "…" + e.getStackTrace()}
 * printed an array reference instead of the stack trace; the latch is now
 * counted down in a finally block so a callback exception cannot hang
 * {@code await()}; raw types are parameterized and the producer is closed
 * even if sending fails.
 */
public class MyProducer02 {
    // Target topic name
    private static final String TOPIC_NAME = "my-replicated-topic";

    public static void main(String[] args) throws InterruptedException {
        // 1. Producer configuration
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.100.108:9092,192.168.100.108:9093,192.168.100.108:9094");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // acks=0: fire-and-forget, no broker acknowledgement
        properties.put(ProducerConfig.ACKS_CONFIG, "0");

        // 2. Create the producer with the configuration above
        Producer<String, String> producer = new KafkaProducer<>(properties);
        // Latch released once per completed send callback (5 messages total)
        CountDownLatch countDownLatch = new CountDownLatch(5);
        try {
            for (int i = 0; i < 5; i++) {
                // 3. Build the message: key = order id, value = order as JSON
                Order order = new Order((long) i, 2);
                ProducerRecord<String, String> producerRecord = new ProducerRecord<>(TOPIC_NAME,
                        String.valueOf(order.getOrderId()),
                        JSON.toJSONString(order));
                // Asynchronous send with completion callback
                producer.send(producerRecord, new Callback() {
                    @Override
                    public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                        try {
                            if (e != null) {
                                // Print the full stack trace, not the array reference
                                System.err.println("发送消息失败:");
                                e.printStackTrace();
                            }
                            if (recordMetadata != null) {
                                System.out.println("异步方式发送消息结果:" + "topic-" + recordMetadata.topic() + "|partition-"
                                        + recordMetadata.partition() + "|offset-" + recordMetadata.offset());
                            }
                        } finally {
                            // Always release the latch so await() cannot hang
                            countDownLatch.countDown();
                        }
                    }
                });
            }
            // Block until all 5 callbacks have fired
            countDownLatch.await();
        } finally {
            // Flush buffered records and release resources
            producer.close();
        }
    }

}

3. 消费者消费消息
package com.hao.kafka;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Map;
import java.util.Properties;

/**
 * Demo: consume messages from a Kafka topic in a poll loop with manual
 * (synchronous) offset commits.
 *
 * <p>Fixes over the original: raw {@code KafkaConsumer}/{@code ConsumerRecords}/
 * {@code ConsumerRecord} types are now parameterized as {@code <String, String>}
 * to match the configured {@link StringDeserializer}s.
 */
public class MyConsumer01 {
    // Topic to subscribe to
    private static final String TOPIC_NAME = "my-replicated-topic";
    // Consumer group id
    private static final String GROUP_NAME = "testGroup999";

    public static void main(String[] args) {
        // 1. Consumer configuration
        Properties properties = new Properties();
        // Kafka broker addresses (three-broker cluster on one host)
        properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.100.108:9092,192.168.100.108:9093,192.168.100.108:9094");
        // Key/value deserializers (messages were produced as strings)
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Consumer group name
        properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, GROUP_NAME);

        // When the group has no committed offset, start from the earliest record
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        // Disable auto-commit: offsets are committed manually below
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        // Max records returned by a single poll; tune to consumption speed
        properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 500);
        // Interval between consumer heartbeats to the broker
        properties.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 1000);
        // If no heartbeat is received within 10s the broker evicts this consumer
        // from the group and rebalances its partitions to other consumers
        properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 10 * 1000);
        // If two polls are more than 30s apart, the consumer is considered too
        // slow, kicked from the group, and its partitions are rebalanced
        properties.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 30 * 1000);

        // 2. Create the consumer with the configuration above
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        // 3. Subscribe to the topic
        consumer.subscribe(Arrays.asList(TOPIC_NAME));
        // 4. Poll loop
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            // Process each record in this batch
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("收到消息:partition = %d,offset = %d, key = %s, value = %s%n", record.partition(),
                        record.offset(), record.key(), record.value());
            }
            // After processing, commit offsets manually
            if (records.count() > 0) {
                // 1. Manual synchronous commit: blocks until the broker acks the
                //    commit. Used here since nothing follows the commit anyway.
                consumer.commitSync();

                // 2. Manual asynchronous commit (alternative):
//                consumer.commitAsync(new OffsetCommitCallback() {
//                    @Override
//                    public void onComplete(Map map, Exception e) {
//                        if (e != null) {
//                            System.err.println("Commit failed for " + map);
//                            System.err.println("Commit failed exception: "+e.getStackTrace());
//                        }
//                    }
//                });

            }
        }

    }
}

转载请注明:文章转载自 www.mshxw.com
本文地址:https://www.mshxw.com/it/753561.html
我们一直用心在做
关于我们 文章归档 网站地图 联系我们

版权所有 (c)2021-2022 MSHXW.COM

ICP备案号:晋ICP备2021003244-6号