栏目分类:
子分类:
返回
名师互学网用户登录
快速导航关闭
当前搜索
当前分类
子分类
实用工具
热门搜索
名师互学网 > IT > 前沿技术 > 大数据

spring boot集成多个kafka(生产,消费)

大数据 更新时间: 发布时间: IT归档 最新发布 模块sitemap 名妆网 法律咨询 聚返吧 英语巴士网 伯小乐 网商动力

spring boot集成多个kafka(生产,消费)

1.pom


<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>

2.配置文件yml

self:
  kafka:

    kafka-bootstrap-servers: ip1:port1,ip2:port2,ip3:port3
    session-timeout-ms: 10000
    enable-auto-commit: false
    auto-commit-interval-ms: 1000
    auto-offset-reset: latest
    group-id: gid001
    topic: testtopic1
    retries: 0
    batch-size: 16384
    buffer-memory: 33554432
    acks: 1

self2:
  kafka:
    kafka-bootstrap-servers: ip4:port4,ip5:port5,ip6:port6
    session-timeout-ms: 10000
    enable-auto-commit: true
    auto-commit-interval-ms: 1000
    auto-offset-reset: latest
    group-id: gid002
    topic: testtopic2
    retries: 0
    batch-size: 16384
    buffer-memory: 33554432
    acks: 1

3.自定义配置.java

①SelfDefinedKafkaConfig.java(注意@Primary:表示默认的生产和消费,必须写一下)

@Component
@ConfigurationProperties(prefix = "self.kafka")
@EnableKafka
@Data
public class SelfDefinedKafkaConfig {

    private String kafkaBootstrapServers;

    private Integer sessionTimeoutMs;

    private boolean enableAutoCommit;

    private Integer autoCommitIntervalMs;

    private String autoOffsetReset;

    private String groupId;

    private Integer batchSize;

    private Long bufferMemory;

    private String acks;

    private Integer retries;


    //自定义kafka属性配置文件
    @Bean
    public Map selfKafkaConfigs() {
        Map map = new HashMap<>(8);
        map.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBootstrapServers);
        map.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        map.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        map.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitIntervalMs);
        map.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeoutMs);
        map.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        map.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        map.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ObjectDeserializer.class);

        map.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        map.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        map.put(ProducerConfig.ACKS_CONFIG, acks);
        map.put(ProducerConfig.RETRIES_CONFIG, retries);
        map.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        map.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ObjectSerializer.class);
//        map.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
//        map.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
//        map.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
//        map.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);//对字符串进行序列化

        return map;
    }



    //自定义生产端
    @Bean
    public ProducerFactory selfKafkaProducerFactory() {
        ProducerFactory productFactory = new DefaultKafkaProducerFactory<>(selfKafkaConfigs());
        return productFactory;
    }

    @Bean
    @Primary
    public KafkaTemplate selfKafkaTemplate() {//如果有自动配置的KafkaTemplate,这个就不生效
        return new KafkaTemplate(selfKafkaProducerFactory());
    }

    //自定义消费端
    //根据配置属性文件,生成自定义消费工厂

    @Bean
    @Primary//理解为默认优先选择当前容器下的消费者工厂
    KafkaListenerContainerFactory> listenerContainerFactory001() {
        ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(1);
        factory.getContainerProperties().setPollTimeout(3000);
        return factory;
    }

    @Bean//第一个消费者工厂的bean
    public ConsumerFactory consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(selfKafkaConfigs());
    }




}

②SelfDefinedKafkaConfig2.java

package com.example.k5.config;


//import com.neusoft.tsp.chery.utils.kafka.serialization.ObjectDeserializer;
//import com.neusoft.tsp.chery.utils.kafka.serialization.ObjectSerializer;
import com.example.k5.ser.ObjectDeserializer;
import com.example.k5.ser.ObjectSerializer;
import lombok.Data;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Primary;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.stereotype.Component;

import java.util.HashMap;
import java.util.Map;


@Component
@ConfigurationProperties(prefix = "self2.kafka")
@EnableKafka
@Data
public class SelfDefinedKafkaConfig2 {

    private String kafkaBootstrapServers;

    private Integer sessionTimeoutMs;

    private boolean enableAutoCommit;

    private Integer autoCommitIntervalMs;

    private String autoOffsetReset;

    private String groupId;

    private Integer batchSize;

    private Long bufferMemory;

    private String acks;

    private Integer retries;


    //自定义生产端2
    @Bean
    public Map selfKafkaConfigs2() {
        Map map = new HashMap<>(8);
        map.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBootstrapServers);
        map.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        map.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        map.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitIntervalMs);
        map.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeoutMs);
        map.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        map.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        map.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ObjectDeserializer.class);
        map.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        map.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        map.put(ProducerConfig.ACKS_CONFIG, acks);
        map.put(ProducerConfig.RETRIES_CONFIG, retries);
        map.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        map.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ObjectSerializer.class);
        return map;
    }

    @Bean
    public ProducerFactory selfKafkaProducerFactory2() {
        ProducerFactory productFactory = new DefaultKafkaProducerFactory<>(selfKafkaConfigs2());
        return productFactory;
    }

    @Bean
    public KafkaTemplate selfKafkaTemplate2() {
        return new KafkaTemplate(selfKafkaProducerFactory2());
    }



    //自定义消费端2
    @Bean
    KafkaListenerContainerFactory> listenerContainerFactory002() {
        ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory2());
        factory.setConcurrency(1);
        factory.getContainerProperties().setPollTimeout(3000);
        return factory;
    }

    @Bean//第一个消费者工厂的bean
    public ConsumerFactory consumerFactory2() {
        return new DefaultKafkaConsumerFactory<>(selfKafkaConfigs2());
    }
}

4.生产和消费的写法:

①.生产:

@GetMapping("/sendobj1")
    // Sends a small map object to the DEFAULT cluster via the @Primary template.
    public void sendObj1() {
        try {
            log.info("发送给默认kafka 发对象");
            Map<String, Object> m = new HashMap<>();
            m.put("name", "jim111");
            // Removed the unused byte[] conversion — the template's configured
            // ObjectSerializer serializes the map itself.
            selfKafkaTemplate.send(topicName1, m);
            log.info("发送给默认kafka结束");
        } catch (Exception e) {
            // Log instead of printStackTrace so the failure reaches the app log.
            log.error("发送给默认kafka失败", e);
        }
    }


    @GetMapping("/sendobj2")
    // Sends a small map object to the SECOND (custom) cluster via selfKafkaTemplate2.
    public void sendObj2() {
        try {
            log.info("发送给自定义kafka 发对象");
            Map<String, Object> m = new HashMap<>();
            m.put("name", "jim222");
            // Removed the unused byte[] conversion — the template's configured
            // ObjectSerializer serializes the map itself.
            selfKafkaTemplate2.send(topicName2, m);
            // Fixed copy-paste bug: this log previously claimed the DEFAULT cluster.
            log.info("发送给自定义kafka结束");
        } catch (Exception e) {
            // Log instead of printStackTrace so the failure reaches the app log.
            log.error("发送给自定义kafka失败", e);
        }
    }

②消费(注意消费容器listenerContainerFactory001, listenerContainerFactory002的配置):

 @KafkaListener(topics = {"${self.kafka.topic}"}, groupId = "test002", containerFactory = "listenerContainerFactory001", concurrency = "3", autoStartup = "true")
    // Consumes from the DEFAULT cluster (container factory 001).
    // Generics restored on ConsumerRecord (the raw type would not match the factory).
    public void onMessage2(ConsumerRecord<String, Object> record) {
        // Log coordinates first so a bad payload can be traced to topic/partition/key.
        log.info("简单消费001:" + record.topic() + "-" + record.partition() + "-" + record.key());
        String a = record.value().toString();
        log.info("消费的数据001:" + a);
    }
@KafkaListener(topics = {"${self2.kafka.topic}"}, groupId = "test0021", containerFactory = "listenerContainerFactory002", concurrency = "3", autoStartup = "true")
    // Consumes from the SECOND cluster (container factory 002).
    // Renamed from onMessage2: the original duplicated the first listener's
    // name and signature in the same class, which does not compile.
    public void onMessage3(ConsumerRecord<String, Object> record) {
        log.info("简单消费002:" + record.topic() + "-" + record.partition() + "-" + record.key());
        String a = record.value().toString();
        log.info("消费的数据002:" + a);
    }

转载请注明:文章转载自 www.mshxw.com
本文地址:https://www.mshxw.com/it/278332.html
我们一直用心在做
关于我们 文章归档 网站地图 联系我们

版权所有 (c)2021-2022 MSHXW.COM

ICP备案号:晋ICP备2021003244-6号