TAILDIR-KAFKA
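Agent a1 below tails application log files with a TAILDIR source, stamps each event with a type header via a custom interceptor, and uses a multiplexing channel selector to route events directly into two Kafka channels. Because the Kafka channel persists events to Kafka itself, no sink is needed in this agent.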
a1.sources = r1
a1.channels = c1 c2
a1.sources.r1.type = TAILDIR
a1.sources.r1.filegroups = f1
a1.sources.r1.filegroups.f1 = /tmp/logs/app-.*log
a1.sources.r1.positionFile = /opt/installs/flume1.9/logs/taildir_position.json
a1.sources.r1.interceptors = i1
a1.sources.r1.interceptors.i1.type = demo.MyInterceptors$Builder
a1.sources.r1.selector.type = multiplexing
a1.sources.r1.selector.header = type
a1.sources.r1.selector.mapping.start = c1
a1.sources.r1.selector.mapping.event = c2
a1.channels.c1.type = org.apache.flume.channel.kafka.KafkaChannel
a1.channels.c1.kafka.bootstrap.servers = hadoop11:9092,hadoop12:9092,hadoop13:9092
a1.channels.c1.kafka.topic = topic1
a1.channels.c1.parseAsFlumeEvent = false
a1.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
a1.channels.c2.kafka.bootstrap.servers = hadoop11:9092,hadoop12:9092,hadoop13:9092
a1.channels.c2.kafka.topic = topic2
a1.channels.c2.parseAsFlumeEvent = false
a1.sources.r1.channels = c1 c2
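With the properties above saved to a file (assumed here to be named taildir-kafka.conf), the agent can be started with flume-ng agent -n a1 -c conf -f taildir-kafka.conf -Dflume.root.logger=INFO,console. The interceptor jar must be on Flume's classpath first, e.g. copied into the lib directory of the Flume installation (here /opt/installs/flume1.9).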
kafka-mem-hdfs
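The second agent consumes topic1 and topic2 with two Kafka sources, buffers each stream in its own memory channel, and writes date-partitioned files to HDFS through two HDFS sinks.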
a1.sources = r1 r2
a1.channels = c1 c2
a1.sinks = k1 k2
a1.sources.r1.type = org.apache.flume.source.kafka.KafkaSource
a1.sources.r1.kafka.bootstrap.servers = hadoop11:9092,hadoop12:9092,hadoop13:9092
a1.sources.r1.kafka.topics = topic1
a1.sources.r1.kafka.consumer.group.id = g1
a1.sources.r2.type = org.apache.flume.source.kafka.KafkaSource
a1.sources.r2.kafka.bootstrap.servers = hadoop11:9092,hadoop12:9092,hadoop13:9092
a1.sources.r2.kafka.topics = topic2
a1.sources.r2.kafka.consumer.group.id = g1
a1.channels.c1.type = memory
a1.channels.c1.capacity = 10000
a1.channels.c1.transactionCapacity = 10000
a1.channels.c2.type = memory
a1.channels.c2.capacity = 10000
a1.channels.c2.transactionCapacity = 10000
a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = hdfs://hdfs-cluster/origin_data/gmall/log/topic1/%Y-%m-%d
a1.sinks.k1.hdfs.useLocalTimeStamp = true
a1.sinks.k1.hdfs.fileType = DataStream
a1.sinks.k1.hdfs.rollInterval = 10
a1.sinks.k1.hdfs.rollSize = 104857600
a1.sinks.k1.hdfs.rollCount = 0
a1.sinks.k1.hdfs.filePrefix = logstart-
a1.sinks.k2.type = hdfs
a1.sinks.k2.hdfs.path = hdfs://hdfs-cluster/origin_data/gmall/log/topic2/%Y-%m-%d
a1.sinks.k2.hdfs.useLocalTimeStamp = true
a1.sinks.k2.hdfs.fileType = DataStream
a1.sinks.k2.hdfs.rollInterval = 10
a1.sinks.k2.hdfs.rollSize = 104857600
a1.sinks.k2.hdfs.rollCount = 0
a1.sinks.k2.hdfs.filePrefix = logevent-
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
a1.sources.r2.channels = c2
a1.sinks.k2.channel = c2
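With these roll settings, each sink closes the current file every 10 seconds (rollInterval) or once it reaches 104857600 bytes, i.e. 100 MiB (rollSize), whichever comes first; rollCount = 0 disables event-count-based rolling. fileType = DataStream writes the raw event bodies instead of the default SequenceFile format.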
Interceptor code
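The interceptor inspects each event body: if it contains "en":"start", the type header is set to start, otherwise to event; the multiplexing selector in the first agent reads this header to decide whether the event goes to c1 (topic1) or c2 (topic2). Note that the class must be declared in package demo so that it matches the demo.MyInterceptors$Builder reference in the configuration.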
package demo;

import java.util.List;
import java.util.Map;

import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.interceptor.Interceptor;

public class MyInterceptors implements Interceptor {

    @Override
    public void initialize() {
    }

    @Override
    public Event intercept(Event event) {
        byte[] body = event.getBody();
        String str = new String(body);
        Map<String, String> map = event.getHeaders();
        // Tag the event so the multiplexing selector can route it:
        // bodies containing "en":"start" go to the start channel, the rest to the event channel.
        if (str.contains("\"en\":\"start\"")) {
            map.put("type", "start");
        } else {
            map.put("type", "event");
        }
        return event;
    }

    @Override
    public List<Event> intercept(List<Event> list) {
        // Run the single-event intercept on every event in the batch,
        // setting the "type" header on each event's header map.
        for (Event event : list) {
            intercept(event);
        }
        return list;
    }

    @Override
    public void close() {
    }

    public static class Builder implements Interceptor.Builder {
        @Override
        public Interceptor build() {
            return new MyInterceptors();
        }

        @Override
        public void configure(Context context) {
        }
    }
}
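As a quick sanity check, here is a minimal sketch (MyInterceptorsDemo is a hypothetical class name, and it assumes flume-ng-core is on the classpath) that builds two events and prints the header the interceptor assigns:

package demo;

import java.nio.charset.StandardCharsets;

import org.apache.flume.Event;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.interceptor.Interceptor;

// Hypothetical demo class, not part of the agent configuration above.
public class MyInterceptorsDemo {
    public static void main(String[] args) {
        Interceptor interceptor = new MyInterceptors.Builder().build();

        // A body containing "en":"start" should be tagged type=start ...
        Event start = EventBuilder.withBody("{\"en\":\"start\"}", StandardCharsets.UTF_8);
        // ... and any other body should be tagged type=event.
        Event other = EventBuilder.withBody("{\"en\":\"display\"}", StandardCharsets.UTF_8);

        interceptor.intercept(start);
        interceptor.intercept(other);

        System.out.println(start.getHeaders().get("type")); // prints: start
        System.out.println(other.getHeaders().get("type")); // prints: event
    }
}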