Given the input data, compute for every phone number the total upstream traffic, total downstream traffic, and total traffic.
Writing the MapReduce program
Writing the Bean class
package com.ljx.mr.writable;
import org.apache.hadoop.io.Writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
public class FlowBean implements Writable {
private long upFlow;//upstream traffic
private long downFlow;//downstream traffic
private long sumFlow;//total traffic
public long getUpFlow() {
return upFlow;
}
public void setUpFlow(long upFlow) {
this.upFlow = upFlow;
}
public long getDownFlow() {
return downFlow;
}
public void setDownFlow(long downFlow) {
this.downFlow = downFlow;
}
public long getSumFlow() {
return sumFlow;
}
public void setSumFlow() {
this.sumFlow = this.upFlow + this.downFlow;
}
//no-argument constructor, required so that Hadoop can instantiate the bean reflectively
public FlowBean(){
}
@Override
public void write(DataOutput out) throws IOException {
//serialize the fields in a fixed order
out.writeLong(upFlow);
out.writeLong(downFlow);
out.writeLong(sumFlow);
}
@Override
public void readFields(DataInput in) throws IOException {
//deserialize in exactly the same order as write()
this.upFlow = in.readLong();
this.downFlow = in.readLong();
this.sumFlow = in.readLong();
}
@Override
public String toString() {
return upFlow + "\t" + downFlow + "\t" + sumFlow;
}
}
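Because Writable serialization is positional, write() and readFields() must handle the fields in the same order. The following standalone sketch is hypothetical and not part of the original program (class name and sample numbers are made up); it round-trips a FlowBean through an in-memory buffer to illustrate that contract.
package com.ljx.mr.writable;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
public class FlowBeanRoundTrip {
public static void main(String[] args) throws IOException {
FlowBean original = new FlowBean();
original.setUpFlow(100);
original.setDownFlow(200);
original.setSumFlow();
//serialize the bean into an in-memory buffer
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
original.write(new DataOutputStream(buffer));
//deserialize a new bean from the same bytes
FlowBean copy = new FlowBean();
copy.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
//prints 100, 200 and 300 separated by tabs
System.out.println(copy);
}
}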
Writing the Mapper class
package com.ljx.mr.writable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
public class FlowMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
private Text outK = new Text();
private FlowBean outV = new FlowBean();
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
//get one line of input
String line = value.toString();
//split on tabs
String[] split = line.split("\t");
//pick out the fields we need
String phone = split[1];
String up = split[split.length - 3];
String down = split[split.length - 2];
//wrap them into the output key and value
outK.set(phone);
outV.setUpFlow(Long.parseLong(up));
outV.setDownFlow(Long.parseLong(down));
outV.setSumFlow();
//emit
context.write(outK, outV);
}
}
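The Mapper addresses fields by position: the phone number is the second column and the upstream/downstream byte counts are the third- and second-to-last columns. The sketch below uses a made-up record (the field layout and values are assumptions, not taken from the original data set) to show how those indexes line up.
package com.ljx.mr.writable;
public class FlowLineDemo {
public static void main(String[] args) {
//a hypothetical tab-separated input record: id, phone, ip, domain, upFlow, downFlow, status
String line = "1\t13812345678\t192.168.1.1\twww.example.com\t1024\t2048\t200";
String[] split = line.split("\t");
System.out.println(split[1]);                //phone number -> 13812345678
System.out.println(split[split.length - 3]); //upstream bytes -> 1024
System.out.println(split[split.length - 2]); //downstream bytes -> 2048
}
}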
Writing the Reducer class
package com.ljx.mr.writable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
public class FlowReducer extends Reducer<Text, FlowBean, Text, FlowBean> {
private FlowBean outV = new FlowBean();
@Override
protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
//accumulate the values for this phone number
long totalUp = 0;
long totalDown = 0;
for (FlowBean value : values) {
totalUp += value.getUpFlow();
totalDown += value.getDownFlow();
}
//wrap the totals into the output value
outV.setUpFlow(totalUp);
outV.setDownFlow(totalDown);
outV.setSumFlow();
context.write(key, outV);
}
}
Writing the Driver class
package com.ljx.mr.writable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.io.Text;
import java.io.IOException;
public class FlowDriver {
public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
//get a Job instance
Configuration configuration = new Configuration();
Job job = Job.getInstance(configuration);
//set the jar
job.setJarByClass(FlowDriver.class);
//wire up the Mapper and Reducer
job.setMapperClass(FlowMapper.class);
job.setReducerClass(FlowReducer.class);
//set the Mapper output key and value types
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(FlowBean.class);
//set the final output key and value types
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(FlowBean.class);
//set the input and output paths
FileInputFormat.setInputPaths(job, new Path("E:\\input\\inputword"));
FileOutputFormat.setOutputPath(job, new Path("E:\\input\\output1"));
//submit the job
boolean result = job.waitForCompletion(true);
System.exit(result ? 0 : 1);
}
}
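The input and output paths above are hardcoded to a local Windows directory, which is convenient for running the Driver straight from the IDE. A common variation (an assumption, not part of the original program) is to take the paths from the program arguments instead, so the job can be packaged into a jar and submitted to a cluster with hadoop jar; the jar name and HDFS paths below are placeholders.
//hypothetical change: replace the two hardcoded path lines in FlowDriver with
FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
The job could then be launched as, for example:
hadoop jar flow-writable.jar com.ljx.mr.writable.FlowDriver /input/phone_data /output/flow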
The output contains one line per phone number: the phone number, the total upstream traffic, the total downstream traffic, and the total traffic, separated by tabs.