Contents

I. Prerequisites
    1.1 Definition of Serialization
    1.2 Purpose of Hadoop Serialization
    1.3 Why Hadoop Doesn't Use Java's Native Serialization
    1.4 Characteristics of Hadoop Serialization
II. Usage Example
    2.1 Custom Serializable Object
    2.2 Mapper Stage
    2.3 Reducer Stage
    2.4 Driver Stage + Running the Test
References
    Video Resources
I. Prerequisites

1.1 Definition of Serialization
Serialization is the process of converting in-memory objects into a byte sequence (or another data transfer protocol) so that they can be stored on disk (persisted) and transmitted over the network. Deserialization is the reverse: converting a received byte sequence (or persisted data on disk) back into in-memory objects.
1.2 Purpose of Hadoop Serialization

Generally speaking, "live" objects exist only in memory and are gone once the machine powers off. Moreover, a "live" object can be used only by the local process; it cannot be sent to another computer over the network. Serialization removes both limitations: it lets "live" objects be stored and be sent to remote machines.
1.3 Why Hadoop Doesn't Use Java's Native Serialization

Java's native serialization (Serializable) is a heavyweight framework: a serialized object carries a lot of extra information (checksums, headers, the inheritance hierarchy, and so on), which makes it inefficient to transmit over the network. Hadoop therefore developed its own serialization mechanism, Writable.
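To make the overhead concrete, here is a minimal, self-contained sketch (the SerializationSizeDemo class and its names are my addition, not part of the original tutorial) that serializes a single long value with both mechanisms and prints the resulting byte counts. On a typical JDK the Serializable form comes to dozens of bytes, versus the raw 8 bytes of the Writable encoding.

package com.uni.writable;

import org.apache.hadoop.io.LongWritable;

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.io.Serializable;

public class SerializationSizeDemo {
    // Trivial Serializable holder used only for the comparison
    static class JavaLong implements Serializable {
        private static final long serialVersionUID = 1L;
        long value = 1116L;
    }

    public static void main(String[] args) throws IOException {
        // Java native serialization: stream header, class descriptor, etc. are written too
        ByteArrayOutputStream javaBytes = new ByteArrayOutputStream();
        try (ObjectOutputStream oos = new ObjectOutputStream(javaBytes)) {
            oos.writeObject(new JavaLong());
        }

        // Hadoop Writable: just the raw 8-byte long
        ByteArrayOutputStream writableBytes = new ByteArrayOutputStream();
        try (DataOutputStream dos = new DataOutputStream(writableBytes)) {
            new LongWritable(1116L).write(dos);
        }

        System.out.println("Java Serializable: " + javaBytes.size() + " bytes");
        System.out.println("Hadoop Writable:   " + writableBytes.size() + " bytes");
    }
}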
1.4 Characteristics of Hadoop Serialization

Compact: uses storage space efficiently.
Fast: little extra overhead when reading and writing data.
Interoperable: supports interaction across multiple languages.
II. Usage Example

Requirement: for each phone number, compute the total upstream traffic, total downstream traffic, and overall total traffic consumed.
Test data (phone_data.txt; fields are tab-separated):
1	13736230513	192.196.100.1	www.atguigu.com	2481	24681	200
2	13846544121	192.196.100.2	264	0	200
3	13956435636	192.196.100.3	132	1512	200
4	13966251146	192.168.100.1	240	0	404
5	18271575951	192.168.100.2	www.atguigu.com	1527	2106	200
6	84188413	192.168.100.3	www.atguigu.com	4116	1432	200
7	13590439668	192.168.100.4	1116	954	200
8	15910133277	192.168.100.5	www.hao123.com	3156	2936	200
9	13729199489	192.168.100.6	240	0	200
10	13630577991	192.168.100.7	www.shouhu.com	6960	690	200
11	15043685818	192.168.100.8	www.baidu.com	3659	3538	200
12	15959002129	192.168.100.9	www.atguigu.com	1938	180	500
13	13560439638	192.168.100.10	918	4938	200
14	13470253144	192.168.100.11	180	180	200
15	13682846555	192.168.100.12	www.qq.com	1938	2910	200
16	13992314666	192.168.100.13	www.gaga.com	3008	3720	200
17	13509468723	192.168.100.14	www.qinghua.com	7335	110349	404
18	18390173782	192.168.100.15	www.sogou.com	9531	2412	200
19	13975057813	192.168.100.16	www.baidu.com	11058	48243	200
20	13768778790	192.168.100.17	120	120	200
21	13568436656	192.168.100.18	www.alibaba.com	2481	24681	200
22	13568436656	192.168.100.19	1116	954	200
Input data format:
7	13560436666	120.196.100.99	1116	954	200
(id, phone number, network IP, upstream traffic, downstream traffic, network status code)
Expected output data format:
13560436666	1116	954	2070
(phone number, upstream traffic, downstream traffic, total traffic)

2.1 Custom Serializable Object
package com.uni.writable;

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class FlowBean implements Writable {
    private long upFlow;   // upstream traffic
    private long downFlow; // downstream traffic
    private long sumFlow;  // total traffic

    // No-arg constructor, required so Hadoop can instantiate the bean
    // reflectively during deserialization
    public FlowBean() {}

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    // Derives the total from the upstream and downstream values
    public void setSumFlow() {
        this.sumFlow = this.upFlow + this.downFlow;
    }

    // Serialization: the order written here defines the wire format
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    // Deserialization: fields must be read in exactly the order they were written
    @Override
    public void readFields(DataInput in) throws IOException {
        this.upFlow = in.readLong();
        this.downFlow = in.readLong();
        this.sumFlow = in.readLong();
    }

    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }
}
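Before plugging the bean into a job, the Writable contract can be sanity-checked in isolation. Below is a quick round-trip sketch (the FlowBeanRoundTrip class is my addition): write() emits exactly three longs, 24 bytes in total, and readFields() reads them back in the same order.

package com.uni.writable;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FlowBeanRoundTrip {
    public static void main(String[] args) throws IOException {
        FlowBean before = new FlowBean();
        before.setUpFlow(1116L);
        before.setDownFlow(954L);
        before.setSumFlow();

        // Serialize the bean into an in-memory buffer
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        before.write(new DataOutputStream(buffer));

        // Deserialize into a fresh instance and print it
        FlowBean after = new FlowBean();
        after.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
        System.out.println(buffer.size() + " bytes -> " + after); // 24 bytes -> 1116 954 2070
    }
}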
2.2 Mapper Stage
package com.uni.writable;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class FlowMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
    private Text outputKey = new Text();
    private FlowBean outputValue = new FlowBean();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // 1. Read one line, e.g. 7  13560436666  120.196.100.99  1116  954  200
        String line = value.toString();
        // 2. Split it into [7, 13560436666, 120.196.100.99, 1116, 954, 200]
        String[] split = line.split("\t");
        // 3. Extract the fields we need: phone number 13560436666,
        //    upstream and downstream traffic 1116 and 954.
        String phone = split[1];
        // Some records are missing the domain field, so take the upstream and
        // downstream traffic by counting back from the end of the array
        String up = split[split.length - 3];
        String down = split[split.length - 2];
        // 4. Populate the output key/value
        outputKey.set(phone);
        outputValue.setUpFlow(Long.parseLong(up));
        outputValue.setDownFlow(Long.parseLong(down));
        outputValue.setSumFlow();
        // 5. Emit
        context.write(outputKey, outputValue);
    }
}
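Why count from the end of the array? As the comment in step 3 notes, rows that carry a domain split into seven fields while rows without one split into only six, so only the trailing positions are stable. A standalone sketch with two rows from the test data (the SplitDemo class is my addition):

package com.uni.writable;

public class SplitDemo {
    public static void main(String[] args) {
        // Row 1 has a domain (7 fields); row 2 does not (6 fields)
        String withDomain = "1\t13736230513\t192.196.100.1\twww.atguigu.com\t2481\t24681\t200";
        String withoutDomain = "2\t13846544121\t192.196.100.2\t264\t0\t200";
        for (String line : new String[]{withDomain, withoutDomain}) {
            String[] f = line.split("\t");
            // Indexing back from the end works for both record shapes
            System.out.println(f[1] + ": up=" + f[f.length - 3] + ", down=" + f[f.length - 2]);
        }
    }
}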
2.3 Reducer Stage

package com.uni.writable;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FlowReducer extends Reducer<Text, FlowBean, Text, FlowBean> {
    private FlowBean outputValue = new FlowBean();

    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context)
            throws IOException, InterruptedException {
        // 1. Iterate over the values, accumulating upstream and downstream traffic
        long totalUp = 0;
        long totalDown = 0;
        for (FlowBean value : values) {
            totalUp += value.getUpFlow();
            totalDown += value.getDownFlow();
        }
        // 2. Populate the output value
        outputValue.setUpFlow(totalUp);
        outputValue.setDownFlow(totalDown);
        outputValue.setSumFlow();
        // 3. Emit
        context.write(key, outputValue);
    }
}
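One detail worth calling out: while iterating over values, Hadoop reuses a single FlowBean instance and calls readFields() on it for each record, so holding on to the bean references themselves would not work. Accumulating into the primitive counters totalUp and totalDown, as done above, sidesteps this.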
2.4 Driver Stage + Running the Test

package com.uni.writable;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class FlowDriver {
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        // 1. Get the job instance
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // 2. Set the jar, located via the driver class
        job.setJarByClass(FlowDriver.class);
        // 3. Wire up the Mapper and Reducer
        job.setMapperClass(FlowMapper.class);
        job.setReducerClass(FlowReducer.class);
        // 4. Set the Mapper's output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);
        // 5. Set the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);
        // 6. Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path("input"));
        FileOutputFormat.setOutputPath(job, new Path("output"));
        // 7. Submit the job and wait for completion
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
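A practical note for re-running the job locally: submission fails if the "output" directory is left over from a previous run, because FileOutputFormat refuses to write to an existing output path. A common guard (a sketch to splice into main before step 6; it additionally needs an import of org.apache.hadoop.fs.FileSystem):

        // Delete a stale output directory before submitting the job
        Path output = new Path("output");
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(output)) {
            fs.delete(output, true); // true = delete recursively
        }
        FileOutputFormat.setOutputPath(job, output);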
Local run result: (screenshot omitted)