在MapReduce流程中,map的输出会在shuffle阶段按key排序并分组,相同的key只会进入同一次reduce调用;因此只要把每行文本作为key输出,重复的行就会被自动归并,从而实现文件合并与去重。
二:代码
package exer1;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
 * MapReduce driver that merges two input files and removes duplicate lines.
 *
 * <p>Each input line is emitted by the mapper as a key with a {@link NullWritable}
 * value; the shuffle phase groups identical keys, so the reducer sees every
 * distinct line exactly once and writes it out — yielding the deduplicated
 * union of all input files.
 *
 * <p>Usage: {@code hadoop jar <jar> <hdfs input dir> <hdfs output dir>}
 * (the output directory must not already exist).
 */
public class FileMerge1 {

    // NOTE(review): lowercase class names "map"/"reduce" violate Java naming
    // conventions (should be e.g. DedupMapper); kept as-is so existing callers
    // referencing these nested classes keep working.

    /** Mapper: emits each input line as the key; duplicates collapse in shuffle. */
    public static class map extends Mapper<LongWritable, Text, Text, NullWritable> {

        /**
         * Emits the whole line as the output key.
         *
         * @param key     byte offset of the line within the split (unused)
         * @param value   the line of text
         * @param context output collector
         */
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Write the line directly; no need for an intermediate field.
            // (The original kept a static Text field and reassigned it to the
            // framework-owned `value` object — the allocation was dead code.)
            context.write(value, NullWritable.get());
        }
    }

    /** Reducer: each distinct line arrives once as a key; write it through. */
    public static class reduce extends Reducer<Text, NullWritable, Text, NullWritable> {

        /**
         * Writes each distinct key (line) to the output exactly once.
         *
         * @param key     a distinct input line
         * @param values  ignored NullWritable placeholders
         * @param context output collector
         */
        @Override
        protected void reduce(Text key, Iterable<NullWritable> values, Context context)
                throws IOException, InterruptedException {
            context.write(key, NullWritable.get());
        }
    }

    /**
     * Configures and submits the dedup job.
     *
     * @param args args[0] = HDFS input path, args[1] = HDFS output path
     * @throws Exception if job setup or submission fails
     */
    public static void main(String[] args) throws Exception {
        // Fail fast with a usage message instead of ArrayIndexOutOfBoundsException.
        if (args.length < 2) {
            System.err.println("Usage: FileMerge1 <input path> <output path>");
            System.exit(2);
        }
        // 1. Create the job from the cluster configuration.
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "去重");
        // 2. Locate the jar containing this driver class.
        job.setJarByClass(FileMerge1.class);
        // 3. Wire up the mapper and reducer.
        job.setMapperClass(map.class);
        job.setReducerClass(reduce.class);
        // 4./5. Map output and final output share the same (Text, NullWritable) types.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);
        // 6. Input and output paths from the command line.
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // 7. Submit and block until completion; exit code reflects success.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
三:目标文件
A:
20160708 20161113 20160702 20160906 20161011 20160901 20160108 20160609 20160221 20160308 20161001 20161012 20160309 20161023 20161104 20160806
B:
20160708 20161113 20160422 20160604 20161122 20160308 20161001 20161012
四:将本地文件提交到HDFS目录
使用上传命令
hdfs dfs -put /(路径) /(将要上传到的hdfs路径)
五:将java程序导出为jar包
导出jar包
六:在虚拟机上运行jar包,并开始任务
hadoop jar /(jar包) /(上传的hdfs目录) /(hdfs 输出路径)
七:运行成功



