Process
pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>org.example</groupId>
    <artifactId>hdfs-api</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <maven.compiler.source>16</maven.compiler.source>
        <maven.compiler.target>16</maven.compiler.target>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.7.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.7.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.7.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>2.7.4</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>RELEASE</version>
        </dependency>
    </dependencies>
</project>
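Note that all four Hadoop artifacts are pinned to the same version (2.7.4). Mixing Hadoop versions on the classpath is a common source of NoSuchMethodError and similar failures, so keep them in lockstep if you upgrade.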
WordCountMapper.java
package com.wordcount;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String line = value.toString();   // convert the incoming line of text to a Java String
        String[] words = line.split(" "); // split the line on spaces into the words array
        // Iterate over the array and emit a <word, 1> pair for each word
        for (String word : words) {
            // Use the context to send the map output to the reduce phase,
            // wrapping the Java values in the corresponding Writable types
            context.write(new Text(word), new IntWritable(1));
        }
    }
}
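For example, given the input line "hello world hello", this mapper emits (hello, 1), (world, 1), (hello, 1). The framework then sorts and groups these pairs by key during the shuffle, so each reduce call sees one word together with all of its 1s.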
WordCountReducer.java
package com.wordcount;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int count = 0;                  // counter for this word
        for (IntWritable iw : values) { // iterate over the group and accumulate the 1s into a total
            count += iw.get();
        }
        context.write(key, new IntWritable(count));
    }
}
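Continuing the example above, the reducer receives the grouped pairs (hello, [1, 1]) and (world, [1]) and writes (hello, 2) and (world, 1) as the final counts.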
WordCountDriver.java
package com.wordcount;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class WordCountDriver {
public static void main(String[] args) throws Exception {
// 1. Create the configuration
Configuration conf = new Configuration();
// To read from HDFS instead of the local file system, point fs.defaultFS at the
// NameNode RPC address (e.g. port 9000 in Hadoop 2.x), not the 50070 web UI port:
//conf.set("fs.defaultFS", "hdfs://hadoop01:9000");
conf.set("mapreduce.framework.name","local"); // run the job with the local runner
// 2. Create the job and register the driver's jar
Job job = Job.getInstance(conf);
job.setJarByClass(WordCountDriver.class);
// 3. Associate the mapper and reducer classes
job.setMapperClass(WordCountMapper.class);
job.setReducerClass(WordCountReducer.class);
// 4. Set the map output key/value types
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(IntWritable.class);
// 5. Set the final output key/value types
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
// 6. Set the input and output paths
FileInputFormat.setInputPaths(job,"E:/hadoopw/input");
FileOutputFormat.setOutputPath(job,new Path("E:/hadoopw/output"));
// 7. Submit the job and wait for it to finish
boolean res=job.waitForCompletion(true);
System.exit(res ? 0 : 1);
}
}
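One caveat when rerunning the driver: FileOutputFormat refuses to write into an existing directory and fails with a FileAlreadyExistsException. A minimal guard that could be added to main() before job submission, assuming the same conf and output path as above (requires org.apache.hadoop.fs.FileSystem):

// Optional: remove a leftover output directory so the job can be rerun
FileSystem fs = FileSystem.get(conf);
Path output = new Path("E:/hadoopw/output");
if (fs.exists(output)) {
    fs.delete(output, true); // true = delete recursively
}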
Output
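On success, the results land in E:/hadoopw/output as part-r-00000, next to an empty _SUCCESS marker file. With the example input above, the file would contain tab-separated word counts such as:

hello	2
world	1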