1. Create a Maven project
Add the following dependencies to the pom.xml file:
<dependencies>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>3.1.3</version>
    </dependency>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.13.2</version>
    </dependency>
    <dependency>
        <groupId>org.slf4j</groupId>
        <artifactId>slf4j-nop</artifactId>
        <version>1.7.35</version>
    </dependency>
</dependencies>
<build>
    <plugins>
        <plugin>
            <artifactId>maven-compiler-plugin</artifactId>
            <version>3.6.1</version>
            <configuration>
                <source>1.8</source>
                <target>1.8</target>
            </configuration>
        </plugin>
    </plugins>
</build>
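Note: the hadoop-client version (3.1.3 above) should match the Hadoop release installed on your cluster; adjust it if yours differs.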
2. In the project's resources directory, create a file named log4j.properties and put the following content in it:
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n
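This configuration logs INFO and above to the console. It also declares a file appender (logfile) writing to target/spring.log with the same pattern, though only stdout is attached to the root logger.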
3. The Mapper class
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    // Type of the output key
    private Text outK = new Text();
    // Type of the output value, with a default value of 1
    private IntWritable outV = new IntWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Convert each incoming line to a String
        String line = value.toString();
        // Split the line on spaces to get the individual words
        String[] words = line.split(" ");
        for (String word : words) {
            // Wrap the String word in the Text object declared above
            outK.set(word);
            // Emit the key and value
            context.write(outK, outV);
        }
    }
}
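For example, given the input line hello world hello, this mapper emits (hello, 1), (world, 1), (hello, 1); the framework then sorts and groups these pairs by key before they reach the reducer.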
4. The Reducer class
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    // Type of the output value
    private IntWritable outValue = new IntWritable();

    // Iterable<IntWritable> values: all values for one key, a collection-like
    // structure that looks like ----> <hello, (1, 1)>
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        int sum = 0;
        // Iterate over the values with an enhanced for loop
        for (IntWritable value : values) {
            // value is an IntWritable and sum is an int, so call get() to unbox it
            sum += value.get();
        }
        // Wrap the int sum in the IntWritable declared above
        outValue.set(sum);
        // Emit the result
        context.write(key, outValue);
    }
}
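Continuing the example, after the shuffle the reducer is called once per key: it receives (hello, [1, 1]) and (world, [1]), and emits (hello, 2) and (world, 1).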
5. The Driver class
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class WordCountDriver {
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        // 1. Get and configure the job
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // 2. Set the jar path via the driver class
        job.setJarByClass(WordCountDriver.class);
        // 3. Wire up the mapper and reducer
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);
        // 4. Set the map output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // 5. Set the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // 6. Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // 7. Submit the job and exit with its status
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
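A common pitfall: the job fails at startup if the output directory already exists. As an optional sketch, not part of the original walkthrough, you could delete it in the driver before step 7; this assumes the default FileSystem from conf and needs one extra import, org.apache.hadoop.fs.FileSystem:

// Optional guard before submitting: remove a pre-existing output directory.
// Assumes the default FileSystem configured in conf (HDFS on a cluster).
FileSystem fs = FileSystem.get(conf);
Path output = new Path(args[1]);
if (fs.exists(output)) {
    fs.delete(output, true); // true = delete recursively
}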
Finally, package the project into a jar, upload it to the Hadoop directory on the Linux machine, and run it as follows:
hadoop jar <jar name> <fully qualified Driver class> <input path> <output path>
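For reference, mvn clean package builds the jar into target/. A concrete invocation might look like this, assuming a jar named wc.jar, a hypothetical package com.example for the driver, and input already uploaded to /wcinput on HDFS:

hadoop jar wc.jar com.example.WordCountDriver /wcinput /wcoutput

The output directory (/wcoutput here) must not exist before the run; afterwards, the counts can be inspected with hadoop fs -cat /wcoutput/part-r-00000.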