
MapReduce Programming Examples

Table of Contents
    • MapReduce Programming Examples
      • Preface
      • Notes
      • Word Count (WordCount)
      • Classic MapReduce Case: Inverted Index
      • Classic MapReduce Case: Data Deduplication
      • Classic MapReduce Case: TopN
    • GitHub Download

Preface

Introduction

Reference: Hadoop 中文网 (Chinese-language Hadoop documentation)

Hadoop test project: HadoopDemo

Notes

If you download HadoopDemo for testing and use HDFS_CRUD.java,
you need to set up winutils beforehand, ideally matching your Hadoop version.
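
A minimal sketch of wiring winutils into a local run on Windows; the path C:\hadoop is a made-up example (winutils.exe must sit in its bin\ subdirectory), and Hadoop reads the hadoop.home.dir system property before falling back to the HADOOP_HOME environment variable:

public class LocalSetup {
    public static void main(String[] args) throws Exception {
        // Hypothetical location: the directory whose bin\ contains winutils.exe.
        // Set this before any Hadoop class is loaded.
        System.setProperty("hadoop.home.dir", "C:\\hadoop");
        // ...then build the Configuration and Job exactly as in the drivers below
    }
}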

Word Count (WordCount)

WordCountMapper.java

package top.rabbitcrows.hadoop.mr;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;


public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    @Override
    protected void map(LongWritable key, Text value,
                       Context context)
            throws IOException, InterruptedException {
        // Take the incoming line and convert it to a String
        String line = value.toString();
        // Split the line on the delimiter
        String[] words = line.split(" ");
        // For every word in the array, emit a pair marking one occurrence, e.g. <word, 1>
        for (String word : words) {
            // Use the MR context to send this map-side record to the reduce phase as input
            context.write(new Text(word), new IntWritable(1));
            // e.g. the line "hadoop hadoop spark" emits <hadoop,1> <hadoop,1> <spark,1>
        }
    }
}

WordCountReducer.java

package top.rabbitcrows.hadoop.mr;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

// Every reducer extends Reducer; this is the programming model we keep
// referring to: just fill in the template.


public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values,
                          Context context)
            throws IOException, InterruptedException {
        // Define a counter
        int count = 0;
        // Iterate over the group of values and add up each 1 to get the word's total count
        for (IntWritable iw : values) {
            count += iw.get();
        }
        context.write(key, new IntWritable(count));
    }
}

WordCountCombiner.java

package top.rabbitcrows.hadoop.mr;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class WordCountCombiner extends Reducer<Text, IntWritable, Text, IntWritable> {

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values,
                          Context context)
            throws IOException, InterruptedException {
        // 1. Local aggregation on the map side
        int count = 0;
        for (IntWritable v : values) {
            count += v.get();
        }
        context.write(key, new IntWritable(count));
    }
}
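
The combiner runs on each map task's local output before the shuffle, so duplicate keys are pre-aggregated and less data crosses the network. For instance, a map task that emits <hadoop,1> twice hands the reducer a single <hadoop,2> instead. This is only safe because word counting is associative and commutative; the reducer computes the same totals with or without the combiner.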

WordCountDriver.java

package top.rabbitcrows.hadoop.mr;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;


public class WordCountDriver {

    public static void main(String[] args) throws Exception {
        // Use a Job to wrap all the information for this MR run
        Configuration conf = new Configuration();
        conf.set("mapreduce.framework.name", "local");
        Job wcjob = Job.getInstance(conf);

        // Specify the main class of the MR job jar
        wcjob.setJarByClass(WordCountDriver.class);
        // Specify the Mapper and Reducer classes for this job
        wcjob.setMapperClass(WordCountMapper.class);
        wcjob.setReducerClass(WordCountReducer.class);

        // Set the key and value output types of the Mapper
        wcjob.setMapOutputKeyClass(Text.class);
        wcjob.setMapOutputValueClass(IntWritable.class);

        // Set the key and value output types of the Reducer
        wcjob.setOutputKeyClass(Text.class);
        wcjob.setOutputValueClass(IntWritable.class);

        // Set the Combiner class
        wcjob.setCombinerClass(WordCountCombiner.class);

        // Specify where the input data lives
        FileInputFormat.setInputPaths(wcjob, new Path("input/mr"));
        // Specify where the results should be written
        FileOutputFormat.setOutputPath(wcjob, new Path("output/mr"));

        // Submit the job and print progress while it runs
        boolean res = wcjob.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
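
To make the data flow concrete, here is a trace with a made-up input file and a single map task:

input/mr/words.txt (hypothetical):
hadoop hadoop spark
spark flink

map output:      <hadoop,1> <hadoop,1> <spark,1> <spark,1> <flink,1>
combiner output: <flink,1> <hadoop,2> <spark,2>
reduce output:   flink   1
                 hadoop  2
                 spark   2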
Classic MapReduce Case: Inverted Index

InvertedIndexMapper.java

package top.rabbitcrows.mr.InvertedIndex;

import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

import java.io.IOException;


public class InvertedIndexMapper extends Mapper<LongWritable, Text, Text, Text> {

    // Holds "word:filename"
    private static Text keyInfo = new Text();

    // Holds the term frequency, initialized to 1
    private static final Text valueInfo = new Text("1");

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String line = value.toString();
        String[] fields = StringUtils.split(line, " ");
        // Get the file split this line belongs to
        FileSplit fileSplit = (FileSplit) context.getInputSplit();
        // Derive the file name from the split
        String fileName = fileSplit.getPath().getName();
        for (String field : fields) {
            // The key combines the word and the file name, e.g. "MapReduce:file1.txt"
            keyInfo.set(field + ":" + fileName);
            context.write(keyInfo, valueInfo);
        }
    }
}

InvertedIndexCombiner.java

package top.rabbitcrows.mr.InvertedIndex;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;


public class InvertedIndexCombiner extends Reducer<Text, Text, Text, Text> {

    private static Text info = new Text();
    // Input:  <"MapReduce:file1.txt", [1, 1, ...]>
    // Output: <"MapReduce", "file1.txt:2">

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        int sum = 0;    // term frequency
        for (Text value : values) {
            sum += Integer.parseInt(value.toString());
        }
        int splitIndex = key.toString().indexOf(":");
        // Rebuild the value as "filename:frequency"
        info.set(key.toString().substring(splitIndex + 1) + ":" + sum);
        // Reset the key to just the word
        key.set(key.toString().substring(0, splitIndex));
        context.write(key, info);
    }
}

InvertedIndexReducer.java

package top.rabbitcrows.mr.InvertedIndex;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;


public class InvertedIndexReducer extends Reducer<Text, Text, Text, Text> {
    private static Text result = new Text();
    // Input:  <"MapReduce", ["file1.txt:2", "file2.txt:1", ...]>
    // Output: <"MapReduce", "file1.txt:2;file2.txt:1;">

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        // Build the document list
        StringBuilder fileList = new StringBuilder();
        for (Text value : values) {
            fileList.append(value.toString()).append(";");
        }
        result.set(fileList.toString());
        context.write(key, result);
    }
}

InvertedIndexDriver.java

package top.rabbitcrows.mr.InvertedIndex;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;


public class InvertedIndexDriver {

    public static void main(String[] args)
            throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
//        conf.set("mapreduce.framework.name", "local");
        Job job = Job.getInstance(conf);

        job.setJarByClass(InvertedIndexDriver.class);

        job.setMapperClass(InvertedIndexMapper.class);
        job.setReducerClass(InvertedIndexReducer.class);
        job.setCombinerClass(InvertedIndexCombiner.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        // Reducer output types: Text word -> Text file list
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        // Specify where the input data lives
        FileInputFormat.setInputPaths(job,
                new Path("input/InvertedIndex/"));
        // Specify where the results should be written
        FileOutputFormat.setOutputPath(job,
                new Path("output/InvertedIndex"));

        // Submit the job and print progress while it runs
        boolean res = job.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
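
A worked example with invented file contents shows how the three stages compose (the order of entries in each file list depends on shuffle order):

input/InvertedIndex/file1.txt:  MapReduce is simple
input/InvertedIndex/file2.txt:  MapReduce is powerful is simple

map output:      <MapReduce:file1.txt,1> <is:file1.txt,1> <is:file2.txt,1> <is:file2.txt,1> ...
combiner output: <MapReduce, file1.txt:1> <is, file2.txt:2> ...
reduce output:   MapReduce   file1.txt:1;file2.txt:1;
                 is          file1.txt:1;file2.txt:2;
                 powerful    file2.txt:1;
                 simple      file1.txt:1;file2.txt:1;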
Classic MapReduce Case: Data Deduplication

DedupMapper.java

package top.rabbitcrows.mr.dedup;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;


public class DedupMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

    private static Text field = new Text();

    // Input e.g.: <0, "2021-11-1 a"> <11, "2021-11-2 b">
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        field.set(value);
        // NullWritable.get() supplies the empty value
        context.write(field, NullWritable.get());
        // Output e.g.: <"2018-3-3 c", null> <"2018-3-4 d", null>
    }
}

DedupReducer.java

package top.rabbitcrows.mr.dedup;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;


public class DedupReducer extends Reducer<Text, NullWritable, Text, NullWritable> {
    // Input e.g.: <"2021-11-1 a", null> <"2021-11-2 b", null> <"2021-11-3 c", null>

    @Override
    protected void reduce(Text key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
        // Identical lines arrive as one key group, so writing the key once removes duplicates
        context.write(key, NullWritable.get());
    }
}

DedupDriver.java

package top.rabbitcrows.mr.dedup;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;


public class DedupDriver {

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(DedupDriver.class);
        job.setMapperClass(DedupMapper.class);
        job.setReducerClass(DedupReducer.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        FileInputFormat.setInputPaths(job, new Path("input/Dedup"));

        // Specify where the results should be written
        FileOutputFormat.setOutputPath(job, new Path("output/Dedup"));

        job.waitForCompletion(true);

    }
}
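
Deduplication falls out of the shuffle itself: identical lines become identical keys, the framework groups them, and each distinct key reaches reduce() exactly once. With made-up input:

input lines:    2021-11-1 a
                2021-11-1 a
                2021-11-2 b

map output:     <"2021-11-1 a", null> <"2021-11-1 a", null> <"2021-11-2 b", null>
reduce output:  2021-11-1 a
                2021-11-2 b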
Classic MapReduce Case: TopN

TopNMapper.java

package top.rabbitcrows.mr.topN;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
import java.util.TreeMap;


public class TopNMapper extends Mapper<LongWritable, Text, NullWritable, IntWritable> {

    private TreeMap<Integer, String> repToRecordMap = new TreeMap<Integer, String>();

    // Input e.g.: <0, "10 3 8 7 6 5 1 2 9 4">
    // Output: this task's local top 5, emitted in cleanup()
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String line = value.toString();
        String[] nums = line.split(" ");
        for (String num : nums) {
            // Insert each number into the TreeMap; once it holds more than 5 entries,
            // evict the smallest key (firstKey under natural ordering)
            repToRecordMap.put(Integer.parseInt(num), " ");
            if (repToRecordMap.size() > 5) {
                repToRecordMap.remove(repToRecordMap.firstKey());
            }
        }
    }

    // Override cleanup() so the local top 5 are sent to the reduce phase
    // only after every input line has been read
    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        for (Integer i : repToRecordMap.keySet()) {
            try {
                context.write(NullWritable.get(), new IntWritable(i));
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
}

TopNReducer.java

package top.rabbitcrows.mr.topN;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.util.Comparator;
import java.util.TreeMap;


public class TopNReducer extends Reducer<NullWritable, IntWritable, NullWritable, IntWritable> {

    private TreeMap<Integer, String> repToRecordMap = new TreeMap<Integer, String>(new Comparator<Integer>() {

        // Returns a primitive int; b - a sorts in descending order, so larger
        // numbers come first:
        //   negative means a is ordered before b
        //   zero means a and b are tied
        //   positive means a is ordered after b
        public int compare(Integer a, Integer b) {
            return b - a;
        }
    });

    public void reduce(NullWritable key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        for (IntWritable value : values) {
            repToRecordMap.put(value.get(), " ");
            if (repToRecordMap.size() > 5) {
                // Under the descending comparator the smallest entry sits at
                // lastKey(); evict it so only the global top 5 survive
                repToRecordMap.remove(repToRecordMap.lastKey());
            }
        }
        for (Integer i : repToRecordMap.keySet()) {
            context.write(NullWritable.get(), new IntWritable(i));
        }
    }
}

TopNDriver.java

package top.rabbitcrows.mr.topN;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;


public class TopNDriver {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(TopNDriver.class);
        job.setMapperClass(TopNMapper.class);
        job.setReducerClass(TopNReducer.class);
        // A single reduce task sees every map task's local top 5
        job.setNumReduceTasks(1);
        // key output by the map phase
        job.setMapOutputKeyClass(NullWritable.class);
        // value output by the map phase
        job.setMapOutputValueClass(IntWritable.class);
        // key output by the reduce phase
        job.setOutputKeyClass(NullWritable.class);
        // value output by the reduce phase
        job.setOutputValueClass(IntWritable.class);

        FileInputFormat.setInputPaths(job, new Path("input/TopN/num.txt"));
        FileOutputFormat.setOutputPath(job, new Path("output/TopN"));

        boolean res = job.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }

}
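
Each map task keeps only its local top 5 in a TreeMap, and the single reducer merges those candidates into the global top 5, so no task ever holds the full dataset. A trace with invented numbers and two map tasks:

map task 1 input: 10 3 8 7 6 5 1 2 9 4   ->  local top 5: 6 7 8 9 10
map task 2 input: 12 5 11 2 1            ->  local top 5: 1 2 5 11 12

reducer sees:   {6, 7, 8, 9, 10, 1, 2, 5, 11, 12}
reducer output: 12
                11
                10
                9
                8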
GitHub Download

HadoopDemo: https://github.com/lehoso/HadoopDemo
