
Hadoop MapReduce join operation (implementing a database-style join)

The requirement: join an order table with a product table on the product id (pid), replacing each order record's pid with the corresponding product name (pname), which is exactly what a database join on pid would produce. The original post illustrated this with sample tables; a sketch consistent with the code follows below.

For the related data files and the complete source code, see the resources referenced in the original post.
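The concrete values below are illustrative assumptions (the original sample files are not reproduced here); what matters is the shape the mapper expects: tab-separated files, an "order" file carrying id, pid, amount and a "pd" file carrying pid, pname.

order.txt (id, pid, amount):
1001	01	1
1002	02	2
1003	03	3
1004	01	4

pd.txt (pid, pname):
01	xiaomi
02	huawei
03	gree

Expected joined output (id, pname, amount):
1001	xiaomi	1
1004	xiaomi	4
1002	huawei	2
1003	gree	3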


The Java code is as follows:

1. The TableBean class
package com.lqs.mapreduce.reducejoin;

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;



public class TableBean implements Writable {

    // order id (order table)
    private String id;

    // product id (the join key, present in both tables)
    private String pid;

    // order amount (order table)
    private int amount;

    // product name (product table)
    private String pname;

    // marker for which table the record came from: "order" or "pd"
    private String flag;

    public TableBean() {
    }

    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    public String getPid() {
        return pid;
    }

    public void setPid(String pid) {
        this.pid = pid;
    }

    public int getAmount() {
        return amount;
    }

    public void setAmount(int amount) {
        this.amount = amount;
    }

    public String getPname() {
        return pname;
    }

    public void setPname(String pname) {
        this.pname = pname;
    }

    public String getFlag() {
        return flag;
    }

    public void setFlag(String flag) {
        this.flag = flag;
    }

    @Override
    public String toString() {
        // tab-separated output line: id, pname, amount
        return id + "\t" + pname + "\t" + amount;
    }


    @Override
    public void write(DataOutput dataOutput) throws IOException {
        // serialize the fields; the order must match readFields()
        dataOutput.writeUTF(id);
        dataOutput.writeUTF(pid);
        dataOutput.writeInt(amount);
        dataOutput.writeUTF(pname);
        dataOutput.writeUTF(flag);
    }

    @Override
    public void readFields(DataInput dataInput) throws IOException {
        // deserialize in exactly the order written by write()
        this.id = dataInput.readUTF();
        this.pid = dataInput.readUTF();
        this.amount = dataInput.readInt();
        this.pname = dataInput.readUTF();
        this.flag = dataInput.readUTF();
    }
}
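As a quick sanity check (a minimal sketch, not part of the original code), TableBean's Writable methods can be exercised with plain Java streams, without starting a MapReduce job:

package com.lqs.mapreduce.reducejoin;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class TableBeanRoundTrip {

    public static void main(String[] args) throws IOException {
        TableBean in = new TableBean();
        in.setId("1001");
        in.setPid("01");
        in.setAmount(1);
        in.setPname("");
        in.setFlag("order");

        // write() into a byte buffer, then readFields() back out
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        in.write(new DataOutputStream(buffer));

        TableBean out = new TableBean();
        out.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));

        // prints "1001<TAB><TAB>1" via toString(): id, pname (empty on the order side), amount
        System.out.println(out);
    }
}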

2. The TableMapper class
package com.lqs.mapreduce.reducejoin;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

import java.io.IOException;



public class TableMapper extends Mapper<LongWritable, Text, Text, TableBean> {

    // name of the file the current split belongs to
    private String filename;
    private Text outK;
    private TableBean outV;

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        outK = new Text();
        outV = new TableBean();

        // get the name of the file this split comes from
        InputSplit inputSplit = context.getInputSplit();
        FileSplit fileSplit = (FileSplit) inputSplit;
        filename = fileSplit.getPath().getName();
    }

    
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

        // read one line
        String line = value.toString();

        // decide which file the line came from and handle it accordingly
        // order table
        if (filename.contains("order")) {
            String[] split = line.split("\t");
            // key: pid
            outK.set(split[1]);
            // value: the full order record
            outV.setId(split[0]);
            outV.setPid(split[1]);
            outV.setAmount(Integer.parseInt(split[2]));
            // the order table has no pname
            outV.setPname("");
            outV.setFlag("order");
        } else {
            // product table
            String[] split = line.split("\t");
            // key: pid
            outK.set(split[0]);
            // value: the product record
            outV.setId("");
            outV.setPid(split[0]);
            outV.setAmount(0);
            outV.setPname(split[1]);
            outV.setFlag("pd");
        }

        // emit the key/value pair
        context.write(outK, outV);

    }
}
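Two details worth noting in the mapper: outK and outV are deliberately reused across map() calls, which is safe because context.write() serializes the pair immediately, and it avoids allocating two objects per input line. And because pid is always the output key, every order record and the matching product record arrive at the same reduce() call, which is what makes the join possible.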

3. The TableReducer class
package com.lqs.mapreduce.reducejoin;

import org.apache.commons.beanutils.BeanUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;



public class TableReducer extends Reducer<Text, TableBean, TableBean, NullWritable> {

    @Override
    protected void reduce(Text key, Iterable<TableBean> values, Context context) throws IOException, InterruptedException {
        ArrayList<TableBean> orderBeans = new ArrayList<>();
        TableBean pdBean = new TableBean();

        for (TableBean value : values) {

            // check which table the record came from
            // "order" marks the order table
            if ("order".equals(value.getFlag())) {

                // create a temporary TableBean to receive value
                // (the values iterator reuses its object, so a copy is required)
                TableBean tmpOrderBean = new TableBean();

                try {
                    BeanUtils.copyProperties(tmpOrderBean, value);
                } catch (IllegalAccessException | InvocationTargetException e) {
                    e.printStackTrace();
                }

                // collect the order record
                orderBeans.add(tmpOrderBean);

            } else {
                // product table record
                try {
                    BeanUtils.copyProperties(pdBean, value);
                } catch (IllegalAccessException | InvocationTargetException e) {
                    e.printStackTrace();
                }
            }

        }

        // walk the collected order records, replace each one's pid with pname, then emit
        for (TableBean orderBean : orderBeans) {
            orderBean.setPname(pdBean.getPname());

            // emit the joined record
            context.write(orderBean, NullWritable.get());

        }


    }
}
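BeanUtils.copyProperties copies fields via reflection, which is convenient but relatively slow on a hot path, and it drags in the commons-beanutils dependency. A plain field-by-field copy is a simple alternative; this helper is a sketch, not part of the original code:

    // hand-written alternative to BeanUtils.copyProperties(dest, orig):
    // copies every TableBean field without reflection
    private static void copyProperties(TableBean dest, TableBean orig) {
        dest.setId(orig.getId());
        dest.setPid(orig.getPid());
        dest.setAmount(orig.getAmount());
        dest.setPname(orig.getPname());
        dest.setFlag(orig.getFlag());
    }

The copy is needed in the first place because Hadoop reuses the TableBean instance handed out by the values iterator. Also note the standard reduce-side-join caveat: all order records for one pid are buffered in orderBeans, so a heavily skewed key can strain reducer memory; when the product table fits in memory, a map-side join avoids this.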

4. The TableDriver class:
package com.lqs.mapreduce.reducejoin;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.log4j.BasicConfigurator;

import java.io.IOException;



public class TableDriver {

    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {

        BasicConfigurator.configure();

        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        job.setJarByClass(TableDriver.class);

        job.setMapperClass(TableMapper.class);
        job.setReducerClass(TableReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(TableBean.class);

        // the reducer emits TableBean keys, not Text
        job.setOutputKeyClass(TableBean.class);
        job.setOutputValueClass(NullWritable.class);

        FileInputFormat.setInputPaths(job, new Path("F:\\hdpData\\Input\\inputtable"));
        FileOutputFormat.setOutputPath(job, new Path("F:\\hdpData\\Output\\outputTable3"));

        boolean b = job.waitForCompletion(true);

        System.exit(b ? 0 : 1);

    }

}
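With the assumed sample input shown earlier, the output file part-r-00000 would contain lines like 1001	xiaomi	1 and 1004	xiaomi	4, one per order record, with pid replaced by pname. Note that the hard-coded F:\ paths make this a local-mode run on Windows; to run on a cluster you would pass HDFS paths instead, typically taken from args.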

The pom.xml configuration file is as follows:


<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>org.example</groupId>
    <artifactId>MapReduceDemo</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <maven.compiler.source>8</maven.compiler.source>
        <maven.compiler.target>8</maven.compiler.target>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>3.1.3</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.12</version>
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
            <version>1.7.30</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.6.1</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                </configuration>
            </plugin>
            <plugin>
                <artifactId>maven-assembly-plugin</artifactId>
                <configuration>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                </configuration>
                <executions>
                    <execution>
                        <id>make-assembly</id>
                        <phase>package</phase>
                        <goals>
                            <goal>single</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
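Because the assembly plugin's make-assembly execution is bound to the package phase, a single mvn clean package produces both the regular MapReduceDemo-1.0-SNAPSHOT.jar and a MapReduceDemo-1.0-SNAPSHOT-jar-with-dependencies.jar that bundles the Hadoop client libraries and can be submitted with hadoop jar.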
