Black Monkey's Home: MapReduce Traffic Summary Program, Case 3

Sort the statistics results in descending order of total traffic (total sort)
1. Data
https://www.jianshu.com/p/bbebc7b959a8
2. Analysis
(1) Split the program into two MapReduce steps: the first step computes the normal statistics over the full data set, and the second step sorts those results.
(2) In the second step, emit context.write(total-flow bean, phone number), i.e. make the FlowBean the map output key.
(3) Have FlowBean implement the WritableComparable interface and override its compareTo method:
@Override
public int compareTo(FlowBean o) {
    // Sort in descending order of total flow
    return Long.compare(o.getSumFlow(), this.sumFlow);
}
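
As a quick local sanity check (not part of the MapReduce job itself, and assuming the full FlowBean class from section 3 below), the comparison logic can be exercised with a plain Arrays.sort, since WritableComparable extends Comparable:

import java.util.Arrays;

public class CompareToCheck {
    public static void main(String[] args) {
        FlowBean[] beans = {
                new FlowBean(100, 200),  // sumFlow = 300
                new FlowBean(900, 100),  // sumFlow = 1000
                new FlowBean(10, 20)     // sumFlow = 30
        };
        // Uses FlowBean.compareTo, so the beans end up in
        // descending order of total flow: 1000, 300, 30
        Arrays.sort(beans);
        for (FlowBean bean : beans) {
            System.out.println(bean);
        }
    }
}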

3. The FlowBean class from Requirement 1, extended with comparison support
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.WritableComparable;

public class FlowBean implements WritableComparable<FlowBean> {

    private long upFlow;
    private long downFlow;
    private long sumFlow;

    // A no-argument constructor is required so the framework can
    // instantiate the bean via reflection during deserialization
    public FlowBean() {
        super();
    }

    public FlowBean(long upFlow, long downFlow) {
        super();
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        this.sumFlow = upFlow + downFlow;
    }

    public void set(long upFlow, long downFlow) {
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        this.sumFlow = upFlow + downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    /**
     * Serialize the fields
     * @param out
     * @throws IOException
     */
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    /**
     * Deserialization: the fields must be read back in exactly the same
     * order in which they were written
     * @param in
     * @throws IOException
     */
    @Override
    public void readFields(DataInput in) throws IOException {
        upFlow = in.readLong();
        downFlow = in.readLong();
        sumFlow = in.readLong();
    }

    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }

    @Override
    public int compareTo(FlowBean o) {
        // Sort in descending order of total flow
        return Long.compare(o.getSumFlow(), this.sumFlow);
    }
}
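
The write/readFields pair can also be verified locally with Java's data streams; the key point is that readFields consumes the fields in exactly the order write produced them. A minimal sketch (hypothetical helper class, not part of the job):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FlowBeanRoundTrip {
    public static void main(String[] args) throws IOException {
        FlowBean original = new FlowBean(1024, 2048);

        // Serialize: upFlow, downFlow, sumFlow are written in that order
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        // Deserialize into a fresh bean created via the no-arg constructor
        FlowBean copy = new FlowBean();
        copy.readFields(new DataInputStream(
                new ByteArrayInputStream(bytes.toByteArray())));

        System.out.println(original); // 1024    2048    3072
        System.out.println(copy);     // identical output expected
    }
}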

4. The map method is optimized to reuse a single output object, the reduce method just writes the results out directly, and the driver only needs its input and output settings adjusted. Reusing the bean and Text instances is safe because context.write serializes the key and value immediately, so the same objects can be refilled for the next record.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class FlowCountSort {
    static class FlowCountSortMapper extends Mapper<LongWritable, Text, FlowBean, Text> {
        FlowBean bean = new FlowBean();
        Text v = new Text();
        
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            
            // 1 Each input line is the aggregated output of the first
            // statistics job: phone number, upFlow, downFlow, sumFlow
            String line = value.toString();
            
            // 2 Extract the phone number, upstream flow, and downstream flow
            String[] fields = line.split("\t");
            String phoneNbr = fields[0];
            
            long upFlow = Long.parseLong(fields[1]);
            long downFlow = Long.parseLong(fields[2]);
            
            // 3 Refill the reusable key/value objects
            bean.set(upFlow, downFlow);
            v.set(phoneNbr);
            
            // 4 Emit with the FlowBean as key so the shuffle sorts by total flow
            context.write(bean, v);
        }
    }
    
    static class FlowCountSortReducer extends Reducer<FlowBean, Text, Text, FlowBean> {
        
        @Override
        protected void reduce(FlowBean bean, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // Beans that compared equal (same total flow) arrive grouped in
            // one call, so write out every phone number in the group
            for (Text phone : values) {
                context.write(phone, bean);
            }
        }
    }
    
    public static void main(String[] args) throws Exception {

        // 1 Get the configuration and create the Job instance
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);

        // 6 Specify where to find the jar that contains this driver class
        job.setJarByClass(FlowCountSort.class);

        // 2 Set the Mapper and Reducer classes for this job
        job.setMapperClass(FlowCountSortMapper.class);
        job.setReducerClass(FlowCountSortReducer.class);

        // 3 Set the mapper output key/value types
        job.setMapOutputKeyClass(FlowBean.class);
        job.setMapOutputValueClass(Text.class);

        // 4 Set the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        // 5 Set the input and output paths of the job
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        
        Path outPath = new Path(args[1]);
        // Optionally delete an existing output directory before the run
        // (requires: import org.apache.hadoop.fs.FileSystem;)
        //FileSystem fs = FileSystem.get(configuration);
        //if (fs.exists(outPath)) {
        //    fs.delete(outPath, true);
        //}
        FileOutputFormat.setOutputPath(job, outPath);

        // 7 Submit the job and wait for completion; the jar containing these
        // classes is shipped to the YARN cluster
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
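
If the job may be re-run, the commented-out cleanup in the driver can be enabled so that an existing output directory does not make the job fail. A minimal sketch, reusing the driver's configuration and outPath variables and adding one import:

import org.apache.hadoop.fs.FileSystem;

// Delete the output directory if it already exists; otherwise
// FileOutputFormat rejects the job ("Output directory ... already exists")
FileSystem fs = FileSystem.get(configuration);
if (fs.exists(outPath)) {
    fs.delete(outPath, true); // true = delete recursively
}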

5. Package the program into a jar and copy it to the Hadoop cluster
6. Start the Hadoop cluster
7. Run the flowcountsort program
[victor@hadoop102 software]$ hadoop jar flowcountsort.jar com.victor.mr.sort.FlowCountSort /user/victor/flowcount/output /user/victor/flowcount/output_sort

8. View the results (columns: phone number, upstream flow, downstream flow, total flow, sorted by total flow in descending order)
[victor@hadoop102 software]$ hadoop fs -cat /user/victor/flowcount/output_sort/part-r-00000
13502468823 7335    110349  117684
13925057413 11058   48243   59301
13726238888 2481    24681   27162
13726230503 2481    24681   27162
18320173382 9531    2412    11943

9. Code -> GitHub
https://github.com/liufengji/hadoop_mapreduce.git
