wiki:waue/2010/0416
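
// WordCountV020: the classic WordCount example ported to the Hadoop 0.20
// "mapreduce" API. It optionally folds case (wordcount.case.sensitive) and
// strips regular-expression patterns shipped through the DistributedCache.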
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class WordCountV020 extends Configured implements Tool {

  // Regular expressions to strip from every input line; filled in by
  // Map.setup() from the files registered with the DistributedCache.
  static Set<String> patternsToSkip = new HashSet<String>();

  static void parseSkipFile(Path patternsFile) {
    try {
      // The patterns file has already been localized by the
      // DistributedCache, so it can be read with plain java.io.
      BufferedReader fis = new BufferedReader(new FileReader(
          patternsFile.toString()));
      try {
        String pattern = null;
        while ((pattern = fis.readLine()) != null) {
          patternsToSkip.add(pattern);
        }
      } finally {
        fis.close();
      }
    } catch (IOException ioe) {
      System.err.println("Caught exception while parsing the cached file '"
          + patternsFile + "' : " + StringUtils.stringifyException(ioe));
    }
  }

  public static class Map extends
      Mapper<LongWritable, Text, Text, IntWritable> {

    // Counter reported back to the framework: total number of words seen.
    static enum Counters {
      INPUT_WORDS
    }

    boolean caseSensitive;

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    @Override
    public void setup(Context context) throws IOException {
      Configuration conf = context.getConfiguration();
      caseSensitive = conf.getBoolean("wordcount.case.sensitive", true);
      // Read the skip patterns here, on the task node: files put into the
      // DistributedCache are localized for the tasks, not for the
      // submitting client, so they cannot be parsed from run().
      if (conf.getBoolean("wordcount.skip.patterns", false)) {
        Path[] patternsFiles = DistributedCache.getLocalCacheFiles(conf);
        if (patternsFiles != null) {
          for (Path patternsFile : patternsFiles) {
            parseSkipFile(patternsFile);
          }
        }
      }
    }

    public void map(LongWritable key, Text value, Context context)
        throws IOException, InterruptedException {

      String line = (caseSensitive) ? value.toString() : value.toString()
          .toLowerCase();

      // Each skip pattern is a regular expression: a patterns-file line
      // such as \. removes every period before the line is tokenized.
      for (String pattern : patternsToSkip) {
        line = line.replaceAll(pattern, "");
      }

      StringTokenizer tokenizer = new StringTokenizer(line);
      while (tokenizer.hasMoreTokens()) {
        word.set(tokenizer.nextToken());
        context.write(word, one);
        context.getCounter(Counters.INPUT_WORDS).increment(1);
      }
    }
  }
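
  // Example: with caseSensitive = false and a skip pattern of \. ,
  // Map turns the line "Hello World. Bye World." into the pairs
  // (hello,1) (world,1) (bye,1) (world,1).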

  public static class Reduce extends
      Reducer<Text, IntWritable, Text, IntWritable> {
    // In the 0.20 API the values arrive as an Iterable, not an Iterator;
    // with an Iterator parameter this method would not override
    // Reducer.reduce() and the identity reducer would run instead.
    @Override
    public void reduce(Text key, Iterable<IntWritable> values,
        Context context) throws IOException, InterruptedException {
      int sum = 0;
      for (IntWritable value : values) {
        sum += value.get();
      }
      context.write(key, new IntWritable(sum));
    }
  }
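
  // Example: Reduce (also used as the combiner) sums the per-word counts,
  // e.g. (world, [1, 1]) becomes (world, 2).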

  public int run(String[] args) throws Exception {

    // Use the Configuration prepared by ToolRunner so that generic options
    // such as -Dwordcount.case.sensitive=false are not lost.
    Configuration conf = getConf();

    // Parse our own arguments before creating the Job, because
    // new Job(conf, ...) takes a copy of the Configuration and later
    // changes to conf would not reach the job.
    List<String> other_args = new ArrayList<String>();
    for (int i = 0; i < args.length; ++i) {
      if ("-skip".equals(args[i])) {
        // Register the patterns file with the DistributedCache;
        // the mappers read it back in setup().
        DistributedCache.addCacheFile(new Path(args[++i]).toUri(), conf);
        conf.setBoolean("wordcount.skip.patterns", true);
      } else {
        other_args.add(args[i]);
      }
    }

    // Declare the job from the conf and set its name to Hadoop Hello World.
    Job job = new Job(conf, "Hadoop Hello World");
    // Set the jar containing the main class of this computation.
    job.setJarByClass(WordCountV020.class);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    job.setMapperClass(Map.class);
    job.setCombinerClass(Reduce.class);
    job.setReducerClass(Reduce.class);

    FileInputFormat.setInputPaths(job, new Path(other_args.get(0)));
    // Set the output path.
    FileOutputFormat.setOutputPath(job, new Path(other_args.get(1)));

    // CheckAndDelete.checkAndDelete(other_args.get(1), conf);
    return job.waitForCompletion(true) ? 0 : 1;
  }

  public static void main(String[] args) throws Exception {
    // The arguments are hardcoded for this demo; the -D option is consumed
    // by GenericOptionsParser inside ToolRunner.run() and ends up in the
    // Configuration that run() obtains through getConf().
    String[] argv = { "-Dwordcount.case.sensitive=false",
        "/user/waue/input", "/user/waue/output-v020", "-skip",
        "/user/waue/patterns" };
    args = argv;
    int res = ToolRunner.run(new Configuration(), new WordCountV020(), args);
    System.exit(res);
  }
}
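
To try the class on a cluster, a minimal invocation might look like the sketch below; the jar name wordcount.jar is an assumption, and the HDFS paths simply repeat the ones hardcoded in main() above:

hadoop jar wordcount.jar WordCountV020 -Dwordcount.case.sensitive=false /user/waue/input /user/waue/output-v020 -skip /user/waue/patterns

The patterns file holds one regular expression per line; for example, these three lines strip periods, commas and exclamation marks before counting:

\.
\,
\!

Console output from the run recorded on this page: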
10/04/16 10:17:05 WARN conf.Configuration: DEPRECATED: hadoop-site.xml found in the classpath. Usage of hadoop-site.xml is deprecated. Instead use core-site.xml, mapred-site.xml and hdfs-site.xml to override properties of core-default.xml, mapred-default.xml and hdfs-default.xml respectively
no path
10/04/16 10:17:05 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same.
10/04/16 10:17:06 INFO input.FileInputFormat: Total input paths to process : 4
10/04/16 10:17:06 INFO mapred.JobClient: Running job: job_201003231850_0008
10/04/16 10:17:07 INFO mapred.JobClient:  map 0% reduce 0%
10/04/16 10:17:14 INFO mapred.JobClient: Task Id : attempt_201003231850_0008_m_000005_0, Status : FAILED
java.io.IOException: Task process exit with nonzero status of 126.
	at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:418)

10/04/16 10:17:14 WARN mapred.JobClient: Error reading task outputhttp://vpro:50060/tasklog?plaintext=true&taskid=attempt_201003231850_0008_m_000005_0&filter=stdout
10/04/16 10:17:14 WARN mapred.JobClient: Error reading task outputhttp://vpro:50060/tasklog?plaintext=true&taskid=attempt_201003231850_0008_m_000005_0&filter=stderr
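
The stray "no path" line appears because DistributedCache.getLocalCacheFiles() returns nothing on the submitting client; cached files are only localized on the task nodes, which is why the patterns are read in Map.setup(). The actual failure, exit status 126, comes from the shell that launches the task JVM (126 means "found but not executable"), so it typically points at a launch problem on the TaskTracker node, such as script permissions, rather than at the MapReduce code itself.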