{{{
#!html
<div style="text-align: center; color:#151B8D"><big style="font-weight: bold;"><big><big>
Hadoop Advanced Course
</big></big></big></div> <div style="text-align: center; color:#7E2217"><big style="font-weight: bold;"><big>
Example Exercise
</big></big></div>
}}}

[wiki:NCHCCloudCourse100928_4_EXM3 < Previous] Level 4 [wiki:NCHCCloudCourse100928_4_EXM5 Next >]

{{{
#!java
package org.nchc.hadoop;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
// WordCount
// Description:
//   Counts the number of occurrences of each word in the input.
//
// How to test:
//   Run this program on a Hadoop 0.20 cluster:
//   ---------------------------
//   hadoop jar WordCount.jar <input> <output>
//   ---------------------------
//
// Notes:
// 1. <input> is the HDFS path of the source data. Upload your data into
//    this HDFS directory first; it may contain only files, no subdirectories.
// 2. When the job finishes, the results are written to the HDFS path <output>.
//
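// A typical session might look like the following (the paths below are
// illustrative, not part of the course material):
//   hadoop fs -mkdir input
//   hadoop fs -put sample.txt input
//   hadoop jar WordCount.jar input output
//   hadoop fs -cat output/part-r-00000
//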
public class WordCount {

    public static class TokenizerMapper extends
            Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

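        // map: tokenize each input line on whitespace and emit the pair
        // (word, 1) for every token.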
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    public static class IntSumReducer extends
            Reducer<Text, IntWritable, Text, IntWritable> {
        private IntWritable result = new IntWritable();

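        // reduce: sum the counts collected for each word and emit the total.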
        public void reduce(Text key, Iterable<IntWritable> values,
                Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws Exception {
        // For debugging: uncomment the next two lines to hard-code the paths.
        // String[] argv = { "/user/hadooper/input", "/user/hadooper/output-wc" };
        // args = argv;

        Configuration conf = new Configuration();

        String[] otherArgs = new GenericOptionsParser(conf, args)
                .getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err
                    .println("Usage: hadoop jar WordCount.jar <input> <output>");
            System.exit(2);
        }

        Job job = new Job(conf, "Word Count");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class);
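        // The reducer also serves as a combiner, pre-aggregating counts on
        // each map node to cut down the data shuffled to the reducers.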
        job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
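        // Delete the output directory if it already exists, so re-runs do
        // not abort; CheckAndDelete is a helper class that ships with these
        // course examples (same org.nchc.hadoop package).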
        CheckAndDelete.checkAndDelete(otherArgs[1], conf);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

}}}
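
The `CheckAndDelete` helper called in `main` is not listed on this page; it lives in the same `org.nchc.hadoop` package. Below is a minimal sketch of what such a helper might look like, assuming it simply deletes the output directory when it already exists; the course's actual implementation may differ.

{{{
#!java
package org.nchc.hadoop;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical sketch of the CheckAndDelete helper used by WordCount;
// not the course's official code.
public class CheckAndDelete {
    public static boolean checkAndDelete(String out, Configuration conf)
            throws IOException {
        Path dstPath = new Path(out);
        FileSystem fs = dstPath.getFileSystem(conf);
        // Recursively delete the output directory if it exists, so a
        // re-run does not fail with "output directory already exists".
        if (fs.exists(dstPath)) {
            return fs.delete(dstPath, true);
        }
        return true;
    }
}
}}}

Clearing the previous output first matches `FileOutputFormat`'s requirement that the output path not exist when the job starts.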