import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class WordCount2for020 extends Configured implements Tool {
// Problem 1: the values set on the Configuration are not passed into the map tasks
// Problem 2: inputFile is never used
// Problem 3: because of the Configuration problem, DistributedCache also cannot obtain the correct LocalCacheFiles
public static class Map extends
Mapper<LongWritable, Text, Text, IntWritable> {
static enum Counters {
INPUT_WORDS
}
private final static IntWritable one = new IntWritable(1);
private Text word = new Text();
private boolean caseSensitive = true;
private Set<String> patternsToSkip = new HashSet<String>();
private long numRecords = 0;
private String inputFile;
public void setup(Context context) {
System.err.println("yes yes yes !!! setup work");
Configuration conf = context.getConfiguration();
caseSensitive = conf.getBoolean("wordcount.case.sensitive", false);
// inputFile = conf.get("map.input.file","");
// inputFile = "/user/waue/patterns";
// inputFile is never used again after this point, so it is handled here
boolean wsp = conf.getBoolean("wordcount.skip.patterns", true);
System.err.println("caseSensitive = " + caseSensitive);
System.err.println("wordcount.skip.patterns = " + wsp);
if (wsp) {
Path[] patternsFiles = new Path[0];
try {
patternsFiles = DistributedCache.getLocalCacheFiles(conf);
} catch (IOException ioe) {
System.err
.println("Caught exception while getting cached files: "
+ StringUtils.stringifyException(ioe));
}
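// NOTE: getLocalCacheFiles() returns null when no cache file was registered
// in the job's configuration (and wordcount.skip.patterns defaults to true
// above, so this branch runs anyway); iterating over null is what throws the
// NullPointerException shown in the log below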
for (Path patternsFile : patternsFiles) {
parseSkipFile(patternsFile);
System.err.println("parseSkipFile = " + patternsFile);
}
}
}
private void parseSkipFile(Path patternsFile) {
try {
BufferedReader fis = new BufferedReader(new FileReader(
patternsFile.toString()));
String pattern = null;
while ((pattern = fis.readLine()) != null) {
patternsToSkip.add(pattern);
}
} catch (IOException ioe) {
System.err
.println("Caught exception while parsing the cached file '"
+ patternsFile
+ "' : "
+ StringUtils.stringifyException(ioe));
}
}
public void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
String line = (caseSensitive) ? value.toString() : value.toString()
.toLowerCase();
for (String pattern : patternsToSkip) {
line = line.replaceAll(pattern, "");
}
StringTokenizer tokenizer = new StringTokenizer(line);
while (tokenizer.hasMoreTokens()) {
word.set(tokenizer.nextToken());
context.write(word, one);
Counter count = context.getCounter(Counters.INPUT_WORDS);
count.increment(1);
}
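// NOTE: problem 2 — inputFile is never assigned (its setup lines are
// commented out), so this status message reports null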
if ((++numRecords % 100) == 0) {
context.setStatus("Finished processing " + numRecords
+ " records " + "from the input file: " + inputFile);
}
}
}
public static class Reduce extends
Reducer<Text, IntWritable, Text, IntWritable> {
/* on V0.20, use Iterable instead of Iterator */
// public void reduce(Text key, Iterator<IntWritable> values,
public void reduce(Text key, Iterable<IntWritable> values,
Context context) throws IOException, InterruptedException {
int sum = 0;
/* these three lines were for V0.19, not V0.20 */
// while (values.hasNext()) {
// sum += values.next().get();
// }
for (IntWritable val : values) {
sum += val.get();
}
context.write(key, new IntWritable(sum));
}
}
public int run(String[] args) throws Exception {
Configuration conf = new Configuration();
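// NOTE: problem 1 — creating a fresh Configuration here discards the -D
// options that ToolRunner/GenericOptionsParser already parsed; getConf()
// would return the populated one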
Job job = new Job(conf);
job.setJarByClass(WordCount2for020.class);
job.setJobName("wordcount");
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(IntWritable.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
job.setMapperClass(Map.class);
job.setCombinerClass(Reduce.class);
job.setReducerClass(Reduce.class);
job.setInputFormatClass(TextInputFormat.class);
job.setOutputFormatClass(TextOutputFormat.class);
List<String> other_args = new ArrayList<String>();
for (int i = 0; i < args.length; ++i) {
if ("-skip".equals(args[i])) {
DistributedCache
.addCacheFile(new Path(args[++i]).toUri(), conf);
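// NOTE: problem 3 — new Job(conf) above has already copied the
// Configuration, so this cache file and the setBoolean below never
// reach the tasks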
System.err.println("cache file = " + args[i]);
conf.setBoolean("wordcount.skip.patterns", true);
System.err.println("wordcount.skip.patterns = true");
} else {
other_args.add(args[i]);
}
}
// conf.set("mapred.job.tracker", "local");
// conf.set("fs.default.name", "file:///");
FileInputFormat.setInputPaths(job, new Path(other_args.get(0)));
FileOutputFormat.setOutputPath(job, new Path(other_args.get(1)));
job.waitForCompletion(true);
return 0;
}
public static void main(String[] args) throws Exception {
String[] argv = { "-Dwordcount.case.sensitive=false",
"/user/waue/text_input", "/user/waue/output-v020", "-skip",
"/user/waue/patterns/patterns.txt" };
args = argv;
int res = ToolRunner.run(new Configuration(), new WordCount2for020(),
args);
System.exit(res);
}
}
Problem
- the -skip argument and the case.sensitive setting have no effect

Diagnosis (a corrected run() is sketched after the log below)
// Problem 1: the values set on the Configuration are not passed into the map tasks
// Problem 2: inputFile is never used
// Problem 3: because of the Configuration problem, DistributedCache cannot obtain the correct LocalCacheFiles
10/04/19 16:02:58 WARN conf.Configuration: DEPRECATED: hadoop-site.xml found in the classpath. Usage of hadoop-site.xml is deprecated. Instead use core-site.xml, mapred-site.xml and hdfs-site.xml to override properties of core-default.xml, mapred-default.xml and hdfs-default.xml respectively
cache file = /user/waue/patterns/patterns.txt
wordcount.skip.patterns = true
10/04/19 16:02:59 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same.
10/04/19 16:02:59 INFO input.FileInputFormat: Total input paths to process : 4
10/04/19 16:02:59 INFO mapred.JobClient: Running job: job_201004190849_0011
10/04/19 16:03:00 INFO mapred.JobClient: map 0% reduce 0%
10/04/19 16:03:10 INFO mapred.JobClient: Task Id : attempt_201004190849_0011_m_000000_0, Status : FAILED
java.lang.NullPointerException
at WordCount2for020$Map.setup(WordCount2for020.java:79)
at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:142)
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:583)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:305)
at org.apache.hadoop.mapred.Child.main(Child.java:170)
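Given the diagnosis above, here is a minimal sketch of a corrected run(), assuming the same Hadoop 0.20 API as the example (only the Configuration handling changes; everything else is kept from the original). The key points: start from getConf() so the options ToolRunner already parsed survive, and register the cache file and wordcount.skip.patterns before constructing the Job, because new Job(conf) copies the Configuration.

public int run(String[] args) throws Exception {
    // problem 1: reuse the Configuration that ToolRunner populated,
    // so -Dwordcount.case.sensitive=false actually reaches the tasks
    Configuration conf = getConf();
    List<String> other_args = new ArrayList<String>();
    for (int i = 0; i < args.length; ++i) {
        if ("-skip".equals(args[i])) {
            // problem 3: mutate conf while it is still the object
            // that the Job constructor will copy
            DistributedCache.addCacheFile(new Path(args[++i]).toUri(), conf);
            conf.setBoolean("wordcount.skip.patterns", true);
        } else {
            other_args.add(args[i]);
        }
    }
    // only now create the Job; new Job(conf) clones the prepared conf
    Job job = new Job(conf, "wordcount");
    job.setJarByClass(WordCount2for020.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    job.setMapperClass(Map.class);
    job.setCombinerClass(Reduce.class);
    job.setReducerClass(Reduce.class);
    FileInputFormat.setInputPaths(job, new Path(other_args.get(0)));
    FileOutputFormat.setOutputPath(job, new Path(other_args.get(1)));
    return job.waitForCompletion(true) ? 0 : 1;
}

With these changes setup() sees the intended wordcount.case.sensitive value, and DistributedCache.getLocalCacheFiles() returns the patterns file instead of null, so the NullPointerException above should disappear. A defensive null check on the getLocalCacheFiles() result is still a good idea.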