- Timestamp:
- Jul 3, 2008, 4:45:54 PM (16 years ago)
- Location:
- sample/hadoop-0.16
- Files:
- 29 edited
Legend:
- Unmodified (no marker)
- Added (lines marked +)
- Removed (lines marked -)
sample/hadoop-0.16/WordCount2.java (r24 → r25)

 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.FileInputFormat;
-import org.apache.hadoop.mapred.FileOutputFormat;
 import org.apache.hadoop.mapred.JobClient;
 import org.apache.hadoop.mapred.JobConf;
…
 conf.setNumReduceTasks(reduceTasks);
-//conf.setInputPath(new Path(filename));
-FileInputFormat.setInputPaths(conf, new Path(filename));
+conf.setInputPath(new Path(filename));
 conf.setOutputKeyClass(Text.class);
 conf.setOutputValueClass(IntWritable.class);
-//conf.setOutputPath(new Path(outputPath));
-FileOutputFormat.setOutputPath(conf, new Path(filename));
+conf.setOutputPath(new Path(outputPath));
…
 // Delete the output directory if it exists already
 Path outputDir = new Path(outputPath);
-FileSystem.get(conf).delete(outputDir, true);
+FileSystem.get(conf).delete(outputDir);
 JobClient.runJob(conf);
 }
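
For reference, the 0.17-style calls that r24 had introduced in this file (and that r25 backs out) follow the pattern sketched below. The class and method are illustrative only, not repository code; the 0.16 equivalents appear as comments.

// Sketch only: 0.17-style path setup through the static helpers on
// FileInputFormat/FileOutputFormat, with the 0.16 calls shown as comments.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;

public class PathSetupSketch {
    static void configurePaths(JobConf conf, Path input, Path output) {
        // 0.16: conf.setInputPath(input);
        FileInputFormat.setInputPaths(conf, input);
        // 0.16: conf.setOutputPath(output);
        FileOutputFormat.setOutputPath(conf, output);
    }
}
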
sample/hadoop-0.16/test.java (r24 → r25)

 import java.io.RandomAccessFile;
 import java.util.StringTokenizer;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
…
 boolean b = admin.tableExists(text_table_name);
 System.out.println(b);
-/*
 if (!admin.tableExists(text_table_name)) {
…
 System.out.println("table exist!");
 }
-*/
 }
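
r25 re-activates the block guarded by admin.tableExists(), so the test now creates the table when it is missing. A rough sketch of that create-if-absent pattern follows; the table name, column family, and the exact HBaseAdmin/HTableDescriptor signatures of that HBase generation are assumptions, not taken from the file.

// Rough sketch, not repository code; admin API details below are assumptions
// about the old HBase releases used with Hadoop 0.16/0.17.
import org.apache.hadoop.hbase.HBaseAdmin;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.io.Text;

public class CreateTableIfMissing {
    public static void main(String[] args) throws Exception {
        HBaseAdmin admin = new HBaseAdmin(new HBaseConfiguration());
        Text tableName = new Text("test_table");                 // assumed name
        if (!admin.tableExists(tableName)) {
            HTableDescriptor desc = new HTableDescriptor(tableName.toString());
            desc.addFamily(new HColumnDescriptor("content:"));   // assumed family
            admin.createTable(desc);
            System.out.println("table created");
        } else {
            System.out.println("table exist!");
        }
    }
}
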
sample/hadoop-0.16/tw/org/nchc/code/HBaseRecordPro.java (r23 → r25)

 conf.setNumMapTasks(mapTasks);
 conf.setNumReduceTasks(reduceTasks);
-// 0.16
-//conf.setInputPath(text_path);
-Convert.setInputPath(conf, text_path);
+conf.setInputPath(text_path);
 conf.setMapperClass(IdentityMapper.class);
…
 // delete tmp file
 // 0.16
-// FileSystem.get(conf).delete(text_path);
-FileSystem.get(conf).delete(text_path, true);
+FileSystem.get(conf).delete(text_path);
 setup.deleteFile(conf_tmp);
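
The delete() change here is the same 0.16/0.17 split seen throughout this changeset: the 0.16 sample uses the one-argument FileSystem.delete(Path), while the 0.17-style code passes an explicit recursive flag. A minimal sketch, assuming a 0.17-or-later classpath:

// Illustrative only: the two delete idioms this changeset switches between.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteSketch {
    static void removeTemp(Configuration conf, Path tmp) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        // 0.16: fs.delete(tmp);
        fs.delete(tmp, true); // 0.17-style: true deletes directories recursively
    }
}
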
sample/hadoop-0.16/tw/org/nchc/code/WordCount.java (r23 → r25)

 conf.setNumMapTasks(mapTasks);
 conf.setNumReduceTasks(reduceTasks);
-// 0.16
-//conf.setInputPath(new Path(wc.filepath));
-Convert.setInputPath(conf, new Path(wc.filepath));
+conf.setInputPath(new Path(wc.filepath));
 conf.setOutputKeyClass(Text.class);
 conf.setOutputValueClass(IntWritable.class);
-// 0.16
-// conf.setOutputPath(new Path(wc.outputPath));
-Convert.setOutputPath(conf, new Path(wc.outputPath));
+conf.setOutputPath(new Path(wc.outputPath));
 conf.setMapperClass(MapClass.class);
…
 // Delete the output directory if it exists already
 Path outputDir = new Path(wc.outputPath);
-// 0.16
-FileSystem.get(conf).delete(outputDir, true);
+FileSystem.get(conf).delete(outputDir);
 JobClient.runJob(conf);
 }
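
Several of these files previously routed path setup through the project-local Convert helper; r25 drops those calls in favour of the plain 0.16 JobConf methods. The sketch below only illustrates what such a compatibility shim can look like; it is not the repository's actual tw.org.nchc.code.Convert.

// Hypothetical compatibility shim: funnels 0.16-style calls through the
// 0.17 FileInputFormat/FileOutputFormat helpers and listStatus().
import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;

public class ConvertSketch {
    public static void setInputPath(JobConf conf, Path p) {
        FileInputFormat.setInputPaths(conf, p);
    }

    public static void addInputPath(JobConf conf, Path p) {
        FileInputFormat.addInputPath(conf, p);
    }

    public static void setOutputPath(JobConf conf, Path p) {
        FileOutputFormat.setOutputPath(conf, p);
    }

    // 0.17 replaces FileSystem.listPaths() with listStatus(); unwrap back to Path[].
    public static Path[] listPaths(FileSystem fs, Path dir) throws IOException {
        FileStatus[] stats = fs.listStatus(dir);
        Path[] paths = new Path[stats.length];
        for (int i = 0; i < stats.length; i++) {
            paths[i] = stats[i].getPath();
        }
        return paths;
    }
}
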
sample/hadoop-0.16/tw/org/nchc/code/WordCountFromHBase.java (r23 → r25)

 // input is Hbase format => TableInputFormat
 conf.setInputFormat(TableInputFormat.class);
-// 0.16
-// conf.setOutputPath(new Path(outputPath));
-Convert.setOutputPath(conf, new Path(outputPath));
+conf.setOutputPath(new Path(outputPath));
 // delete the old path with the same name
-FileSystem.get(conf).delete(new Path(outputPath), true);
+FileSystem.get(conf).delete(new Path(outputPath));
 JobClient.runJob(conf);
 }
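
The driver deletes the old output path unconditionally before submitting the job. A variant that checks for the path first (not part of the file, shown only as a sketch) looks like this:

// Variant sketch: test for the old output before deleting it.
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;

public class CleanOutputSketch {
    static void cleanOutput(JobConf conf, String outputPath) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        Path out = new Path(outputPath);
        if (fs.exists(out)) {
            fs.delete(out); // 0.16-style one-argument delete, as used in this sample
        }
    }
}
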
sample/hadoop-0.16/tw/org/nchc/demo/DemoWordCount.java (r21 → r25)

 import org.apache.hadoop.mapred.Reducer;
 import org.apache.hadoop.mapred.Reporter;
-import tw.org.nchc.code.Convert;
…
 conf.setNumMapTasks(mapTasks);
 conf.setNumReduceTasks(reduceTasks);
-//0.16
-//conf.setInputPath(new Path(filename));
-Convert.setInputPath(conf, new Path(filename));
+conf.setInputPath(new Path(filename));
 conf.setOutputKeyClass(Text.class);
 conf.setOutputValueClass(IntWritable.class);
-// 0.16
-// conf.setOutputPath(new Path(outputPath));
-Convert.setInputPath(conf, new Path(outputPath));
+conf.setOutputPath(new Path(outputPath));
 conf.setMapperClass(MapClass.class);
 conf.setCombinerClass(ReduceClass.class);
…
 // Delete the output directory if it exists already
 Path outputDir = new Path(outputPath);
-// 0.16
-// FileSystem.get(conf).delete(outputDir);
-FileSystem.get(conf).delete(outputDir, true);
+FileSystem.get(conf).delete(outputDir);
 JobClient.runJob(conf);
 }
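
Taken together, the lines touched in these word-count drivers form one 0.16-style job configuration. The self-contained sketch below shows that wiring; it compiles only against the 0.16-era API (setInputPath, setOutputPath, and the one-argument delete were deprecated around 0.17 and removed in later releases), and the identity mapper and reducer stand in for the demo's own MapClass and ReduceClass.

// Driver-wiring sketch, illustrative only; targets the 0.16-era mapred API.
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;

public class DriverSketch {
    public static void run(String filename, String outputPath) throws IOException {
        JobConf conf = new JobConf(DriverSketch.class);
        conf.setJobName("demo");

        conf.setInputPath(new Path(filename));      // 0.16 API
        conf.setOutputPath(new Path(outputPath));   // 0.16 API

        // Key/value types match IdentityMapper over the default TextInputFormat.
        conf.setOutputKeyClass(LongWritable.class);
        conf.setOutputValueClass(Text.class);
        conf.setMapperClass(IdentityMapper.class);
        conf.setReducerClass(IdentityReducer.class);

        // Delete the output directory if it exists already.
        FileSystem.get(conf).delete(new Path(outputPath));
        JobClient.runJob(conf);
    }
}
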
sample/hadoop-0.16/tw/org/nchc/demo/LogFetcher.java (r21 → r25)

 import java.text.ParseException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
…
 import org.apache.hadoop.mapred.OutputCollector;
 import org.apache.hadoop.mapred.Reporter;
-import tw.org.nchc.code.Convert;
…
 }
 }
+static public Path[] listPaths(FileSystem fsm, Path path) throws IOException {
+    FileStatus[] fss = fsm.listStatus(path);
+    int length = fss.length;
+    Path[] pi = new Path[length];
+    for (int i = 0; i < length; i++) {
+        pi[i] = fss[i].getPath();
+    }
+    return pi;
+}
 public static void runMapReduce(String table, String dir)
 throws IOException {
…
 jobConf.set(TABLE, table);
 // my convert function from 0.16 to 0.17
-Path[] in = Convert.listPaths(fs, InputDir);
+Path[] in = listPaths(fs, InputDir);
 if (fs.isFile(InputDir)) {
-// 0.16
-// jobConf.setInputPath(InputDir);
-Convert.setInputPath(jobConf, InputDir);
+jobConf.setInputPath(InputDir);
 } else {
 for (int i = 0; i < in.length; i++) {
 if (fs.isFile(in[i])) {
-// 0.16
-// jobConf.addInputPath(in[i]);
-Convert.addInputPath(jobConf, in[i]);
+jobConf.addInputPath(in[i]);
 } else {
 // my convert function from 0.16 to 0.17
-Path[] sub = Convert.listPaths(fs, in[i]);
+Path[] sub = listPaths(fs, in[i]);
 for (int j = 0; j < sub.length; j++) {
 if (fs.isFile(sub[j])) {
-// 0.16
-// jobConf.addInputPath(sub[j]);
-Convert.addInputPath(jobConf, sub[j]);
+jobConf.addInputPath(sub[j]);
 }
…
 }
 }
-// 0.16
-// jobConf.setOutputPath(tempDir);
-Convert.setOutputPath(jobConf, tempDir);
+jobConf.setOutputPath(tempDir);
 jobConf.setMapperClass(MapClass.class);
…
 JobClient.runJob(jobConf);
-// 0.16
-// fs.delete(tempDir);
-fs.delete(tempDir, true);
+fs.delete(tempDir);
 fs.close();
 }
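
The listPaths() helper added above wraps listStatus() so the rest of the file can keep working with Path[]. For comparison (not in the repository), the same one-level scan written directly against FileStatus, using the 0.17-style input registration, could look like this:

// Comparison sketch: register every plain file under a directory as job input.
import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.JobConf;

public class InputScanSketch {
    static void addFilesUnder(JobConf jobConf, FileSystem fs, Path dir) throws IOException {
        for (FileStatus status : fs.listStatus(dir)) {
            if (!status.isDir()) {
                // 0.17-style call; the file itself uses jobConf.addInputPath(...)
                FileInputFormat.addInputPath(jobConf, status.getPath());
            }
        }
    }
}
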
sample/hadoop-0.16/tw/org/nchc/util/SequenceFileProcessor.java (r21 → r25)

-/**
- * Program: BuildHTable.java
- * Editor: Waue Chen
- * From : NCHC. Taiwn
- * Last Update Date: 07/02/2008
- * Upgrade to 0.17
- * Re-code from : Cloud9: A MapReduce Library for Hadoop
+/*
+ * Cloud9: A MapReduce Library for Hadoop
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You may
+ * obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
  */
 package tw.org.nchc.util;
…
 import java.io.IOException;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
…
 /**
- * Upgrade from hadoop 0.16 to 0.17
  * <p>
  * Harness for processing one or more {@link SequenceFile}s within a single
…
 private void run() throws IOException {
 if (!FileSystem.get(conf).isFile(mPath)) {
-Path[] pa = new Path[] { mPath };
-Path p;
-// hadoop 0.17 -> listStatus();
-FileStatus[] fi = FileSystem.get(conf).listStatus(pa);
-for (int i = 0; i < fi.length; i++) {
-p = fi[i].getPath();
+for (Path p : FileSystem.get(conf).listPaths(new Path[] { mPath })) {
 // System.out.println("Applying to " + p);
 applyToFile(p);
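
The run() change above is the listing half of the migration: 0.16's listPaths() returns Path[] directly, while 0.17's listStatus() returns FileStatus[] that has to be unwrapped. A side-by-side sketch follows, with each method targeting its own release, so the two halves do not compile against a single Hadoop version:

// Illustrative only: the two directory-listing idioms this file toggles between.
import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListingSketch {
    // Hadoop 0.16: listPaths() hands back Path objects directly.
    static void visit016(FileSystem fs, Path dir) throws IOException {
        for (Path p : fs.listPaths(new Path[] { dir })) {
            System.out.println("Applying to " + p);
        }
    }

    // Hadoop 0.17: listStatus() returns FileStatus entries, unwrapped to Path.
    static void visit017(FileSystem fs, Path dir) throws IOException {
        for (FileStatus status : fs.listStatus(new Path[] { dir })) {
            System.out.println("Applying to " + status.getPath());
        }
    }
}
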