- Timestamp: Jul 2, 2008, 3:10:09 PM
- Location: sample
- Files: 6 edited
sample/HBaseRecord.java (r9 → r18)

Header comment: the update date was bumped and an upgrade note added.

  * Editor: Waue Chen
  * From : NCHC. Taiwn
- * Last Update Date: 06/01/2008
+ * Last Update Date: 07/02/2008
+ * Upgrade to 0.17
  */

The class Javadoc (usage notes) was re-indented only; both revisions carry the same content, restored here once:

 * Make sure Hadoop file system and Hbase are running correctly.
 * 1. put test.txt in t1 directory which content is
 *      ---------------
 *      name:locate:years
 *      waue:taiwan:1981
 *      shellon:taiwan:1981
 *      ---------------
 * 2. hadoop_root/$ bin/hadoop dfs -put t1 t1
 * 3. hbase_root/$ bin/hbase shell
 * 4. hql > create table t1_table("person");
 * 5. Come to Eclipse and run this code, and we will let database as that
 *      t1_table -> person
 *      ----------------
 *      | name    | locate | years |
 *      | waue    | taiwan | 1981  |
 *      | shellon | taiwan | 1981  |
 *      ----------------
 * Check Result:
 * Go to hbase console, type :
 *      hql > select * from t1_table;
 *
 * 08/06/06 12:20:48 INFO hbase.HTable: Creating scanner over t1_table starting at key
 * +-----+---------------+---------+
 * | Row | Column        | Cell    |
 * +-----+---------------+---------+
 * | 0   | person:locate | locate  |
 * | 0   | person:name   | name    |
 * | 0   | person:years  | years   |
 * | 19  | person:locate | taiwan  |
 * | 19  | person:name   | waue    |
 * | 19  | person:years  | 1981    |
 * | 36  | person:locate | taiwan  |
 * | 36  | person:name   | shellon |
 * | 36  | person:years  | 1981   |
 * +-----+---------------+---------+
 * 3 row(s) in set. (0.04 sec)
 */

Inside the class body the edits are formatting only: blank lines between the field declarations, comment re-wrapping, and joining the ImmutableBytesWritable constructor calls onto single lines, e.g.

-ImmutableBytesWritable w_local = new ImmutableBytesWritable(
-        b_local);
+ImmutableBytesWritable w_local = new ImmutableBytesWritable(b_local);

The one functional change replaces JobConf.setInputPath(), which the 0.17 API no longer provides, with the project's Convert helper:

 conf.setNumMapTasks(setup.mapTasks);
 conf.setNumReduceTasks(setup.reduceTasks);
-conf.setInputPath(new Path(setup.file_path));
+
+// 0.16
+// conf.setInputPath(new Path(setup.file_path));
+Convert.setInputPath(conf, new Path(setup.file_path));
+
 conf.setMapperClass(IdentityMapper.class);
 conf.setCombinerClass(IdentityReducer.class);
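The Convert class itself is not part of this changeset. A minimal sketch of what such a shim plausibly looks like on Hadoop 0.17, assuming it does nothing more than forward to the FileInputFormat/FileOutputFormat static setters that replaced the removed JobConf methods (the class body below is a guess, not the repository's actual code):

	package tw.org.nchc.code;

	import org.apache.hadoop.fs.Path;
	import org.apache.hadoop.mapred.FileInputFormat;
	import org.apache.hadoop.mapred.FileOutputFormat;
	import org.apache.hadoop.mapred.JobConf;

	// Hypothetical reconstruction of the Convert helper used by the
	// samples: thin wrappers over the Hadoop 0.17 replacements for the
	// old JobConf.setInputPath()/setOutputPath() calls.
	public class Convert {

		// 0.16 equivalent: conf.setInputPath(path)
		public static void setInputPath(JobConf conf, Path path) {
			FileInputFormat.setInputPaths(conf, path);
		}

		// 0.16 equivalent: conf.setOutputPath(path)
		public static void setOutputPath(JobConf conf, Path path) {
			FileOutputFormat.setOutputPath(conf, path);
		}
	}

Under that assumption, Convert.setInputPath(conf, path) behaves exactly like the 0.16 conf.setInputPath(path) each sample used before.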
sample/HBaseRecord2.java (r14 → r18)

Header updated the same way (Last Update Date: 07/01/2008, plus the "Upgrade to 0.17" note). Two substantive edits:

1. A java.io.FileInputStream opened on setup.file_path was removed, along with its import. file_path names a location in the Hadoop file system, not on the local disk, so the local stream was apparently never usable; see the sketch below this list.

-import java.io.FileInputStream;
 import java.io.IOException;
 import java.util.Iterator;
 ...
-FileInputStream fi = new FileInputStream(setup.file_path);

2. The same input-path migration as in HBaseRecord.java:

-conf.setInputPath(new Path(setup.file_path));
+// 0.16
+// conf.setInputPath(new Path(setup.file_path));
+Convert.setInputPath(conf, new Path(setup.file_path));
 conf.setMapperClass(IdentityMapper.class);
 conf.setCombinerClass(IdentityReducer.class);
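If the file did need to be read, the HDFS-aware route is FileSystem.open() rather than a java.io stream. A minimal sketch (the class name is hypothetical; the path is this sample's t1 directory):

	import java.io.BufferedReader;
	import java.io.InputStreamReader;

	import org.apache.hadoop.conf.Configuration;
	import org.apache.hadoop.fs.FileSystem;
	import org.apache.hadoop.fs.Path;

	// Sketch: read an HDFS file through the Hadoop API instead of
	// java.io.FileInputStream, which only sees the local file system.
	public class ReadFromHdfs {
		public static void main(String[] args) throws Exception {
			FileSystem fs = FileSystem.get(new Configuration());
			// test.txt under t1, as in the sample's setup steps
			BufferedReader reader = new BufferedReader(new InputStreamReader(
					fs.open(new Path("/user/waue/t1/test.txt"))));
			String line;
			while ((line = reader.readLine()) != null) {
				System.out.println(line); // e.g. waue:taiwan:1981
			}
			reader.close();
		}
	}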
sample/HBaseRecordPro.java (r17 → r18)

The header's program name was corrected ("HBaseRecord.java" → "HBaseRecordPro.java"), the date bumped to 07/02/2008, and the "Upgrade to 0.17" note added. Two call sites were migrated:

 conf.setNumMapTasks(mapTasks);
 conf.setNumReduceTasks(reduceTasks);
-conf.setInputPath(text_path);
+// 0.16
+// conf.setInputPath(text_path);
+Convert.setInputPath(conf, text_path);

 // delete tmp file
-FileSystem.get(conf).delete(text_path);
+// 0.16
+// FileSystem.get(conf).delete(text_path);
+FileSystem.get(conf).delete(text_path,true);
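The second hunk reflects the other 0.17 change running through this changeset: the one-argument FileSystem.delete(Path) gave way to a two-argument form whose boolean states explicitly whether a directory should be removed recursively. A minimal sketch of the distinction (class name hypothetical, path taken from these samples):

	import org.apache.hadoop.conf.Configuration;
	import org.apache.hadoop.fs.FileSystem;
	import org.apache.hadoop.fs.Path;

	public class DeleteDemo {
		public static void main(String[] args) throws Exception {
			FileSystem fs = FileSystem.get(new Configuration());
			Path tmp = new Path("/user/waue/t1"); // path from this sample

			// recursive = true: remove the path and, if it is a directory,
			// everything under it -- what the old delete(Path) did
			fs.delete(tmp, true);

			// recursive = false: only succeeds for a file or an empty
			// directory; fails on a non-empty directory
			fs.delete(tmp, false);
		}
	}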
sample/WordCount.java (r9 → r18)

Header updated (07/02/2008, "Upgrade to 0.17"); the constructors and field declarations were reformatted (whitespace only). Three call sites were migrated to the 0.17 API:

-conf.setInputPath(new Path(wc.filepath));
+// 0.16
+// conf.setInputPath(new Path(wc.filepath));
+Convert.setInputPath(conf, new Path(wc.filepath));
 conf.setOutputKeyClass(Text.class);
 conf.setOutputValueClass(IntWritable.class);
-conf.setOutputPath(new Path(wc.outputPath));
+// 0.16
+// conf.setOutputPath(new Path(wc.outputPath));
+Convert.setOutputPath(conf, new Path(wc.outputPath));

 // Delete the output directory if it exists already
 Path outputDir = new Path(wc.outputPath);
-FileSystem.get(conf).delete(outputDir);
+// 0.16
+FileSystem.get(conf).delete(outputDir,true);
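With the defaults above (input /user/waue/input/, output counts1), a run can be checked from the Hadoop shell in the same style as the other samples' notes. The jar name below is illustrative, not from this repository:

hadoop_root/$ bin/hadoop jar nchc-samples.jar tw.org.nchc.code.WordCount
hadoop_root/$ bin/hadoop dfs -cat counts1/part-00000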
sample/WordCountFromHBase.java (r9 → r18)

Header updated (07/02/2008, "Upgrade to 0.17"). Three java.io imports (FileOutputStream, File, RandomAccessFile) were removed, and the output-path and delete calls migrated as in the other files:

 // input is Hbase format => TableInputFormat
 conf.setInputFormat(TableInputFormat.class);
-conf.setOutputPath(new Path(outputPath));
+// 0.16
+// conf.setOutputPath(new Path(outputPath));
+Convert.setOutputPath(conf, new Path(outputPath));
 // delete the old path with the same name
-FileSystem.get(conf).delete(new Path(outputPath));
+FileSystem.get(conf).delete(new Path(outputPath),true);
 JobClient.runJob(conf);
sample/WordCountIntoHBase.java (r8 → r18)

Header updated (07/02/2008, "Upgrade to 0.17"). The bulk of this diff is whitespace, comment re-wrapping, and brace-style reformatting of the parameter block, ReduceClass, and main(), e.g.

-ImmutableBytesWritable bytes =
-        new ImmutableBytesWritable(values.next().getBytes());
+ImmutableBytesWritable bytes = new ImmutableBytesWritable(values
+        .next().getBytes());

-}else{
-System.out.println("Table \"" + Table_Name +"\" has already existed !");
+} else {
+System.out.println("Table \"" + Table_Name
+        + "\" has already existed !");

The only functional change is the usual input-path migration:

 conf.setNumMapTasks(mapTasks);
 conf.setNumReduceTasks(reduceTasks);
-conf.setInputPath(new Path(Input_Path));
+// 0.16
+// conf.setInputPath(new Path(Input_Path));
+Convert.setInputPath(conf, new Path(Input_Path));
 conf.setMapperClass(IdentityMapper.class);
 conf.setCombinerClass(IdentityReducer.class);
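After a successful run the counts land in the word_count5 table named by Table_Name; it can be inspected the same way HBaseRecord.java's notes check t1_table:

hbase_root/$ bin/hbase shell
hql > select * from word_count5;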