/**
 * Program: HBaseRecord.java
 * Editor: Waue Chen
 * From: NCHC, Taiwan
 * Last Update Date: 06/01/2008
 */

/**
 * Purpose :
 * Parse your records and store them in HBase.
 *
 * How to use :
 * Make sure the Hadoop file system and HBase are running correctly.
 * 1. Put test.txt in the t1 directory; its content is
 ---------------
 name:locate:years
 waue:taiwan:1981
 shellon:taiwan:1981
 ---------------
 * 2. hadoop_root/$ bin/hadoop dfs -put t1 t1
 * 3. hbase_root/$ bin/hbase shell
 * 4. hql > create table t1_table("person");
 * 5. Go back to Eclipse and run this code; the resulting table will be
 t1_table -> person
 ----------------
 | name    | locate | years |
 | waue    | taiwan | 1981  |
 | shellon | taiwan | 1981  |
 ----------------
 * Check the result:
 * Go to the HBase console and type:
 * hql > select * from t1_table;
 08/06/06 12:20:48 INFO hbase.HTable: Creating scanner over t1_table starting at key
 +-------------------------+-------------------------+-------------------------+
 | Row                     | Column                  | Cell                    |
 +-------------------------+-------------------------+-------------------------+
 | 0                       | person:locate           | locate                  |
 +-------------------------+-------------------------+-------------------------+
 | 0                       | person:name             | name                    |
 +-------------------------+-------------------------+-------------------------+
 | 0                       | person:years            | years                   |
 +-------------------------+-------------------------+-------------------------+
 | 19                      | person:locate           | taiwan                  |
 +-------------------------+-------------------------+-------------------------+
 | 19                      | person:name             | waue                    |
 +-------------------------+-------------------------+-------------------------+
 | 19                      | person:years            | 1981                    |
 +-------------------------+-------------------------+-------------------------+
 | 36                      | person:locate           | taiwan                  |
 +-------------------------+-------------------------+-------------------------+
 | 36                      | person:name             | shellon                 |
 +-------------------------+-------------------------+-------------------------+
 | 36                      | person:years            | 1981                    |
 +-------------------------+-------------------------+-------------------------+
 3 row(s) in set. (0.04 sec)
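 *
 * Note: the Row values above are the byte offsets of the corresponding lines
 * in test.txt (the LongWritable keys produced by the input format and passed
 * through unchanged by IdentityMapper); that is why the three rows are keyed
 * 0, 19 and 36.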
 */

package tw.org.nchc.code;

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableReduce;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
79 | |
---|
80 | public class HBaseRecord { |
---|
81 | |
---|
82 | /* Denify parameter */ |
---|
83 | // one column family: person; three column qualifier: name,locate,years |
---|
84 | static private String baseId1 ="person:name"; |
---|
85 | static private String baseId2 ="person:locate"; |
---|
86 | static private String baseId3 ="person:years"; |
---|
87 | //split character |
---|
88 | static private String sp = ":"; |
---|
89 | // file path in hadoop file system (not phisical file system) |
---|
90 | String file_path = "/user/waue/t1"; |
---|
91 | // Hbase table name |
---|
92 | String table_name = "t1_table"; |
---|
93 | // setup MapTask and Reduce Task |
---|
94 | int mapTasks = 1; |
---|
95 | int reduceTasks = 1; |
---|
96 | |
---|

    private static class ReduceClass extends TableReduce<LongWritable, Text> {

        // the column ids are built from the static strings defined above
        private static final Text col_name = new Text(baseId1);
        private static final Text col_local = new Text(baseId2);
        private static final Text col_year = new Text(baseId3);

        // this map holds the columns of the current row
        private MapWritable map = new MapWritable();

        // in this sample the map phase does nothing (IdentityMapper);
        // all the work happens in reduce
        public void reduce(LongWritable key, Iterator<Text> values,
                OutputCollector<Text, MapWritable> output, Reporter reporter)
                throws IOException {

            // use toString() to get the record safely: Text.getBytes()
            // returns the backing array, which may be longer than the valid
            // data (Text.decode() with an explicit length is another option)
            String stro = values.next().toString();
            String str[] = stro.split(sp);
            // the record layout is name:locate:years
            byte b_name[] = str[0].getBytes();
            byte b_local[] = str[1].getBytes();
            byte b_year[] = str[2].getBytes();

            // cell contents must be ImmutableBytesWritable
            ImmutableBytesWritable w_name = new ImmutableBytesWritable(b_name);
            ImmutableBytesWritable w_local = new ImmutableBytesWritable(b_local);
            ImmutableBytesWritable w_year = new ImmutableBytesWritable(b_year);

            // populate the current row
            map.clear();
            map.put(col_name, w_name);
            map.put(col_local, w_local);
            map.put(col_year, w_year);

            // emit the row, using the input key (the line's byte offset)
            // as the row id
            output.collect(new Text(key.toString()), map);
        }
    }
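
    /*
     * Worked example: for the input line "waue:taiwan:1981" (at byte offset
     * 19 in test.txt), the reducer above emits row "19" with the cells
     *   person:name   = waue
     *   person:locate = taiwan
     *   person:years  = 1981
     * matching the scanner output shown in the header comment.
     */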

    private HBaseRecord() {
    }

    /**
     * Runs the demo.
     */
    public static void main(String[] args) throws IOException {

        HBaseRecord setup = new HBaseRecord();
        JobConf conf = new JobConf(HBaseRecord.class);

        // job name; change it to anything you like
        conf.setJobName("NCHC_PersonDataBase");

        // the HBase table name must match the table created in the shell;
        // in this example it is t1_table
        TableReduce.initJob(setup.table_name, ReduceClass.class, conf);

        // map-reduce configuration: one map and one reduce task, reading
        // from the input path in the Hadoop file system
        conf.setNumMapTasks(setup.mapTasks);
        conf.setNumReduceTasks(setup.reduceTasks);
        conf.setInputPath(new Path(setup.file_path));
        // IdentityMapper passes each (byte offset, line) pair straight
        // through to the reducer
        conf.setMapperClass(IdentityMapper.class);
        conf.setCombinerClass(IdentityReducer.class);
        conf.setReducerClass(ReduceClass.class);
        JobClient.runJob(conf);
    }
}
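
// Note: to run the packaged job outside Eclipse, a command of the following
// form should work (the jar name here is only an example):
//   hadoop_root/$ bin/hadoop jar HBaseRecord.jar tw.org.nchc.code.HBaseRecord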