[[PageOutline]]
◢ <[wiki:III140614/Lab9 Lab 9]> | <[wiki:III140614 Back to Course Outline]> ▲ | <[wiki:III140614/Lab11 Lab 11]> ◣
{{{
#!text
Please connect to hadoop.3du.me for this exercise.
}}}
= Lab 10 =
{{{
#!html
Verifying HDFS, MapReduce, and Hadoop Streaming concepts
Running Hadoop Streaming with HDFS and MapReduce
}}}
== Preparing the Input Dataset ==
* First, prepare the input, which has two parts: (1) the *.fa files for Velvet to process. For demonstration purposes we use test_long.fa as the template and upload 99 copies under different file names to HDFS as the input. (2) The input files for the Mapper, each containing a list of HDFS file paths.
{{{
~$ cp /usr/share/doc/velvet-example/examples/data/test_long.fa.gz .
~$ gunzip test_long.fa.gz
~$ for ((i=1;i<100;i++)); do hadoop fs -put test_long.fa input-$i.fa; done
~$ for ((i=1;i<20;i++)); do echo /user/$(whoami)/input-$i.fa; done > split-01.txt
~$ for ((i=20;i<40;i++)); do echo /user/$(whoami)/input-$i.fa; done > split-02.txt
~$ for ((i=40;i<60;i++)); do echo /user/$(whoami)/input-$i.fa; done > split-03.txt
~$ for ((i=60;i<80;i++)); do echo /user/$(whoami)/input-$i.fa; done > split-04.txt
~$ for ((i=80;i<100;i++)); do echo /user/$(whoami)/input-$i.fa; done > split-05.txt
~$ hadoop fs -mkdir lab10_input
~$ hadoop fs -put split-0* lab10_input
}}}
* Check the input files; an extra sanity check is sketched after this block.
{{{
~$ hadoop fs -ls
~$ hadoop fs -ls lab10_input
}}}
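* Before launching any jobs, it is worth confirming that all 99 files and the five split lists are actually in place. The following is a minimal sanity check (assuming the upload loops above completed without errors); both counts should come out to 99:
{{{
~$ cat split-0*.txt | wc -l                    # number of HDFS paths listed in the split files
~$ hadoop fs -ls | grep -c 'input-.*\.fa'      # number of input-*.fa files in your HDFS home
}}}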
== Observing the Hadoop Streaming Execution Identity and Working Directory ==
* Write testmapper.sh. This is a dry-run mapper: it only echoes the commands it would execute, so we can observe the user identity and working directory that a Hadoop Streaming task actually runs under.
{{{
#!sh
#!/bin/bash
# Dry run: echo the commands the real mapper would execute, so we can
# see which user, host, and working directory each streaming task uses.
id="h998"
mkdir -p /tmp/$id
host=`hostname`
pwd=`pwd`
uid=`whoami`
while read line; do
  input=$line
  filename=`basename $input`
  echo "$uid@$host:$pwd> hadoop fs -get $input /tmp/$id/$filename"
  echo "$uid@$host:$pwd> velveth output-$filename 17 -fasta -short /tmp/$id/$filename"
  echo "$uid@$host:$pwd> hadoop fs -put output-$filename ."
done
rm -rf /tmp/$id
}}}
* Next, let's verify locally that testmapper.sh behaves as expected:
{{{
~$ head -n 2 split-01.txt > split-00.txt
~$ cat > testmapper.sh << EOF
#!/bin/bash
id="`whoami`"
mkdir -p /tmp/\$id
host=\`hostname\`
pwd=\`pwd\`
uid=\`whoami\`
while read line; do
input=\$line
filename=\`basename \$input\`
echo "\$uid@\$host:\$pwd> hadoop fs -get \$input /tmp/\$id/\$filename"
echo "\$uid@\$host:\$pwd> velveth output-\$filename 17 -fasta -short /tmp/\$id/\$filename"
echo "\$uid@\$host:\$pwd> hadoop fs -put output-\$filename ."
done
rm -rf /tmp/\$id
EOF
~$ chmod a+x testmapper.sh
~$ cat split-00.txt | ./testmapper.sh
h998@hadoop:/home/h998> hadoop fs -get /user/h998/input-1.fa /tmp/h998/input-1.fa
h998@hadoop:/home/h998> velveth output-input-1.fa 17 -fasta -short /tmp/h998/input-1.fa
h998@hadoop:/home/h998> hadoop fs -put output-input-1.fa .
h998@hadoop:/home/h998> hadoop fs -get /user/h998/input-2.fa /tmp/h998/input-2.fa
}}}
* Now let's run testmapper.sh via Hadoop Streaming; a sketch for monitoring the job follows the command.
{{{
~$ hadoop jar hadoop-streaming.jar -input lab10_input -output lab10_out1 -mapper testmapper.sh -file testmapper.sh
}}}
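* While the job is running, you can follow its progress from the command line. This is a sketch assuming a classic Hadoop 1.x cluster; the job ID has to be taken from the output of the first command (or from the streaming job's own console output):
{{{
~$ hadoop job -list                  # show running jobs and their job IDs
~$ hadoop job -status <job_id>       # replace <job_id> with the ID reported above
}}}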
* Examine the results in lab10_out1. How do they differ from the local run? Pay attention to the user, host, and working directory in each line; a sketch for summarizing the hosts follows below.
{{{
~$ hadoop fs -cat /user/$(whoami)/lab10_out1/part-00000 | head
hadoop@hadoop104:/var/lib/hadoop/cache/hadoop/mapred/local/taskTracker/jobcache/job_201106041247_1820/attempt_201106041247_1820_m_000002_0/work> hadoop fs -get /user/h998/input-60.fa /tmp/h998/input-60.fa
hadoop@hadoop104:/var/lib/hadoop/cache/hadoop/mapred/local/taskTracker/jobcache/job_201106041247_1820/attempt_201106041247_1820_m_000002_0/work> hadoop fs -get /user/h998/input-61.fa /tmp/h998/input-61.fa
hadoop@hadoop104:/var/lib/hadoop/cache/hadoop/mapred/local/taskTracker/jobcache/job_201106041247_1820/attempt_201106041247_1820_m_000002_0/work> hadoop fs -get /user/h998/input-62.fa /tmp/h998/input-62.fa
}}}
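* Because every echoed line starts with a `$uid@$host` prefix, the output also reveals which worker nodes the map tasks were scheduled on. A quick way to summarize this (a sketch, relying on the output format produced by testmapper.sh above):
{{{
~$ hadoop fs -cat lab10_out1/part-* | cut -d@ -f2 | cut -d: -f1 | sort | uniq -c
}}}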
== Running 99 Velvet Computations via Hadoop Streaming ==
* Write mapper.sh. Unlike testmapper.sh, this one actually executes the commands: it fetches each .fa file from HDFS, runs velveth on it, and puts the output directory back into HDFS. Because the streaming task runs as the hadoop user, it also chowns the output back to your account.
{{{
#!sh
#!/bin/bash
# Real mapper: for each HDFS path read from stdin, fetch the file,
# run velveth on it, and push the result directory back into HDFS.
id="h998"
mkdir -p /tmp/$id
host=`hostname`
pwd=`pwd`
uid=`whoami`
while read line; do
  input=$line
  filename=`basename $input`
  echo "$uid@$host> hadoop fs -get $input /tmp/$id/$filename"
  hadoop fs -get $input /tmp/$id/$filename
  echo "$uid@$host> velveth output-$filename 17 -fasta -short /tmp/$id/$filename"
  velveth output-$filename 17 -fasta -short /tmp/$id/$filename
  echo "$uid@$host> hadoop fs -put output-$filename /user/$id/."
  hadoop fs -put output-$filename /user/$id/.
  # The task runs as the hadoop user, so hand ownership back to our account.
  hadoop fs -chown $id /user/$id/output-$filename
done
rm -rf /tmp/$id
}}}
* Test mapper.sh locally:
{{{
~$ cat > mapper.sh << EOF
#!/bin/bash
id="`whoami`"
mkdir -p /tmp/\$id
host=\`hostname\`
pwd=\`pwd\`
uid=\`whoami\`
while read line; do
input=\$line
filename=\`basename \$input\`
echo "\$uid@\$host> hadoop fs -get \$input /tmp/\$id/\$filename"
hadoop fs -get \$input /tmp/\$id/\$filename
echo "\$uid@\$host> velveth output-\$filename 17 -fasta -short /tmp/\$id/\$filename"
velveth output-\$filename 17 -fasta -short /tmp/\$id/\$filename
echo "\$uid@\$host> hadoop fs -put output-\$filename /user/\$id/."
hadoop fs -put output-\$filename /user/\$id/.
hadoop fs -chown \$id /user/\$id/output-\$filename
done
rm -rf /tmp/\$id
EOF
~$ chmod a+x mapper.sh
~$ cat split-00.txt | ./mapper.sh
~$ hadoop fs -rmr output-*
~$ rm -rf output-input-*
}}}
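* One thing to keep in mind before submitting the real job: mapper.sh invokes velveth on whichever node each task lands on, so Velvet has to be installed on every TaskTracker node, not just on the login host. You can at least confirm it is available locally:
{{{
~$ which velveth      # should print the path of the velveth binary
}}}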
* Then run it with Hadoop Streaming; a sketch for checking the results follows the command.
{{{
~$ hadoop jar hadoop-streaming.jar -input lab10_input -output lab10_out2 -mapper mapper.sh -file mapper.sh
}}}
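* When the job finishes, you can confirm that all 99 Velvet runs produced an output directory in your HDFS home and inspect the commands echoed by each map task (a sketch, assuming the naming scheme used in mapper.sh above):
{{{
~$ hadoop fs -ls | grep -c 'output-input-'   # should report 99 output directories
~$ hadoop fs -cat lab10_out2/part-* | head   # commands echoed by the map tasks
}}}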