# Set Hadoop-specific environment variables here.

# The only required environment variable is JAVA_HOME. All others are
# optional. When running a distributed configuration it is best to
# set JAVA_HOME in this file, so that it is correctly defined on
# remote nodes.

# The java implementation to use. Required.
export JAVA_HOME=/usr/lib/jvm/java-6-sun
export HADOOP_HOME=/opt/hadoop
export HADOOP_CONF_DIR="${HADOOP_HOME}/conf"

# Extra Java CLASSPATH elements. Optional.
# export HADOOP_CLASSPATH=

# The maximum amount of heap to use, in MB. Default is 1000.
# export HADOOP_HEAPSIZE=2000

# Extra Java runtime options. Empty by default.
# export HADOOP_OPTS=-server

# Command specific options appended to HADOOP_OPTS when specified.
# Each daemon gets JMX remote management enabled; any pre-existing
# per-daemon options are preserved by appending the prior value.
export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote ${HADOOP_NAMENODE_OPTS}"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote ${HADOOP_SECONDARYNAMENODE_OPTS}"
export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote ${HADOOP_DATANODE_OPTS}"
export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote ${HADOOP_BALANCER_OPTS}"
export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote ${HADOOP_JOBTRACKER_OPTS}"
# export HADOOP_TASKTRACKER_OPTS=
# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
# export HADOOP_CLIENT_OPTS

# Extra ssh options. Empty by default.
# export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"

# Where log files are stored. $HADOOP_HOME/logs by default.
export HADOOP_LOG_DIR=/var/log/hadoop

# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.
# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves

# host:path where hadoop code should be rsync'd from. Unset by default.
# export HADOOP_MASTER=master:/home/$USER/src/hadoop

# Seconds to sleep between slave commands. Unset by default. This
# can be useful in large clusters, where, e.g., slave rsyncs can
# otherwise arrive faster than the master can service them.
# export HADOOP_SLAVE_SLEEP=0.1

# The directory where pid files are stored. /tmp by default.
# export HADOOP_PID_DIR=/var/hadoop/pids

# A string representing this instance of hadoop. $USER by default.
# export HADOOP_IDENT_STRING=$USER

# The scheduling priority for daemon processes. See 'man nice'.
# export HADOOP_NICENESS=10