#/*
# Copyright 2009-2013 by The Regents of the University of California
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# you may obtain a copy of the License from
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#*/
# Set Hadoop-specific environment variables here.

# The only required environment variable is JAVA_HOME. All others are
# optional. When running a distributed configuration it is best to
# set JAVA_HOME in this file, so that it is correctly defined on
# remote nodes.

# The java implementation to use. Required.
# export JAVA_HOME=$JAVA_HOME
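# For example (illustrative path only; point this at the JDK installed on
# your own nodes):
# export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64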

# Extra Java CLASSPATH elements. Optional.
# export HADOOP_CLASSPATH=
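# For example (paths are illustrative; list any extra jars or config
# directories, separated by ':'):
# export HADOOP_CLASSPATH=/opt/extra-libs/my-extra.jar:/etc/extra-conf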

# The maximum amount of heap to use, in MB. Default is 1000.
# export HADOOP_HEAPSIZE=2000

# Extra Java runtime options. Empty by default.
# export HADOOP_OPTS=-server
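# For example (illustrative; any standard JVM flags can be passed here):
# export HADOOP_OPTS="-server -Djava.net.preferIPv4Stack=true"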

# Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
# export HADOOP_TASKTRACKER_OPTS=
# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
# export HADOOP_CLIENT_OPTS
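# For example (illustrative; raises the client-side JVM heap for large
# fs/distcp invocations):
# export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"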

# Extra ssh options. Empty by default.
# export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"

# Where log files are stored. $HADOOP_HOME/logs by default.
# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs

# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.
# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves

# host:path where hadoop code should be rsync'd from. Unset by default.
# export HADOOP_MASTER=master:/home/$USER/src/hadoop

# Seconds to sleep between slave commands. Unset by default. This
# can be useful in large clusters, where, e.g., slave rsyncs can
# otherwise arrive faster than the master can service them.
# export HADOOP_SLAVE_SLEEP=0.1

# The directory where pid files are stored. /tmp by default.
# export HADOOP_PID_DIR=/var/hadoop/pids

# A string representing this instance of hadoop. $USER by default.
# export HADOOP_IDENT_STRING=$USER

# The scheduling priority for daemon processes. See 'man nice'.
# export HADOOP_NICENESS=10