<?xml version="1.0"?>
<!--
 ! Copyright 2009-2013 by The Regents of the University of California
 ! Licensed under the Apache License, Version 2.0 (the "License");
 ! you may not use this file except in compliance with the License.
 ! you may obtain a copy of the License from
 !
 !     http://www.apache.org/licenses/LICENSE-2.0
 !
 ! Unless required by applicable law or agreed to in writing, software
 ! distributed under the License is distributed on an "AS IS" BASIS,
 ! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ! See the License for the specific language governing permissions and
 ! limitations under the License.
 !-->
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>

<!-- Hive Configuration can either be stored in this file or in the hadoop
     configuration files that are implied by Hadoop setup variables.
     Aside from Hadoop setup variables - this file is provided as a
     convenience so that Hive users do not have to edit hadoop configuration
     files (that may be managed as a centralized resource). -->

<!-- Hive Execution Parameters -->
<property>
  <name>mapred.reduce.tasks</name>
  <value>-1</value>
  <description>The default number of reduce tasks per job. Typically set
    to a prime close to the number of available hosts. Ignored when
    mapred.job.tracker is "local". Hadoop sets this to 1 by default,
    whereas Hive uses -1 as its default value. By setting this property
    to -1, Hive will automatically figure out the number of reducers.
  </description>
</property>

<property>
  <name>hive.hyracks.connectorpolicy</name>
  <value>SEND_SIDE_MAT_PIPELINING</value>
</property>

<property>
  <name>hive.hyracks.host</name>
  <value>127.0.0.1</value>
</property>

<property>
  <name>hive.hyracks.port</name>
  <value>13099</value>
</property>

<property>
  <name>hive.hyracks.app</name>
  <value>hivesterix</value>
</property>

<property>
  <name>hive.hyracks.parrallelism</name>
  <value>2</value>
</property>
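
<!-- The hive.hyracks.* properties above identify the Hyracks cluster
     controller that Hivesterix submits jobs to. A sketch of re-pointing
     them at a remote cluster (the host name below is illustrative, not a
     default):

     <property>
       <name>hive.hyracks.host</name>
       <value>cc-node.example.com</value>
     </property>
-->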

<property>
  <name>hive.algebricks.groupby.external</name>
  <value>true</value>
</property>

<property>
  <name>hive.algebricks.groupby.external.memory</name>
  <value>3072</value>
</property>

<property>
  <name>hive.algebricks.sort.memory</name>
  <value>3072</value>
</property>

<property>
  <name>hive.algebricks.framesize</name>
  <value>768</value>
</property>

<property>
  <name>hive.exec.reducers.bytes.per.reducer</name>
  <value>1000000000</value>
  <description>Size per reducer. The default is 1G, i.e. if the input
    size is 10G, it will use 10 reducers.
  </description>
</property>

<property>
  <name>hive.exec.reducers.max</name>
  <value>999</value>
  <description>Maximum number of reducers that will be used. If the value
    specified in the configuration parameter mapred.reduce.tasks is
    negative, Hive will use this as the maximum number of reducers when
    automatically determining the number of reducers.
  </description>
</property>
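
<!-- When mapred.reduce.tasks is -1, Hive estimates the reducer count from
     the two properties above, roughly:
         reducers = min(ceil(totalInputBytes / bytes.per.reducer), reducers.max)
     e.g. a 10 GB input with the defaults above gives
         min(ceil(10,000,000,000 / 1,000,000,000), 999) = 10 reducers.
     (A sketch of the estimation logic, stated here for convenience.) -->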

<property>
  <name>hive.exec.scratchdir</name>
  <value>/tmp/hive-${user.name}</value>
  <description>Scratch space for Hive jobs</description>
</property>

<property>
  <name>hive.test.mode</name>
  <value>false</value>
  <description>Whether Hive is running in test mode. If yes, it turns on
    sampling and prefixes the output table name.
  </description>
</property>

<property>
  <name>hive.test.mode.prefix</name>
  <value>test_</value>
  <description>If Hive is running in test mode, prefixes the output
    table name with this string.
  </description>
</property>

<!-- If the input table is not bucketed, the denominator of the tablesample
     is determined by the parameter below. For example, the following query:
         INSERT OVERWRITE TABLE dest
         SELECT col1 FROM src
     would be converted to
         INSERT OVERWRITE TABLE test_dest
         SELECT col1 FROM src TABLESAMPLE (BUCKET 1 OUT OF 32 ON rand(1)) -->
<property>
  <name>hive.test.mode.samplefreq</name>
  <value>32</value>
  <description>If Hive is running in test mode and the table is not
    bucketed, the sampling frequency.
  </description>
</property>

<property>
  <name>hive.test.mode.nosamplelist</name>
  <value></value>
  <description>If Hive is running in test mode, don't sample the above
    comma-separated list of tables.
  </description>
</property>

<property>
  <name>hive.metastore.local</name>
  <value>true</value>
  <description>Controls whether to connect to a remote metastore server
    or open a new metastore server in the Hive Client JVM.
  </description>
</property>

<property>
  <name>javax.jdo.option.ConnectionURL</name>
  <value>jdbc:derby:;databaseName=metastore_db;create=true</value>
  <description>JDBC connect string for a JDBC metastore</description>
</property>

<property>
  <name>javax.jdo.option.ConnectionDriverName</name>
  <value>org.apache.derby.jdbc.EmbeddedDriver</value>
  <description>Driver class name for a JDBC metastore</description>
</property>

<property>
  <name>javax.jdo.PersistenceManagerFactoryClass</name>
  <value>org.datanucleus.jdo.JDOPersistenceManagerFactory</value>
  <description>Class implementing the JDO persistence</description>
</property>

<property>
  <name>datanucleus.connectionPoolingType</name>
  <value>DBCP</value>
  <description>Uses a DBCP connection pool for the JDBC metastore
  </description>
</property>

<property>
  <name>javax.jdo.option.DetachAllOnCommit</name>
  <value>true</value>
  <description>Detaches all objects from the session so that they can be
    used after the transaction is committed.
  </description>
</property>

<property>
  <name>javax.jdo.option.NonTransactionalRead</name>
  <value>true</value>
  <description>Reads outside of transactions</description>
</property>

<property>
  <name>javax.jdo.option.ConnectionUserName</name>
  <value>APP</value>
  <description>Username to use against the metastore database</description>
</property>

<property>
  <name>javax.jdo.option.ConnectionPassword</name>
  <value>mine</value>
  <description>Password to use against the metastore database</description>
</property>
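
<!-- The defaults above use an embedded Derby metastore, which supports only
     one connection at a time. A sketch of pointing the metastore at an
     external MySQL database instead (host, database name, and credentials
     are illustrative; the MySQL JDBC driver jar must be on the classpath):

     <property>
       <name>javax.jdo.option.ConnectionURL</name>
       <value>jdbc:mysql://db-host.example.com:3306/metastore?createDatabaseIfNotExist=true</value>
     </property>
     <property>
       <name>javax.jdo.option.ConnectionDriverName</name>
       <value>com.mysql.jdbc.Driver</value>
     </property>
     <property>
       <name>javax.jdo.option.ConnectionUserName</name>
       <value>hiveuser</value>
     </property>
     <property>
       <name>javax.jdo.option.ConnectionPassword</name>
       <value>hivepass</value>
     </property>
-->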

<property>
  <name>datanucleus.validateTables</name>
  <value>false</value>
  <description>Validates existing schema against code. Turn this on if
    you want to verify the existing schema.
  </description>
</property>

<property>
  <name>datanucleus.validateColumns</name>
  <value>false</value>
  <description>Validates existing schema against code. Turn this on if
    you want to verify the existing schema.
  </description>
</property>

<property>
  <name>datanucleus.validateConstraints</name>
  <value>false</value>
  <description>Validates existing schema against code. Turn this on if
    you want to verify the existing schema.
  </description>
</property>

<property>
  <name>datanucleus.storeManagerType</name>
  <value>rdbms</value>
  <description>Metadata store type</description>
</property>

<property>
  <name>datanucleus.autoCreateSchema</name>
  <value>true</value>
  <description>Creates the necessary schema on startup if one doesn't
    exist. Set this to false after creating it once.
  </description>
</property>

<property>
  <name>datanucleus.autoStartMechanismMode</name>
  <value>checked</value>
  <description>Throws an exception if metadata tables are incorrect.
  </description>
</property>

<property>
  <name>datanucleus.transactionIsolation</name>
  <value>read-committed</value>
  <description>Default transaction isolation level for identity
    generation.
  </description>
</property>

<property>
  <name>datanucleus.cache.level2</name>
  <value>false</value>
  <description>Use a level 2 cache. Turn this off if metadata is changed
    independently of the Hive metastore server.
  </description>
</property>

<property>
  <name>datanucleus.cache.level2.type</name>
  <value>SOFT</value>
  <description>SOFT=soft reference based cache, WEAK=weak reference
    based cache.
  </description>
</property>

<property>
  <name>datanucleus.identifierFactory</name>
  <value>datanucleus</value>
  <description>Name of the identifier factory to use when generating
    table/column names etc. 'datanucleus' is used for backward
    compatibility.
  </description>
</property>

<property>
  <name>hive.metastore.warehouse.dir</name>
  <value>/tmp/hivesterix</value>
  <description>Location of the default database for the warehouse.
  </description>
</property>

<property>
  <name>hive.metastore.connect.retries</name>
  <value>5</value>
  <description>Number of retries while opening a connection to the
    metastore.
  </description>
</property>

<property>
  <name>hive.metastore.rawstore.impl</name>
  <value>org.apache.hadoop.hive.metastore.ObjectStore</value>
  <description>Name of the class that implements the
    org.apache.hadoop.hive.metastore.rawstore interface. This class is
    used to store and retrieve raw metadata objects such as tables and
    databases.
  </description>
</property>

<property>
  <name>hive.default.fileformat</name>
  <value>TextFile</value>
  <description>Default file format for CREATE TABLE statements. Options
    are TextFile and SequenceFile. Users can explicitly say CREATE TABLE
    ... STORED AS &lt;TEXTFILE|SEQUENCEFILE&gt; to override.</description>
</property>
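
<!-- For example, to override the default format for a single table
     (table and column names are illustrative):
         CREATE TABLE page_views (url STRING, hits INT)
         STORED AS SEQUENCEFILE; -->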

<property>
  <name>hive.fileformat.check</name>
  <value>true</value>
  <description>Whether to check the file format or not when loading data
    files.
  </description>
</property>

<property>
  <name>hive.map.aggr</name>
  <value>true</value>
  <description>Whether to use map-side aggregation in Hive Group By
    queries.
  </description>
</property>

<property>
  <name>hive.groupby.skewindata</name>
  <value>false</value>
  <description>Whether there is skew in the data, to optimize group by
    queries.
  </description>
</property>

<property>
  <name>hive.groupby.mapaggr.checkinterval</name>
  <value>100000</value>
  <description>Number of rows after which the size of the grouping
    keys/aggregation classes is checked.
  </description>
</property>

<property>
  <name>hive.mapred.local.mem</name>
  <value>0</value>
  <description>For local mode, the memory of the mappers/reducers.
  </description>
</property>

<property>
  <name>hive.map.aggr.hash.percentmemory</name>
  <value>0.5</value>
  <description>Portion of total memory to be used by the map-side group
    aggregation hash table.
  </description>
</property>

<property>
  <name>hive.map.aggr.hash.min.reduction</name>
  <value>0.5</value>
  <description>Hash aggregation will be turned off if the ratio between
    hash table size and input rows is bigger than this number. Set to 1
    to make sure hash aggregation is never turned off.
  </description>
</property>

<property>
  <name>hive.optimize.cp</name>
  <value>true</value>
  <description>Whether to enable the column pruner</description>
</property>

<property>
  <name>hive.optimize.ppd</name>
  <value>true</value>
  <description>Whether to enable predicate pushdown</description>
</property>

<property>
  <name>hive.optimize.pruner</name>
  <value>true</value>
  <description>Whether to enable the new partition pruner, which depends
    on predicate pushdown. If this is disabled, the old partition pruner,
    which is based on the AST, will be enabled.
  </description>
</property>

<property>
  <name>hive.optimize.groupby</name>
  <value>true</value>
  <description>Whether to enable the bucketed group by from bucketed
    partitions/tables.
  </description>
</property>

<property>
  <name>hive.join.emit.interval</name>
  <value>1000</value>
  <description>How many rows in the right-most join operand Hive should
    buffer before emitting the join result.
  </description>
</property>

<property>
  <name>hive.join.cache.size</name>
  <value>25000</value>
  <description>How many rows in the joining tables (except the streaming
    table) should be cached in memory.
  </description>
</property>

<property>
  <name>hive.mapjoin.bucket.cache.size</name>
  <value>100</value>
  <description>How many values for each key in the map-joined table
    should be cached in memory.
  </description>
</property>

<property>
  <name>hive.mapjoin.maxsize</name>
  <value>100000</value>
  <description>Maximum number of rows of the small table that can be
    handled by a map-side join. If the size is reached and
    hive.task.progress is set, a fatal error counter is set and the job
    will be killed.
  </description>
</property>

<property>
  <name>hive.mapjoin.cache.numrows</name>
  <value>25000</value>
  <description>How many rows should be cached by jdbm for a map join.
  </description>
</property>
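
<!-- The map-join settings above take effect when the small side of a join
     is held in memory. In Hive of this vintage a map join is requested
     with a query hint (table names are illustrative):
         SELECT /*+ MAPJOIN(dim) */ f.id, d.name
         FROM fact f JOIN dim d ON (f.dim_id = d.id); -->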

<property>
  <name>hive.optimize.skewjoin</name>
  <value>false</value>
  <description>Whether to enable the skew join optimization.</description>
</property>

<property>
  <name>hive.skewjoin.key</name>
  <value>100000</value>
  <description>Determines if we get a skew key in a join. If we see more
    than the specified number of rows with the same key in the join
    operator, we consider the key a skew join key.
  </description>
</property>

<property>
  <name>hive.skewjoin.mapjoin.map.tasks</name>
  <value>10000</value>
  <description>Determines the number of map tasks used in the follow-up
    map join job for a skew join. It should be used together with
    hive.skewjoin.mapjoin.min.split to perform fine-grained control.
  </description>
</property>

<property>
  <name>hive.skewjoin.mapjoin.min.split</name>
  <value>33554432</value>
  <description>Determines the number of map tasks at most used in the
    follow-up map join job for a skew join, by specifying the minimum
    split size. It should be used together with
    hive.skewjoin.mapjoin.map.tasks to perform fine-grained control.
  </description>
</property>

<property>
  <name>hive.mapred.mode</name>
  <value>nonstrict</value>
  <description>The mode in which Hive operations are being performed. In
    strict mode, some risky queries are not allowed to run.
  </description>
</property>
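
<!-- For instance, in strict mode a query over a partitioned table must
     include a partition predicate, and ORDER BY requires a LIMIT
     (table and partition column are illustrative):
         SELECT * FROM page_views;                          rejected
         SELECT * FROM page_views WHERE dt = '2012-11-01';  allowed -->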

<property>
  <name>hive.exec.script.maxerrsize</name>
  <value>100000</value>
  <description>Maximum number of bytes a script is allowed to emit to
    standard error (per map-reduce task). This prevents runaway scripts
    from filling log partitions to capacity.
  </description>
</property>

<property>
  <name>hive.exec.script.allow.partial.consumption</name>
  <value>false</value>
  <description>When enabled, this option allows a user script to exit
    successfully without consuming all the data from standard input.
  </description>
</property>

<property>
  <name>hive.script.operator.id.env.var</name>
  <value>HIVE_SCRIPT_OPERATOR_ID</value>
  <description>Name of the environment variable that holds the unique
    script operator ID in the user's transform function (the custom
    mapper/reducer that the user has specified in the query).
  </description>
</property>

<property>
  <name>hive.exec.compress.output</name>
  <value>false</value>
  <description>This controls whether the final outputs of a query (to a
    local/HDFS file or a Hive table) are compressed. The compression
    codec and other options are determined from the hadoop config
    variables mapred.output.compress*.
  </description>
</property>

<property>
  <name>hive.exec.compress.intermediate</name>
  <value>false</value>
  <description>This controls whether intermediate files produced by Hive
    between multiple map-reduce jobs are compressed. The compression
    codec and other options are determined from the hadoop config
    variables mapred.output.compress*.
  </description>
</property>

<property>
  <name>hive.exec.parallel</name>
  <value>false</value>
  <description>Whether to execute jobs in parallel</description>
</property>

<property>
  <name>hive.exec.parallel.thread.number</name>
  <value>8</value>
  <description>How many jobs at most can be executed in parallel.
  </description>
</property>
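
<!-- Like most settings in this file, these can be overridden per session
     from the Hive CLI, e.g. to let independent stages of a query run
     concurrently:
         SET hive.exec.parallel=true;
         SET hive.exec.parallel.thread.number=8; -->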

<property>
  <name>hive.hwi.war.file</name>
  <value>lib/hive-hwi-0.7.0.war</value>
  <description>This sets the path to the HWI war file, relative to
    ${HIVE_HOME}.
  </description>
</property>

<property>
  <name>hive.hwi.listen.host</name>
  <value>0.0.0.0</value>
  <description>This is the host address the Hive Web Interface will
    listen on.
  </description>
</property>

<property>
  <name>hive.hwi.listen.port</name>
  <value>9999</value>
  <description>This is the port the Hive Web Interface will listen on.
  </description>
</property>
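
<!-- With the values above, once the Hive Web Interface service is started
     it should be reachable at http://server-host:9999/hwi, where
     server-host is the machine running HWI (an assumption based on the
     standard /hwi servlet path). -->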

<property>
  <name>hive.exec.pre.hooks</name>
  <value></value>
  <description>Pre Execute Hook for Tests</description>
</property>

<property>
  <name>hive.merge.mapfiles</name>
  <value>true</value>
  <description>Merge small files at the end of a map-only job.
  </description>
</property>

<property>
  <name>hive.merge.mapredfiles</name>
  <value>false</value>
  <description>Merge small files at the end of a map-reduce job.
  </description>
</property>

<property>
  <name>hive.heartbeat.interval</name>
  <value>1000</value>
  <description>Send a heartbeat after this interval - used by mapjoin
    and filter operators.
  </description>
</property>

<property>
  <name>hive.merge.size.per.task</name>
  <value>256000000</value>
  <description>Size of merged files at the end of the job</description>
</property>

<property>
  <name>hive.merge.size.smallfiles.avgsize</name>
  <value>16000000</value>
  <description>When the average output file size of a job is less than
    this number, Hive will start an additional map-reduce job to merge
    the output files into bigger files. This is only done for map-only
    jobs if hive.merge.mapfiles is true, and for map-reduce jobs if
    hive.merge.mapredfiles is true.
  </description>
</property>

<property>
  <name>hive.script.auto.progress</name>
  <value>false</value>
  <description>Whether the Hive Transform/Map/Reduce clause should
    automatically send progress information to the TaskTracker to avoid
    the task getting killed because of inactivity. Hive sends progress
    information when the script is outputting to stderr. This option
    removes the need to periodically produce stderr messages, but users
    should be cautious because this may prevent infinite loops in the
    scripts from being killed by the TaskTracker.
  </description>
</property>

<property>
  <name>hive.script.serde</name>
  <value>org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe</value>
  <description>The default serde for transmitting input data to and
    reading output data from the user scripts.
  </description>
</property>

<property>
  <name>hive.script.recordreader</name>
  <value>org.apache.hadoop.hive.ql.exec.TextRecordReader</value>
  <description>The default record reader for reading data from the user
    scripts.
  </description>
</property>

<property>
  <name>hive.script.recordwriter</name>
  <value>org.apache.hadoop.hive.ql.exec.TextRecordWriter</value>
  <description>The default record writer for writing data to the user
    scripts.
  </description>
</property>
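
<!-- These serde/recordreader/recordwriter defaults govern how rows are
     piped to and from user scripts in a TRANSFORM clause, e.g.
     (script name and columns are illustrative):
         SELECT TRANSFORM (url, hits)
         USING 'python my_script.py'
         AS (url, score)
         FROM page_views; -->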

<property>
  <name>hive.input.format</name>
  <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
  <description>The default input format. If it is not specified, the
    system assigns it: it is set to HiveInputFormat for hadoop versions
    17, 18 and 19, whereas it is set to CombinedHiveInputFormat for
    hadoop 20. The user can always overwrite it - if there is a bug in
    CombinedHiveInputFormat, it can always be manually set to
    HiveInputFormat.
  </description>
</property>

<property>
  <name>hive.udtf.auto.progress</name>
  <value>false</value>
  <description>Whether Hive should automatically send progress
    information to the TaskTracker when using UDTFs, to prevent the task
    getting killed because of inactivity. Users should be cautious
    because this may prevent the TaskTracker from killing tasks with
    infinite loops.
  </description>
</property>

<property>
  <name>hive.mapred.reduce.tasks.speculative.execution</name>
  <value>true</value>
  <description>Whether speculative execution for reducers should be
    turned on.
  </description>
</property>

<property>
  <name>hive.exec.counters.pull.interval</name>
  <value>1000</value>
  <description>The interval at which to poll the JobTracker for the
    counters of the running job. The smaller it is, the more load there
    will be on the JobTracker; the higher it is, the less granular the
    fetched counters will be.
  </description>
</property>

<property>
  <name>hive.enforce.bucketing</name>
  <value>false</value>
  <description>Whether bucketing is enforced. If true, while inserting
    into the table, bucketing is enforced.
  </description>
</property>

<property>
  <name>hive.enforce.sorting</name>
  <value>false</value>
  <description>Whether sorting is enforced. If true, while inserting
    into the table, sorting is enforced.
  </description>
</property>
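
<!-- A sketch of using the two settings above: create a bucketed table,
     then enable enforcement before inserting so that Hive produces one
     reducer per bucket (table and column names are illustrative):
         CREATE TABLE user_buckets (id INT, name STRING)
         CLUSTERED BY (id) INTO 32 BUCKETS;
         SET hive.enforce.bucketing = true;
         INSERT OVERWRITE TABLE user_buckets SELECT id, name FROM users; -->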

<property>
  <name>hive.metastore.ds.connection.url.hook</name>
  <value></value>
  <description>Name of the hook to use for retrieving the JDO connection
    URL. If empty, the value in javax.jdo.option.ConnectionURL is used.
  </description>
</property>

<property>
  <name>hive.metastore.ds.retry.attempts</name>
  <value>1</value>
  <description>The number of times to retry a metastore call if there is
    a connection error.
  </description>
</property>

<property>
  <name>hive.metastore.ds.retry.interval</name>
  <value>1000</value>
  <description>The number of milliseconds between metastore retry
    attempts.
  </description>
</property>

<property>
  <name>hive.metastore.server.min.threads</name>
  <value>200</value>
  <description>Minimum number of worker threads in the Thrift server's
    pool.
  </description>
</property>

<property>
  <name>hive.metastore.server.max.threads</name>
  <value>100000</value>
  <description>Maximum number of worker threads in the Thrift server's
    pool.
  </description>
</property>

<property>
  <name>hive.metastore.server.tcp.keepalive</name>
  <value>true</value>
  <description>Whether to enable TCP keepalive for the metastore server.
    Keepalive will prevent accumulation of half-open connections.
  </description>
</property>

<property>
  <name>hive.optimize.reducededuplication</name>
  <value>true</value>
  <description>Remove extra map-reduce jobs if the data is already
    clustered by the same key which needs to be used again. This should
    always be set to true. Since it is a new feature, it has been made
    configurable.
  </description>
</property>

<property>
  <name>hive.exec.dynamic.partition</name>
  <value>false</value>
  <description>Whether or not to allow dynamic partitions in DML/DDL.
  </description>
</property>

<property>
  <name>hive.exec.dynamic.partition.mode</name>
  <value>strict</value>
  <description>In strict mode, the user must specify at least one static
    partition in case the user accidentally overwrites all partitions.
  </description>
</property>

<property>
  <name>hive.exec.max.dynamic.partitions</name>
  <value>1000</value>
  <description>Maximum number of dynamic partitions allowed to be
    created in total.
  </description>
</property>

<property>
  <name>hive.exec.max.dynamic.partitions.pernode</name>
  <value>100</value>
  <description>Maximum number of dynamic partitions allowed to be
    created in each mapper/reducer node.
  </description>
</property>
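
<!-- A sketch of a dynamic-partition insert under the settings above
     (table names and the country/dt columns are illustrative). In the
     default strict mode at least one static partition must be supplied:
         SET hive.exec.dynamic.partition = true;
         INSERT OVERWRITE TABLE dest PARTITION (country = 'US', dt)
         SELECT col1, col2, dt FROM src;
     Setting hive.exec.dynamic.partition.mode to nonstrict allows every
     partition column to be dynamic. -->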

<property>
  <name>hive.default.partition.name</name>
  <value>__HIVE_DEFAULT_PARTITION__</value>
  <description>The default partition name in case the dynamic partition
    column value is null/empty string or any other value that cannot be
    escaped. This value must not contain any special character used in
    HDFS URIs (e.g., ':', '%', '/' etc). The user has to be aware that
    the dynamic partition value should not contain this value, to avoid
    confusion.
  </description>
</property>

<property>
  <name>fs.har.impl</name>
  <value>org.apache.hadoop.hive.shims.HiveHarFileSystem</value>
  <description>The implementation for accessing Hadoop Archives. Note
    that this won't be applicable to Hadoop versions less than 0.20.
  </description>
</property>

<property>
  <name>hive.archive.enabled</name>
  <value>false</value>
  <description>Whether archiving operations are permitted</description>
</property>

<property>
  <name>hive.archive.har.parentdir.settable</name>
  <value>false</value>
  <description>In new Hadoop versions, the parent directory must be set
    while creating a HAR. Because this functionality is hard to detect
    with just version numbers, this conf var needs to be set manually.
  </description>
</property>

<!-- HBase Storage Handler Parameters -->

<property>
  <name>hive.hbase.wal.enabled</name>
  <value>true</value>
  <description>Whether writes to HBase should be forced to the
    write-ahead log. Disabling this improves HBase write performance at
    the risk of lost writes in case of a crash.
  </description>
</property>

<property>
  <name>hive.exec.drop.ignorenonexistent</name>
  <value>true</value>
  <description>Drop table always works.</description>
</property>

</configuration>