<?xml version="1.0"?>
<!--
 ! Copyright 2009-2013 by The Regents of the University of California
 ! Licensed under the Apache License, Version 2.0 (the "License");
 ! you may not use this file except in compliance with the License.
 ! You may obtain a copy of the License from
 !
 !     http://www.apache.org/licenses/LICENSE-2.0
 !
 ! Unless required by applicable law or agreed to in writing, software
 ! distributed under the License is distributed on an "AS IS" BASIS,
 ! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ! See the License for the specific language governing permissions and
 ! limitations under the License.
 !-->
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>

    <!-- Hivesterix Execution Parameters -->
    <property>
        <name>hive.hyracks.connectorpolicy</name>
        <value>PIPELINING</value>
    </property>

    <property>
        <name>hive.hyracks.parrallelism</name>
        <value>4</value>
    </property>

    <property>
        <name>hive.algebricks.groupby.external</name>
        <value>true</value>
    </property>

    <property>
        <name>hive.algebricks.groupby.external.memory</name>
        <value>3072</value>
    </property>

    <property>
        <name>hive.algebricks.sort.memory</name>
        <value>3072</value>
    </property>

    <property>
        <name>hive.algebricks.framesize</name>
        <value>768</value>
    </property>

    <property>
        <name>hive.auto.convert.join</name>
        <value>false</value>
    </property>

    <property>
        <name>hive.auto.convert.join.noconditionaltask</name>
        <value>false</value>
        <description>Whether Hive enables the optimization of converting a
            common join into a mapjoin based on the input file size. If this
            parameter is on, and the sum of the sizes of n-1 of the
            tables/partitions in an n-way join is smaller than the specified
            size, the join is directly converted to a mapjoin (there is no
            conditional task).
        </description>
    </property>
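
    <!-- Illustrative sketch (hypothetical tables big_t and small_t; this
        comment is not part of the upstream file): if the two auto-convert
        settings above were enabled and small_t were under the threshold
        size, a query like -->
    <!-- SELECT b.col1, s.col2 FROM big_t b JOIN small_t s ON (b.key = s.key); -->
    <!-- would be executed as a mapjoin with small_t loaded into memory, and
        with no conditional backup task. -->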

    <!-- Hive configuration can either be stored in this file or in the
        Hadoop configuration files implied by the Hadoop setup variables.
        Aside from the Hadoop setup variables, this file is provided as a
        convenience so that Hive users do not have to edit Hadoop
        configuration files (which may be managed as a centralized
        resource). -->

    <!-- Hive Execution Parameters -->
    <property>
        <name>mapred.reduce.tasks</name>
        <value>-1</value>
        <description>The default number of reduce tasks per job, typically
            set to a prime close to the number of available hosts. Ignored
            when mapred.job.tracker is "local". Hadoop sets this to 1 by
            default, whereas Hive uses -1 as its default value. By setting
            this property to -1, Hive will automatically figure out the
            number of reducers.
        </description>
    </property>

    <property>
        <name>hive.exec.reducers.bytes.per.reducer</name>
        <value>1000000000</value>
        <description>Size per reducer. The default is 1 GB; i.e., if the
            input size is 10 GB, 10 reducers will be used.
        </description>
    </property>

    <property>
        <name>hive.exec.reducers.max</name>
        <value>999</value>
        <description>The maximum number of reducers that will be used. If the
            value specified in the configuration parameter mapred.reduce.tasks
            is negative, Hive will use this as the maximum number of reducers
            when automatically determining the number of reducers.
        </description>
    </property>
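
    <!-- Worked example (my reading of the two settings above, not text from
        the upstream file): with these values, Hive estimates the reducer
        count as roughly ceil(total input bytes / 1000000000), capped at 999.
        A 250 GB input would therefore get ceil(250e9 / 1e9) = 250 reducers,
        while inputs above roughly 999 GB are capped at 999 reducers. -->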

    <property>
        <name>hive.exec.scratchdir</name>
        <value>/tmp/hive-${user.name}</value>
        <description>Scratch space for Hive jobs</description>
    </property>

    <property>
        <name>hive.test.mode</name>
        <value>false</value>
        <description>Whether Hive is running in test mode. If yes, it turns
            on sampling and prefixes the output table name.
        </description>
    </property>

    <property>
        <name>hive.test.mode.prefix</name>
        <value>test_</value>
        <description>If Hive is running in test mode, prefixes the output
            table with this string.
        </description>
    </property>

    <!-- If the input table is not bucketed, the denominator of the
        tablesample is determined by the parameter below -->
    <!-- For example, the following query: -->
    <!-- INSERT OVERWRITE TABLE dest -->
    <!-- SELECT col1 from src -->
    <!-- would be converted to -->
    <!-- INSERT OVERWRITE TABLE test_dest -->
    <!-- SELECT col1 from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1)) -->
    <property>
        <name>hive.test.mode.samplefreq</name>
        <value>32</value>
        <description>If Hive is running in test mode and the table is not
            bucketed, the sampling frequency.
        </description>
    </property>

    <property>
        <name>hive.test.mode.nosamplelist</name>
        <value></value>
        <description>If Hive is running in test mode, don't sample the above
            comma-separated list of tables.
        </description>
    </property>

    <property>
        <name>hive.metastore.local</name>
        <value>true</value>
        <description>Controls whether to connect to a remote metastore server
            or open a new metastore server in the Hive client JVM.
        </description>
    </property>

    <property>
        <name>javax.jdo.option.ConnectionURL</name>
        <value>jdbc:derby:;databaseName=metastore_db;create=true</value>
        <description>JDBC connect string for a JDBC metastore</description>
    </property>

    <property>
        <name>javax.jdo.option.ConnectionDriverName</name>
        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
        <description>Driver class name for a JDBC metastore</description>
    </property>

    <property>
        <name>javax.jdo.PersistenceManagerFactoryClass</name>
        <value>org.datanucleus.jdo.JDOPersistenceManagerFactory</value>
        <description>Class implementing the JDO persistence</description>
    </property>

    <property>
        <name>datanucleus.connectionPoolingType</name>
        <value>DBCP</value>
        <description>Uses a DBCP connection pool for the JDBC metastore
        </description>
    </property>

    <property>
        <name>javax.jdo.option.DetachAllOnCommit</name>
        <value>true</value>
        <description>Detaches all objects from the session so that they can
            be used after the transaction is committed.
        </description>
    </property>

    <property>
        <name>javax.jdo.option.NonTransactionalRead</name>
        <value>true</value>
        <description>Reads outside of transactions</description>
    </property>

    <property>
        <name>javax.jdo.option.ConnectionUserName</name>
        <value>APP</value>
        <description>Username to use against the metastore database
        </description>
    </property>

    <property>
        <name>javax.jdo.option.ConnectionPassword</name>
        <value>mine</value>
        <description>Password to use against the metastore database
        </description>
    </property>

    <property>
        <name>datanucleus.validateTables</name>
        <value>false</value>
        <description>Validates the existing schema against the code. Turn
            this on if you want to verify the existing schema.
        </description>
    </property>

    <property>
        <name>datanucleus.validateColumns</name>
        <value>false</value>
        <description>Validates the existing schema against the code. Turn
            this on if you want to verify the existing schema.
        </description>
    </property>

    <property>
        <name>datanucleus.validateConstraints</name>
        <value>false</value>
        <description>Validates the existing schema against the code. Turn
            this on if you want to verify the existing schema.
        </description>
    </property>

    <property>
        <name>datanucleus.storeManagerType</name>
        <value>rdbms</value>
        <description>Metadata store type</description>
    </property>

    <property>
        <name>datanucleus.autoCreateSchema</name>
        <value>true</value>
        <description>Creates the necessary schema on startup if one doesn't
            exist. Set this to false after creating it once.
        </description>
    </property>

    <property>
        <name>datanucleus.autoStartMechanismMode</name>
        <value>checked</value>
        <description>Throw an exception if the metadata tables are incorrect.
        </description>
    </property>

    <property>
        <name>datanucleus.transactionIsolation</name>
        <value>read-committed</value>
        <description>Default transaction isolation level for identity
            generation.
        </description>
    </property>

    <property>
        <name>datanucleus.cache.level2</name>
        <value>false</value>
        <description>Use a level 2 cache. Turn this off if metadata is
            changed independently of the Hive metastore server.
        </description>
    </property>

    <property>
        <name>datanucleus.cache.level2.type</name>
        <value>SOFT</value>
        <description>SOFT=soft-reference-based cache,
            WEAK=weak-reference-based cache.
        </description>
    </property>

    <property>
        <name>datanucleus.identifierFactory</name>
        <value>datanucleus</value>
        <description>Name of the identifier factory to use when generating
            table/column names etc. 'datanucleus' is used for backward
            compatibility.
        </description>
    </property>

    <property>
        <name>hive.metastore.warehouse.dir</name>
        <value>/tmp/hivesterix</value>
        <description>Location of the default database for the warehouse
        </description>
    </property>

    <property>
        <name>hive.metastore.connect.retries</name>
        <value>5</value>
        <description>Number of retries while opening a connection to the
            metastore.
        </description>
    </property>

    <property>
        <name>hive.metastore.rawstore.impl</name>
        <value>org.apache.hadoop.hive.metastore.ObjectStore</value>
        <description>Name of the class that implements the
            org.apache.hadoop.hive.metastore.rawstore interface. This class
            is used to store and retrieve raw metadata objects such as tables
            and databases.
        </description>
    </property>

    <property>
        <name>hive.default.fileformat</name>
        <value>TextFile</value>
        <description>Default file format for CREATE TABLE statements. Options
            are TextFile and SequenceFile. Users can explicitly say CREATE
            TABLE ... STORED AS &lt;TEXTFILE|SEQUENCEFILE&gt; to override.
        </description>
    </property>
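
    <!-- Illustrative example (hypothetical table name; this comment is not
        part of the upstream file): to override the TextFile default above
        for a single table, one would write -->
    <!-- CREATE TABLE my_seq_table (key INT, value STRING) STORED AS SEQUENCEFILE; -->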

    <property>
        <name>hive.fileformat.check</name>
        <value>true</value>
        <description>Whether to check the file format or not when loading
            data files.
        </description>
    </property>

    <property>
        <name>hive.map.aggr</name>
        <value>true</value>
        <description>Whether to use map-side aggregation in Hive Group By
            queries.
        </description>
    </property>

    <property>
        <name>hive.groupby.skewindata</name>
        <value>false</value>
        <description>Whether there is skew in the data, to optimize group by
            queries.
        </description>
    </property>

    <property>
        <name>hive.groupby.mapaggr.checkinterval</name>
        <value>100000</value>
        <description>Number of rows after which the size of the grouping
            keys/aggregation classes is checked.
        </description>
    </property>

    <property>
        <name>hive.mapred.local.mem</name>
        <value>0</value>
        <description>For local mode, the memory of the mappers/reducers.
        </description>
    </property>

    <property>
        <name>hive.map.aggr.hash.percentmemory</name>
        <value>0.5</value>
        <description>Portion of total memory to be used by the map-side group
            aggregation hash table.
        </description>
    </property>

    <property>
        <name>hive.map.aggr.hash.min.reduction</name>
        <value>0.5</value>
        <description>Hash aggregation will be turned off if the ratio between
            the hash table size and the input rows is bigger than this number.
            Set to 1 to make sure hash aggregation is never turned off.
        </description>
    </property>

    <property>
        <name>hive.optimize.cp</name>
        <value>true</value>
        <description>Whether to enable the column pruner</description>
    </property>

    <property>
        <name>hive.optimize.ppd</name>
        <value>true</value>
        <description>Whether to enable predicate pushdown</description>
    </property>

    <property>
        <name>hive.optimize.pruner</name>
        <value>true</value>
        <description>Whether to enable the new partition pruner, which
            depends on predicate pushdown. If this is disabled, the old
            partition pruner, which is based on the AST, will be enabled.
        </description>
    </property>

    <property>
        <name>hive.optimize.groupby</name>
        <value>true</value>
        <description>Whether to enable the bucketed group by from bucketed
            partitions/tables.
        </description>
    </property>

    <property>
        <name>hive.join.emit.interval</name>
        <value>1000</value>
        <description>How many rows in the right-most join operand Hive should
            buffer before emitting the join result.
        </description>
    </property>

    <property>
        <name>hive.join.cache.size</name>
        <value>25000</value>
        <description>How many rows in the joining tables (except the
            streaming table) should be cached in memory.
        </description>
    </property>

    <property>
        <name>hive.mapjoin.bucket.cache.size</name>
        <value>100</value>
        <description>How many values for each key in the map-joined table
            should be cached in memory.
        </description>
    </property>

    <property>
        <name>hive.mapjoin.maxsize</name>
        <value>100000</value>
        <description>Maximum number of rows of the small table that can be
            handled by a map-side join. If this size is reached and
            hive.task.progress is set, a fatal error counter is set and the
            job will be killed.
        </description>
    </property>

    <property>
        <name>hive.mapjoin.cache.numrows</name>
        <value>25000</value>
        <description>How many rows should be cached by jdbm for a map join.
        </description>
    </property>

    <property>
        <name>hive.optimize.skewjoin</name>
        <value>false</value>
        <description>Whether to enable the skew join optimization.
        </description>
    </property>

    <property>
        <name>hive.skewjoin.key</name>
        <value>100000</value>
        <description>Determines whether we have a skew key in a join. If we
            see more than the specified number of rows with the same key in
            the join operator, we treat the key as a skew join key.
        </description>
    </property>

    <property>
        <name>hive.skewjoin.mapjoin.map.tasks</name>
        <value>10000</value>
        <description>Determines the number of map tasks used in the follow-up
            map join job for a skew join. It should be used together with
            hive.skewjoin.mapjoin.min.split to perform fine-grained control.
        </description>
    </property>

    <property>
        <name>hive.skewjoin.mapjoin.min.split</name>
        <value>33554432</value>
        <description>Determines the maximum number of map tasks used in the
            follow-up map join job for a skew join, by specifying the minimum
            split size. It should be used together with
            hive.skewjoin.mapjoin.map.tasks to perform fine-grained control.
        </description>
    </property>

    <property>
        <name>hive.mapred.mode</name>
        <value>nonstrict</value>
        <description>The mode in which Hive operations are performed. In
            strict mode, some risky queries are not allowed to run.
        </description>
    </property>
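
    <!-- Illustrative example (hypothetical table names; this comment is not
        part of the upstream file): in strict mode Hive rejects risky
        queries, for instance a scan of a partitioned table with no partition
        predicate: -->
    <!-- SELECT * FROM sales_by_day; (rejected when hive.mapred.mode=strict) -->
    <!-- SELECT * FROM sales_by_day WHERE day = '2013-08-01'; (accepted) -->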

    <property>
        <name>hive.exec.script.maxerrsize</name>
        <value>100000</value>
        <description>Maximum number of bytes a script is allowed to emit to
            standard error (per map-reduce task). This prevents runaway
            scripts from filling log partitions to capacity.
        </description>
    </property>

    <property>
        <name>hive.exec.script.allow.partial.consumption</name>
        <value>false</value>
        <description>When enabled, this option allows a user script to exit
            successfully without consuming all the data from standard input.
        </description>
    </property>

    <property>
        <name>hive.script.operator.id.env.var</name>
        <value>HIVE_SCRIPT_OPERATOR_ID</value>
        <description>Name of the environment variable that holds the unique
            script operator ID in the user's transform function (the custom
            mapper/reducer that the user has specified in the query).
        </description>
    </property>

    <property>
        <name>hive.exec.compress.output</name>
        <value>false</value>
        <description>This controls whether the final outputs of a query (to a
            local/HDFS file or a Hive table) are compressed. The compression
            codec and other options are determined from the Hadoop config
            variables mapred.output.compress*.
        </description>
    </property>

    <property>
        <name>hive.exec.compress.intermediate</name>
        <value>false</value>
        <description>This controls whether intermediate files produced by
            Hive between multiple map-reduce jobs are compressed. The
            compression codec and other options are determined from the
            Hadoop config variables mapred.output.compress*.
        </description>
    </property>

    <property>
        <name>hive.exec.parallel</name>
        <value>false</value>
        <description>Whether to execute jobs in parallel</description>
    </property>

    <property>
        <name>hive.exec.parallel.thread.number</name>
        <value>8</value>
        <description>How many jobs at most can be executed in parallel
        </description>
    </property>

    <property>
        <name>hive.hwi.war.file</name>
        <value>lib/hive-hwi-0.7.0.war</value>
        <description>This sets the path to the HWI war file, relative to
            ${HIVE_HOME}.
        </description>
    </property>

    <property>
        <name>hive.hwi.listen.host</name>
        <value>0.0.0.0</value>
        <description>This is the host address the Hive Web Interface will
            listen on.
        </description>
    </property>

    <property>
        <name>hive.hwi.listen.port</name>
        <value>9999</value>
        <description>This is the port the Hive Web Interface will listen on.
        </description>
    </property>

    <property>
        <name>hive.exec.pre.hooks</name>
        <value></value>
        <description>Pre-execute hook for tests</description>
    </property>

    <property>
        <name>hive.merge.mapfiles</name>
        <value>true</value>
        <description>Merge small files at the end of a map-only job
        </description>
    </property>

    <property>
        <name>hive.merge.mapredfiles</name>
        <value>false</value>
        <description>Merge small files at the end of a map-reduce job
        </description>
    </property>

    <property>
        <name>hive.heartbeat.interval</name>
        <value>1000</value>
        <description>Send a heartbeat after this interval; used by the
            mapjoin and filter operators.
        </description>
    </property>

    <property>
        <name>hive.merge.size.per.task</name>
        <value>256000000</value>
        <description>Size of merged files at the end of the job</description>
    </property>

    <property>
        <name>hive.merge.size.smallfiles.avgsize</name>
        <value>16000000</value>
        <description>When the average output file size of a job is less than
            this number, Hive will start an additional map-reduce job to
            merge the output files into bigger files. This is only done for
            map-only jobs if hive.merge.mapfiles is true, and for map-reduce
            jobs if hive.merge.mapredfiles is true.
        </description>
    </property>

    <property>
        <name>hive.script.auto.progress</name>
        <value>false</value>
        <description>Whether the Hive Transform/Map/Reduce clause should
            automatically send progress information to the TaskTracker to
            avoid the task getting killed because of inactivity. Hive sends
            progress information when the script is outputting to stderr.
            This option removes the need to periodically produce stderr
            messages, but users should be cautious because it may prevent
            the TaskTracker from killing scripts stuck in infinite loops.
        </description>
    </property>

    <property>
        <name>hive.script.serde</name>
        <value>org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe</value>
        <description>The default SerDe for transmitting input data to and
            reading output data from the user scripts.
        </description>
    </property>

    <property>
        <name>hive.script.recordreader</name>
        <value>org.apache.hadoop.hive.ql.exec.TextRecordReader</value>
        <description>The default record reader for reading data from the user
            scripts.
        </description>
    </property>

    <property>
        <name>hive.script.recordwriter</name>
        <value>org.apache.hadoop.hive.ql.exec.TextRecordWriter</value>
        <description>The default record writer for writing data to the user
            scripts.
        </description>
    </property>

    <property>
        <name>hive.input.format</name>
        <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
        <description>The default input format. If it is not specified, the
            system assigns it: it is set to HiveInputFormat for Hadoop
            versions 17, 18, and 19, whereas it is set to
            CombinedHiveInputFormat for Hadoop 20. The user can always
            override it; if there is a bug in CombinedHiveInputFormat, it can
            always be manually set to HiveInputFormat.
        </description>
    </property>

    <property>
        <name>hive.udtf.auto.progress</name>
        <value>false</value>
        <description>Whether Hive should automatically send progress
            information to the TaskTracker when using UDTFs, to prevent the
            task getting killed because of inactivity. Users should be
            cautious because this may prevent the TaskTracker from killing
            tasks with infinite loops.
        </description>
    </property>

    <property>
        <name>hive.mapred.reduce.tasks.speculative.execution</name>
        <value>true</value>
        <description>Whether speculative execution for reducers should be
            turned on.
        </description>
    </property>

    <property>
        <name>hive.exec.counters.pull.interval</name>
        <value>1000</value>
        <description>The interval at which to poll the JobTracker for the
            counters of the running job. The smaller it is, the more load
            there will be on the JobTracker; the higher it is, the less
            granular the fetched counters will be.
        </description>
    </property>

    <property>
        <name>hive.enforce.bucketing</name>
        <value>false</value>
        <description>Whether bucketing is enforced. If true, bucketing is
            enforced while inserting into the table.
        </description>
    </property>

    <property>
        <name>hive.enforce.sorting</name>
        <value>false</value>
        <description>Whether sorting is enforced. If true, sorting is
            enforced while inserting into the table.
        </description>
    </property>

    <property>
        <name>hive.metastore.ds.connection.url.hook</name>
        <value></value>
        <description>Name of the hook to use for retrieving the JDO
            connection URL. If empty, the value in
            javax.jdo.option.ConnectionURL is used.
        </description>
    </property>

    <property>
        <name>hive.metastore.ds.retry.attempts</name>
        <value>1</value>
        <description>The number of times to retry a metastore call if there
            was a connection error.
        </description>
    </property>

    <property>
        <name>hive.metastore.ds.retry.interval</name>
        <value>1000</value>
        <description>The number of milliseconds between metastore retry
            attempts.
        </description>
    </property>

    <property>
        <name>hive.metastore.server.min.threads</name>
        <value>200</value>
        <description>Minimum number of worker threads in the Thrift server's
            pool.
        </description>
    </property>

    <property>
        <name>hive.metastore.server.max.threads</name>
        <value>100000</value>
        <description>Maximum number of worker threads in the Thrift server's
            pool.
        </description>
    </property>

    <property>
        <name>hive.metastore.server.tcp.keepalive</name>
        <value>true</value>
        <description>Whether to enable TCP keepalive for the metastore
            server. Keepalive will prevent accumulation of half-open
            connections.
        </description>
    </property>

    <property>
        <name>hive.optimize.reducededuplication</name>
        <value>true</value>
        <description>Remove extra map-reduce jobs if the data is already
            clustered by the same key that needs to be used again. This
            should always be set to true. Since it is a new feature, it has
            been made configurable.
        </description>
    </property>

    <property>
        <name>hive.exec.dynamic.partition</name>
        <value>false</value>
        <description>Whether or not to allow dynamic partitions in DML/DDL.
        </description>
    </property>

    <property>
        <name>hive.exec.dynamic.partition.mode</name>
        <value>strict</value>
        <description>In strict mode, the user must specify at least one
            static partition, in case the user accidentally overwrites all
            partitions.
        </description>
    </property>
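
    <!-- Illustrative example (hypothetical table and columns; this comment
        is not part of the upstream file): in strict dynamic-partition mode,
        at least one partition column must be given a static value. -->
    <!-- INSERT OVERWRITE TABLE t PARTITION (country='US', state)
        SELECT col1, col2, state FROM src; (accepted: country is static) -->
    <!-- INSERT OVERWRITE TABLE t PARTITION (country, state)
        SELECT col1, col2, country, state FROM src; (rejected in strict mode) -->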

    <property>
        <name>hive.exec.max.dynamic.partitions</name>
        <value>1000</value>
        <description>Maximum number of dynamic partitions allowed to be
            created in total.
        </description>
    </property>

    <property>
        <name>hive.exec.max.dynamic.partitions.pernode</name>
        <value>100</value>
        <description>Maximum number of dynamic partitions allowed to be
            created in each mapper/reducer node.
        </description>
    </property>

    <property>
        <name>hive.default.partition.name</name>
        <value>__HIVE_DEFAULT_PARTITION__</value>
        <description>The default partition name in case the dynamic partition
            column value is null/empty string or any other value that cannot
            be escaped. This value must not contain any special character
            used in HDFS URIs (e.g., ':', '%', '/' etc.). The user has to be
            aware that a dynamic partition value should not contain this
            value, to avoid confusion.
        </description>
    </property>
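
    <!-- Illustrative note (hypothetical table; this comment is not part of
        the upstream file): if a row's dynamic partition column evaluates to
        NULL or '', the row is written under the fallback partition named
        above, e.g. a directory like t/country=__HIVE_DEFAULT_PARTITION__/
        under the warehouse directory. -->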

    <property>
        <name>fs.har.impl</name>
        <value>org.apache.hadoop.hive.shims.HiveHarFileSystem</value>
        <description>The implementation for accessing Hadoop Archives. Note
            that this won't be applicable to Hadoop versions less than 0.20.
        </description>
    </property>

    <property>
        <name>hive.archive.enabled</name>
        <value>false</value>
        <description>Whether archiving operations are permitted</description>
    </property>

    <property>
        <name>hive.archive.har.parentdir.settable</name>
        <value>false</value>
        <description>In new Hadoop versions, the parent directory must be set
            while creating a HAR. Because this functionality is hard to
            detect with just version numbers, this conf var needs to be set
            manually.
        </description>
    </property>

    <!-- HBase Storage Handler Parameters -->

    <property>
        <name>hive.hbase.wal.enabled</name>
        <value>true</value>
        <description>Whether writes to HBase should be forced to the
            write-ahead log. Disabling this improves HBase write performance
            at the risk of lost writes in case of a crash.
        </description>
    </property>

    <property>
        <name>hive.exec.drop.ignorenonexistent</name>
        <value>true</value>
        <description>Drop table always works (does not fail if the table does
            not exist).
        </description>
    </property>

</configuration>
865</configuration>