<?xml version="1.0"?>
<!--
 ! Copyright 2009-2013 by The Regents of the University of California
 ! Licensed under the Apache License, Version 2.0 (the "License");
 ! you may not use this file except in compliance with the License.
 ! you may obtain a copy of the License from
 !
 !     http://www.apache.org/licenses/LICENSE-2.0
 !
 ! Unless required by applicable law or agreed to in writing, software
 ! distributed under the License is distributed on an "AS IS" BASIS,
 ! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ! See the License for the specific language governing permissions and
 ! limitations under the License.
 !-->
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>

  <!-- Hive Configuration can either be stored in this file or in the hadoop
       configuration files that are implied by Hadoop setup variables. -->
  <!-- Aside from Hadoop setup variables - this file is provided as a
       convenience so that Hive users do not have to edit hadoop configuration
       files (that may be managed as a centralized resource). -->

  <!-- Hive Execution Parameters -->
  <property>
    <name>mapred.reduce.tasks</name>
    <value>-1</value>
    <description>The default number of reduce tasks per job. Typically set
      to a prime close to the number of available hosts. Ignored when
      mapred.job.tracker is "local". Hadoop sets this to 1 by default,
      whereas Hive uses -1 as its default value. By setting this property
      to -1, Hive will automatically figure out the number of reducers.
    </description>
  </property>

  <property>
    <name>hive.auto.convert.join.noconditionaltask</name>
    <value>false</value>
    <description>Whether Hive enables the optimization of converting a common
      join into a mapjoin based on the input file size. If this parameter is
      on, and the sum of sizes for n-1 of the tables/partitions for an n-way
      join is smaller than the specified size, the join is directly converted
      to a mapjoin (there is no conditional task).
    </description>
  </property>
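
  <!-- Illustrative sketch (the table names below are hypothetical, not part
       of this file): with this flag and hive.auto.convert.join both enabled,
       a 3-way join such as
         SELECT * FROM big b
         JOIN small1 s1 ON (b.k = s1.k)
         JOIN small2 s2 ON (b.k = s2.k)
       is rewritten as a single mapjoin when the total size of small1 and
       small2 (the n-1 smaller tables) stays under the configured threshold. -->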

  <property>
    <name>hive.auto.convert.join</name>
    <value>false</value>
  </property>

  <property>
    <name>hive.hyracks.connectorpolicy</name>
    <value>SEND_SIDE_MAT_PIPELINING</value>
  </property>

  <property>
    <name>hive.hyracks.host</name>
    <value>127.0.0.1</value>
  </property>

  <property>
    <name>hive.hyracks.port</name>
    <value>13099</value>
  </property>

  <property>
    <name>hive.hyracks.app</name>
    <value>hivesterix</value>
  </property>

  <property>
    <name>hive.hyracks.parrallelism</name>
    <value>2</value>
  </property>

  <property>
    <name>hive.algebricks.groupby.external</name>
    <value>true</value>
  </property>

  <property>
    <name>hive.algebricks.groupby.external.memory</name>
    <value>3072</value>
  </property>

  <property>
    <name>hive.algebricks.sort.memory</name>
    <value>3072</value>
  </property>

  <property>
    <name>hive.algebricks.framesize</name>
    <value>768</value>
  </property>

  <property>
    <name>hive.exec.reducers.bytes.per.reducer</name>
    <value>1000000000</value>
    <description>Size per reducer. The default is 1G, i.e. if the input size
      is 10G, it will use 10 reducers.
    </description>
  </property>

  <property>
    <name>hive.exec.reducers.max</name>
    <value>999</value>
    <description>Maximum number of reducers that will be used. If the value
      specified in the configuration parameter mapred.reduce.tasks is
      negative, Hive will use this as the maximum number of reducers when
      automatically determining the number of reducers.
    </description>
  </property>
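
  <!-- Worked example using the two settings above: Hive estimates the
       reducer count as roughly min(hive.exec.reducers.max, ceil(total input
       size / bytes per reducer)). With 1000000000 bytes (1G) per reducer and
       a cap of 999, a 10G input gets ceil(10G / 1G) = 10 reducers; only an
       input approaching 1T would hit the 999 cap. -->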

  <property>
    <name>hive.exec.scratchdir</name>
    <value>/tmp/hive-${user.name}</value>
    <description>Scratch space for Hive jobs</description>
  </property>

  <property>
    <name>hive.test.mode</name>
    <value>false</value>
    <description>Whether Hive is running in test mode. If yes, it turns on
      sampling and prefixes the output table name.
    </description>
  </property>

  <property>
    <name>hive.test.mode.prefix</name>
    <value>test_</value>
    <description>If Hive is running in test mode, prefixes the output
      table by this string.
    </description>
  </property>

  <!-- If the input table is not bucketed, the denominator of the tablesample
       is determined by the parameter below. -->
  <!-- For example, the following query: -->
  <!--   INSERT OVERWRITE TABLE dest -->
  <!--   SELECT col1 from src -->
  <!-- would be converted to -->
  <!--   INSERT OVERWRITE TABLE test_dest -->
  <!--   SELECT col1 from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1)) -->
  <property>
    <name>hive.test.mode.samplefreq</name>
    <value>32</value>
    <description>If Hive is running in test mode and the table is not
      bucketed, the sampling frequency.
    </description>
  </property>

  <property>
    <name>hive.test.mode.nosamplelist</name>
    <value></value>
    <description>If Hive is running in test mode, don't sample the above
      comma-separated list of tables.
    </description>
  </property>

  <property>
    <name>hive.metastore.local</name>
    <value>true</value>
    <description>Controls whether to connect to a remote metastore server or
      open a new metastore server in the Hive Client JVM.
    </description>
  </property>

  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:derby:;databaseName=metastore_db;create=true</value>
    <description>JDBC connect string for a JDBC metastore</description>
  </property>
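
  <!-- The value above embeds a local Derby database. As an illustrative
       alternative (host and database name are hypothetical), a metastore
       shared across clients would typically point at a networked database,
       e.g. jdbc:mysql://dbhost:3306/metastore?createDatabaseIfNotExist=true,
       with javax.jdo.option.ConnectionDriverName changed to match. -->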

  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>org.apache.derby.jdbc.EmbeddedDriver</value>
    <description>Driver class name for a JDBC metastore</description>
  </property>

  <property>
    <name>javax.jdo.PersistenceManagerFactoryClass</name>
    <value>org.datanucleus.jdo.JDOPersistenceManagerFactory</value>
    <description>Class implementing the JDO persistence</description>
  </property>

  <property>
    <name>datanucleus.connectionPoolingType</name>
    <value>DBCP</value>
    <description>Uses a DBCP connection pool for the JDBC metastore.
    </description>
  </property>

  <property>
    <name>javax.jdo.option.DetachAllOnCommit</name>
    <value>true</value>
    <description>Detaches all objects from the session so that they can be
      used after the transaction is committed.
    </description>
  </property>

  <property>
    <name>javax.jdo.option.NonTransactionalRead</name>
    <value>true</value>
    <description>Reads outside of transactions</description>
  </property>

  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>APP</value>
    <description>Username to use against metastore database</description>
  </property>

  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>mine</value>
    <description>Password to use against metastore database</description>
  </property>

  <property>
    <name>datanucleus.validateTables</name>
    <value>false</value>
    <description>Validates existing schema against code. Turn this on if
      you want to verify the existing schema.
    </description>
  </property>

  <property>
    <name>datanucleus.validateColumns</name>
    <value>false</value>
    <description>Validates existing schema against code. Turn this on if
      you want to verify the existing schema.
    </description>
  </property>

  <property>
    <name>datanucleus.validateConstraints</name>
    <value>false</value>
    <description>Validates existing schema against code. Turn this on if
      you want to verify the existing schema.
    </description>
  </property>

  <property>
    <name>datanucleus.storeManagerType</name>
    <value>rdbms</value>
    <description>Metadata store type</description>
  </property>

  <property>
    <name>datanucleus.autoCreateSchema</name>
    <value>true</value>
    <description>Creates the necessary schema on startup if one doesn't
      exist. Set this to false after creating it once.
    </description>
  </property>

  <property>
    <name>datanucleus.autoStartMechanismMode</name>
    <value>checked</value>
    <description>Throw an exception if metadata tables are incorrect.
    </description>
  </property>

  <property>
    <name>datanucleus.transactionIsolation</name>
    <value>read-committed</value>
    <description>Default transaction isolation level for identity
      generation.
    </description>
  </property>

  <property>
    <name>datanucleus.cache.level2</name>
    <value>false</value>
    <description>Use a level 2 cache. Turn this off if metadata is changed
      independently of the hive metastore server.
    </description>
  </property>

  <property>
    <name>datanucleus.cache.level2.type</name>
    <value>SOFT</value>
    <description>SOFT=soft reference based cache, WEAK=weak reference
      based cache.
    </description>
  </property>

  <property>
    <name>datanucleus.identifierFactory</name>
    <value>datanucleus</value>
    <description>Name of the identifier factory to use when generating
      table/column names etc. 'datanucleus' is used for backward
      compatibility.
    </description>
  </property>

  <property>
    <name>hive.metastore.warehouse.dir</name>
    <value>/tmp/hivesterix</value>
    <description>Location of the default database for the warehouse.
    </description>
  </property>

  <property>
    <name>hive.metastore.connect.retries</name>
    <value>5</value>
    <description>Number of retries while opening a connection to the
      metastore.
    </description>
  </property>

  <property>
    <name>hive.metastore.rawstore.impl</name>
    <value>org.apache.hadoop.hive.metastore.ObjectStore</value>
    <description>Name of the class that implements the
      org.apache.hadoop.hive.metastore.rawstore interface. This class is
      used for the storage and retrieval of raw metadata objects such as
      tables and databases.
    </description>
  </property>

  <property>
    <name>hive.default.fileformat</name>
    <value>TextFile</value>
    <description>Default file format for CREATE TABLE statement. Options
      are TextFile and SequenceFile. Users can explicitly say CREATE TABLE
      ... STORED AS &lt;TEXTFILE|SEQUENCEFILE&gt; to override.</description>
  </property>
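
  <!-- Example of the override mentioned above (table and column names are
       illustrative):
         CREATE TABLE logs (line STRING) STORED AS SEQUENCEFILE; -->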

  <property>
    <name>hive.fileformat.check</name>
    <value>true</value>
    <description>Whether to check the file format or not when loading data
      files.
    </description>
  </property>

  <property>
    <name>hive.map.aggr</name>
    <value>true</value>
    <description>Whether to use map-side aggregation in Hive Group By
      queries.
    </description>
  </property>

  <property>
    <name>hive.groupby.skewindata</name>
    <value>false</value>
    <description>Whether there is skew in data to optimize group by
      queries.
    </description>
  </property>

  <property>
    <name>hive.groupby.mapaggr.checkinterval</name>
    <value>100000</value>
    <description>Number of rows after which the size of the grouping
      keys/aggregation classes is checked.
    </description>
  </property>

  <property>
    <name>hive.mapred.local.mem</name>
    <value>0</value>
    <description>For local mode, memory of the mappers/reducers.
    </description>
  </property>

  <property>
    <name>hive.map.aggr.hash.percentmemory</name>
    <value>0.5</value>
    <description>Portion of total memory to be used by the map-side group
      aggregation hash table.
    </description>
  </property>

  <property>
    <name>hive.map.aggr.hash.min.reduction</name>
    <value>0.5</value>
    <description>Hash aggregation will be turned off if the ratio between
      hash table size and input rows is bigger than this number. Set to 1
      to make sure hash aggregation is never turned off.
    </description>
  </property>

  <property>
    <name>hive.optimize.cp</name>
    <value>true</value>
    <description>Whether to enable the column pruner.</description>
  </property>

  <property>
    <name>hive.optimize.ppd</name>
    <value>true</value>
    <description>Whether to enable predicate pushdown.</description>
  </property>

  <property>
    <name>hive.optimize.pruner</name>
    <value>true</value>
    <description>Whether to enable the new partition pruner, which depends
      on predicate pushdown. If this is disabled, the old partition pruner,
      which is based on the AST, will be enabled.
    </description>
  </property>

  <property>
    <name>hive.optimize.groupby</name>
    <value>true</value>
    <description>Whether to enable the bucketed group by from bucketed
      partitions/tables.
    </description>
  </property>

  <property>
    <name>hive.join.emit.interval</name>
    <value>1000</value>
    <description>How many rows in the right-most join operand Hive should
      buffer before emitting the join result.
    </description>
  </property>

  <property>
    <name>hive.join.cache.size</name>
    <value>25000</value>
    <description>How many rows in the joining tables (except the streaming
      table) should be cached in memory.
    </description>
  </property>

  <property>
    <name>hive.mapjoin.bucket.cache.size</name>
    <value>100</value>
    <description>How many values for each key in the map-joined table
      should be cached in memory.
    </description>
  </property>

  <property>
    <name>hive.mapjoin.maxsize</name>
    <value>100000</value>
    <description>Maximum # of rows of the small table that can be handled
      by map-side join. If the size is reached and hive.task.progress is
      set, a fatal error counter is set and the job will be killed.
    </description>
  </property>

  <property>
    <name>hive.mapjoin.cache.numrows</name>
    <value>25000</value>
    <description>How many rows should be cached by jdbm for map join.
    </description>
  </property>

  <property>
    <name>hive.optimize.skewjoin</name>
    <value>false</value>
    <description>Whether to enable the skew join optimization.</description>
  </property>

  <property>
    <name>hive.skewjoin.key</name>
    <value>100000</value>
    <description>Determines if we get a skew key in a join. If we see more
      than the specified number of rows with the same key in the join
      operator, we consider the key a skew join key.
    </description>
  </property>

  <property>
    <name>hive.skewjoin.mapjoin.map.tasks</name>
    <value>10000</value>
    <description>Determines the number of map tasks used in the follow-up
      map join job for a skew join. It should be used together with
      hive.skewjoin.mapjoin.min.split to perform fine-grained control.
    </description>
  </property>

  <property>
    <name>hive.skewjoin.mapjoin.min.split</name>
    <value>33554432</value>
    <description>Determines the maximum number of map tasks used in the
      follow-up map join job for a skew join by specifying the minimum
      split size. It should be used together with
      hive.skewjoin.mapjoin.map.tasks to perform fine-grained control.
    </description>
  </property>
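
  <!-- Rough sketch of how the two skew-join settings above interact: the
       follow-up map join runs about min(hive.skewjoin.mapjoin.map.tasks,
       skewed data size / min split) tasks. 33554432 bytes is a 32M minimum
       split, so roughly 1G of skewed rows would be handled by about 32 map
       tasks rather than the 10000 cap. -->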

  <property>
    <name>hive.mapred.mode</name>
    <value>nonstrict</value>
    <description>The mode in which the Hive operations are being
      performed. In strict mode, some risky queries are not allowed to run.
    </description>
  </property>
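
  <!-- For illustration: in strict mode Hive rejects queries it considers
       risky, such as a scan of a partitioned table with no partition
       predicate in the WHERE clause, or an ORDER BY without a LIMIT. The
       nonstrict value above lets such queries run. -->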

  <property>
    <name>hive.exec.script.maxerrsize</name>
    <value>100000</value>
    <description>Maximum number of bytes a script is allowed to emit to
      standard error (per map-reduce task). This prevents runaway scripts
      from filling log partitions to capacity.
    </description>
  </property>

  <property>
    <name>hive.exec.script.allow.partial.consumption</name>
    <value>false</value>
    <description>When enabled, this option allows a user script to exit
      successfully without consuming all the data from the standard input.
    </description>
  </property>

  <property>
    <name>hive.script.operator.id.env.var</name>
    <value>HIVE_SCRIPT_OPERATOR_ID</value>
    <description>Name of the environment variable that holds the unique
      script operator ID in the user's transform function (the custom
      mapper/reducer that the user has specified in the query).
    </description>
  </property>

  <property>
    <name>hive.exec.compress.output</name>
    <value>false</value>
    <description>This controls whether the final outputs of a query (to a
      local/hdfs file or a Hive table) are compressed. The compression
      codec and other options are determined from the hadoop config
      variables mapred.output.compress*.
    </description>
  </property>

  <property>
    <name>hive.exec.compress.intermediate</name>
    <value>false</value>
    <description>This controls whether intermediate files produced by Hive
      between multiple map-reduce jobs are compressed. The compression
      codec and other options are determined from the hadoop config
      variables mapred.output.compress*.
    </description>
  </property>

  <property>
    <name>hive.exec.parallel</name>
    <value>false</value>
    <description>Whether to execute jobs in parallel.</description>
  </property>

  <property>
    <name>hive.exec.parallel.thread.number</name>
    <value>8</value>
    <description>How many jobs at most can be executed in parallel.
    </description>
  </property>

  <property>
    <name>hive.hwi.war.file</name>
    <value>lib/hive-hwi-0.7.0.war</value>
    <description>This sets the path to the HWI war file, relative to
      ${HIVE_HOME}.
    </description>
  </property>

  <property>
    <name>hive.hwi.listen.host</name>
    <value>0.0.0.0</value>
    <description>This is the host address the Hive Web Interface will
      listen on.
    </description>
  </property>

  <property>
    <name>hive.hwi.listen.port</name>
    <value>9999</value>
    <description>This is the port the Hive Web Interface will listen on.
    </description>
  </property>

  <property>
    <name>hive.exec.pre.hooks</name>
    <value></value>
    <description>Pre Execute Hook for Tests</description>
  </property>

  <property>
    <name>hive.merge.mapfiles</name>
    <value>true</value>
    <description>Merge small files at the end of a map-only job.
    </description>
  </property>

  <property>
    <name>hive.merge.mapredfiles</name>
    <value>false</value>
    <description>Merge small files at the end of a map-reduce job.
    </description>
  </property>

  <property>
    <name>hive.heartbeat.interval</name>
    <value>1000</value>
    <description>Send a heartbeat after this interval - used by mapjoin
      and filter operators.
    </description>
  </property>

  <property>
    <name>hive.merge.size.per.task</name>
    <value>256000000</value>
    <description>Size of merged files at the end of the job.</description>
  </property>

  <property>
    <name>hive.merge.size.smallfiles.avgsize</name>
    <value>16000000</value>
    <description>When the average output file size of a job is less than
      this number, Hive will start an additional map-reduce job to merge
      the output files into bigger files. This is only done for map-only
      jobs if hive.merge.mapfiles is true, and for map-reduce jobs if
      hive.merge.mapredfiles is true.
    </description>
  </property>
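
  <!-- Worked example combining the two merge settings above: if a job writes
       800M of output as 100 files averaging 8M (below the 16000000-byte
       threshold), the merge job rewrites them into files of about
       hive.merge.size.per.task = 256000000 bytes, i.e. roughly 800M / 256M,
       which is 3 to 4 merged files. -->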

  <property>
    <name>hive.script.auto.progress</name>
    <value>false</value>
    <description>Whether the Hive Transform/Map/Reduce clause should
      automatically send progress information to the TaskTracker to avoid
      the task getting killed because of inactivity. Hive sends progress
      information when the script is outputting to stderr. This option
      removes the need to periodically produce stderr messages, but users
      should be cautious because this may prevent infinite loops in the
      scripts from being killed by the TaskTracker.
    </description>
  </property>

  <property>
    <name>hive.script.serde</name>
    <value>org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe</value>
    <description>The default serde for transmitting input data to and
      reading output data from the user scripts.
    </description>
  </property>

  <property>
    <name>hive.script.recordreader</name>
    <value>org.apache.hadoop.hive.ql.exec.TextRecordReader</value>
    <description>The default record reader for reading data from the user
      scripts.
    </description>
  </property>

  <property>
    <name>hive.script.recordwriter</name>
    <value>org.apache.hadoop.hive.ql.exec.TextRecordWriter</value>
    <description>The default record writer for writing data to the user
      scripts.
    </description>
  </property>

  <property>
    <name>hive.input.format</name>
    <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
    <description>The default input format; if it is not specified, the
      system assigns it. It is set to HiveInputFormat for hadoop versions
      17, 18 and 19, whereas it is set to CombinedHiveInputFormat for
      hadoop 20. The user can always overwrite it - if there is a bug in
      CombinedHiveInputFormat, it can always be manually set to
      HiveInputFormat.
    </description>
  </property>

  <property>
    <name>hive.udtf.auto.progress</name>
    <value>false</value>
    <description>Whether Hive should automatically send progress
      information to the TaskTracker when using UDTFs to prevent the task
      getting killed because of inactivity. Users should be cautious
      because this may prevent the TaskTracker from killing tasks with
      infinite loops.
    </description>
  </property>

  <property>
    <name>hive.mapred.reduce.tasks.speculative.execution</name>
    <value>true</value>
    <description>Whether speculative execution for reducers should be
      turned on.
    </description>
  </property>

  <property>
    <name>hive.exec.counters.pull.interval</name>
    <value>1000</value>
    <description>The interval with which to poll the JobTracker for the
      counters of the running job. The smaller it is, the more load there
      will be on the JobTracker; the higher it is, the less granular the
      counter updates will be.
    </description>
  </property>

  <property>
    <name>hive.enforce.bucketing</name>
    <value>false</value>
    <description>Whether bucketing is enforced. If true, while inserting
      into the table, bucketing is enforced.
    </description>
  </property>

  <property>
    <name>hive.enforce.sorting</name>
    <value>false</value>
    <description>Whether sorting is enforced. If true, while inserting
      into the table, sorting is enforced.
    </description>
  </property>

  <property>
    <name>hive.metastore.ds.connection.url.hook</name>
    <value></value>
    <description>Name of the hook to use for retrieving the JDO connection
      URL. If empty, the value in javax.jdo.option.ConnectionURL is used.
    </description>
  </property>

  <property>
    <name>hive.metastore.ds.retry.attempts</name>
    <value>1</value>
    <description>The number of times to retry a metastore call if there
      is a connection error.
    </description>
  </property>

  <property>
    <name>hive.metastore.ds.retry.interval</name>
    <value>1000</value>
    <description>The number of milliseconds between metastore retry
      attempts.
    </description>
  </property>

  <property>
    <name>hive.metastore.server.min.threads</name>
    <value>200</value>
    <description>Minimum number of worker threads in the Thrift server's
      pool.
    </description>
  </property>

  <property>
    <name>hive.metastore.server.max.threads</name>
    <value>100000</value>
    <description>Maximum number of worker threads in the Thrift server's
      pool.
    </description>
  </property>

  <property>
    <name>hive.metastore.server.tcp.keepalive</name>
    <value>true</value>
    <description>Whether to enable TCP keepalive for the metastore server.
      Keepalive will prevent accumulation of half-open connections.
    </description>
  </property>

  <property>
    <name>hive.optimize.reducededuplication</name>
    <value>true</value>
    <description>Remove extra map-reduce jobs if the data is already
      clustered by the same key which needs to be used again. This should
      always be set to true. Since it is a new feature, it has been made
      configurable.
    </description>
  </property>

  <property>
    <name>hive.exec.dynamic.partition</name>
    <value>false</value>
    <description>Whether or not to allow dynamic partitions in DML/DDL.
    </description>
  </property>

  <property>
    <name>hive.exec.dynamic.partition.mode</name>
    <value>strict</value>
    <description>In strict mode, the user must specify at least one static
      partition in case the user accidentally overwrites all partitions.
    </description>
  </property>
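
  <!-- Illustrative example (table and column names are hypothetical): with
       dynamic partitions enabled, strict mode rejects the first INSERT below
       because every partition column is dynamic, while the second is allowed
       since dt is static:
         INSERT OVERWRITE TABLE dest PARTITION (dt, country)
         SELECT col1, dt, country FROM src;
         INSERT OVERWRITE TABLE dest PARTITION (dt='2013-06-05', country)
         SELECT col1, country FROM src; -->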

  <property>
    <name>hive.exec.max.dynamic.partitions</name>
    <value>1000</value>
    <description>Maximum number of dynamic partitions allowed to be
      created in total.
    </description>
  </property>

  <property>
    <name>hive.exec.max.dynamic.partitions.pernode</name>
    <value>100</value>
    <description>Maximum number of dynamic partitions allowed to be
      created in each mapper/reducer node.
    </description>
  </property>

  <property>
    <name>hive.default.partition.name</name>
    <value>__HIVE_DEFAULT_PARTITION__</value>
    <description>The default partition name in case the dynamic partition
      column value is null/empty string or any other value that cannot be
      escaped. This value must not contain any special character used in
      HDFS URIs (e.g., ':', '%', '/' etc.). The user has to be aware that
      the dynamic partition value should not contain this value to avoid
      confusion.
    </description>
  </property>
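
  <!-- For illustration, continuing the hypothetical INSERT above: a row
       whose country value is NULL or the empty string lands in the partition
       country=__HIVE_DEFAULT_PARTITION__ instead of failing the query. -->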

  <property>
    <name>fs.har.impl</name>
    <value>org.apache.hadoop.hive.shims.HiveHarFileSystem</value>
    <description>The implementation for accessing Hadoop Archives. Note
      that this won't be applicable to Hadoop versions less than 0.20.
    </description>
  </property>

  <property>
    <name>hive.archive.enabled</name>
    <value>false</value>
    <description>Whether archiving operations are permitted.</description>
  </property>

  <property>
    <name>hive.archive.har.parentdir.settable</name>
    <value>false</value>
    <description>In new Hadoop versions, the parent directory must be set
      while creating a HAR. Because this functionality is hard to detect
      with just version numbers, this conf var needs to be set manually.
    </description>
  </property>

  <!-- HBase Storage Handler Parameters -->

  <property>
    <name>hive.hbase.wal.enabled</name>
    <value>true</value>
    <description>Whether writes to HBase should be forced to the
      write-ahead log. Disabling this improves HBase write performance at
      the risk of lost writes in case of a crash.
    </description>
  </property>

  <property>
    <name>hive.exec.drop.ignorenonexistent</name>
    <value>true</value>
    <description>Drop table always works.</description>
  </property>

</configuration>