<?xml version="1.0"?>
<!--
 ! Copyright 2009-2013 by The Regents of the University of California
 ! Licensed under the Apache License, Version 2.0 (the "License");
 ! you may not use this file except in compliance with the License.
 ! you may obtain a copy of the License from
 !
 !     http://www.apache.org/licenses/LICENSE-2.0
 !
 ! Unless required by applicable law or agreed to in writing, software
 ! distributed under the License is distributed on an "AS IS" BASIS,
 ! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ! See the License for the specific language governing permissions and
 ! limitations under the License.
 !-->
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>

<!-- Hive Configuration can either be stored in this file or in the hadoop
     configuration files that are implied by Hadoop setup variables. -->
<!-- Aside from Hadoop setup variables - this file is provided as a convenience
     so that Hive users do not have to edit hadoop configuration files (that
     may be managed as a centralized resource). -->

<!-- Hive Execution Parameters -->
<property>
  <name>mapred.reduce.tasks</name>
  <value>-1</value>
  <description>The default number of reduce tasks per job. Typically set
    to a prime close to the number of available hosts. Ignored when
    mapred.job.tracker is "local". Hadoop sets this to 1 by default,
    whereas Hive uses -1 as its default value. By setting this property
    to -1, Hive will automatically figure out the number of reducers.
  </description>
</property>

<property>
  <name>hive.hyracks.host</name>
  <value>127.0.0.1</value>
</property>

<property>
  <name>hive.hyracks.port</name>
  <value>13099</value>
</property>

<property>
  <name>hive.hyracks.app</name>
  <value>hivesterix</value>
</property>

<property>
  <name>hive.hyracks.parrallelism</name>
  <value>2</value>
</property>

<property>
  <name>hive.algebricks.groupby.external</name>
  <value>true</value>
</property>

<property>
  <name>hive.algebricks.groupby.external.memory</name>
  <value>3072</value>
</property>

<property>
  <name>hive.algebricks.sort.memory</name>
  <value>3072</value>
</property>

<property>
  <name>hive.algebricks.framesize</name>
  <value>768</value>
</property>

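<!-- The Hyracks/Algebricks properties above are specific to Hivesterix: they
     name the Hyracks cluster endpoint (host/port), the deployed application,
     the degree of parallelism, and the memory/frame-size budgets for the
     external group-by and sort operators. A minimal sketch of overriding one
     of them for a single session, assuming they are read at query compile
     time like ordinary Hive properties (the value is illustrative):

       SET hive.algebricks.sort.memory=4096;
  -->
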
<property>
  <name>hive.exec.reducers.bytes.per.reducer</name>
  <value>1000000000</value>
  <description>Size per reducer. The default is 1G, i.e. if the input size
    is 10G, it will use 10 reducers.</description>
</property>

<property>
  <name>hive.exec.reducers.max</name>
  <value>999</value>
  <description>The maximum number of reducers that will be used. If the
    value specified in the configuration parameter mapred.reduce.tasks is
    negative, Hive will use this as the maximum number of reducers when
    automatically determining the number of reducers.</description>
</property>
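
<!-- A worked example of how the reducer-count settings interact: with
     mapred.reduce.tasks=-1, Hive estimates
       reducers = min(hive.exec.reducers.max,
                      ceil(input size / hive.exec.reducers.bytes.per.reducer))
     so a 10 GB input yields ceil(10e9 / 1e9) = 10 reducers, capped at 999.
     Setting mapred.reduce.tasks to a non-negative value bypasses the
     estimate entirely. -->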

<property>
  <name>hive.exec.scratchdir</name>
  <value>/tmp/hive-${user.name}</value>
  <description>Scratch space for Hive jobs</description>
</property>

<property>
  <name>hive.test.mode</name>
  <value>false</value>
  <description>Whether Hive is running in test mode. If yes, it turns on
    sampling and prefixes the output table name.</description>
</property>

<property>
  <name>hive.test.mode.prefix</name>
  <value>test_</value>
  <description>If Hive is running in test mode, the output table is
    prefixed with this string.</description>
</property>

<!-- If the input table is not bucketed, the denominator of the tablesample
     is determined by the parameter below. -->
<!-- For example, the following query: -->
<!--   INSERT OVERWRITE TABLE dest -->
<!--   SELECT col1 FROM src -->
<!-- would be converted to -->
<!--   INSERT OVERWRITE TABLE test_dest -->
<!--   SELECT col1 FROM src TABLESAMPLE (BUCKET 1 OUT OF 32 ON rand(1)) -->
<property>
  <name>hive.test.mode.samplefreq</name>
  <value>32</value>
  <description>If Hive is running in test mode and the table is not
    bucketed, the sampling frequency.</description>
</property>

<property>
  <name>hive.test.mode.nosamplelist</name>
  <value></value>
  <description>If Hive is running in test mode, don't sample the tables
    in this comma-separated list.</description>
</property>

<property>
  <name>hive.metastore.local</name>
  <value>true</value>
  <description>Controls whether to connect to a remote metastore server
    or open a new metastore server in the Hive client JVM.</description>
</property>

<property>
  <name>javax.jdo.option.ConnectionURL</name>
  <value>jdbc:derby:;databaseName=metastore_db;create=true</value>
  <description>JDBC connect string for a JDBC metastore</description>
</property>

<property>
  <name>javax.jdo.option.ConnectionDriverName</name>
  <value>org.apache.derby.jdbc.EmbeddedDriver</value>
  <description>Driver class name for a JDBC metastore</description>
</property>

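<!-- The two properties above select an embedded Derby database for the
     metastore. As an illustrative sketch (not part of this setup), pointing
     the metastore at an external MySQL server instead would look like:

       <name>javax.jdo.option.ConnectionURL</name>
       <value>jdbc:mysql://dbhost:3306/metastore?createDatabaseIfNotExist=true</value>

       <name>javax.jdo.option.ConnectionDriverName</name>
       <value>com.mysql.jdbc.Driver</value>

     where dbhost and the metastore database name are placeholders, and the
     MySQL JDBC driver jar must be on Hive's classpath. -->
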
<property>
  <name>javax.jdo.PersistenceManagerFactoryClass</name>
  <value>org.datanucleus.jdo.JDOPersistenceManagerFactory</value>
  <description>Class implementing the JDO persistence layer</description>
</property>

<property>
  <name>datanucleus.connectionPoolingType</name>
  <value>DBCP</value>
  <description>Uses a DBCP connection pool for the JDBC metastore
  </description>
</property>

<property>
  <name>javax.jdo.option.DetachAllOnCommit</name>
  <value>true</value>
  <description>Detaches all objects from the session so that they can be
    used after the transaction is committed.</description>
</property>

<property>
  <name>javax.jdo.option.NonTransactionalRead</name>
  <value>true</value>
  <description>Reads outside of transactions</description>
</property>

<property>
  <name>javax.jdo.option.ConnectionUserName</name>
  <value>APP</value>
  <description>Username to use against the metastore database</description>
</property>

<property>
  <name>javax.jdo.option.ConnectionPassword</name>
  <value>mine</value>
  <description>Password to use against the metastore database</description>
</property>

<property>
  <name>datanucleus.validateTables</name>
  <value>false</value>
  <description>Validates existing schema against code. Turn this on if
    you want to verify the existing schema.</description>
</property>

<property>
  <name>datanucleus.validateColumns</name>
  <value>false</value>
  <description>Validates existing schema against code. Turn this on if
    you want to verify the existing schema.</description>
</property>

<property>
  <name>datanucleus.validateConstraints</name>
  <value>false</value>
  <description>Validates existing schema against code. Turn this on if
    you want to verify the existing schema.</description>
</property>

<property>
  <name>datanucleus.storeManagerType</name>
  <value>rdbms</value>
  <description>Metadata store type</description>
</property>

<property>
  <name>datanucleus.autoCreateSchema</name>
  <value>true</value>
  <description>Creates the necessary schema on startup if one doesn't
    exist. Set this to false after creating it once.</description>
</property>

<property>
  <name>datanucleus.autoStartMechanismMode</name>
  <value>checked</value>
  <description>Throw an exception if metadata tables are incorrect
  </description>
</property>

<property>
  <name>datanucleus.transactionIsolation</name>
  <value>read-committed</value>
  <description>Default transaction isolation level for identity
    generation.</description>
</property>

<property>
  <name>datanucleus.cache.level2</name>
  <value>false</value>
  <description>Use a level 2 cache. Turn this off if metadata is changed
    independently of the Hive metastore server.</description>
</property>

<property>
  <name>datanucleus.cache.level2.type</name>
  <value>SOFT</value>
  <description>SOFT=soft reference based cache, WEAK=weak reference
    based cache.</description>
</property>

<property>
  <name>datanucleus.identifierFactory</name>
  <value>datanucleus</value>
  <description>Name of the identifier factory to use when generating
    table/column names etc. 'datanucleus' is used for backward
    compatibility.</description>
</property>

<property>
  <name>hive.metastore.warehouse.dir</name>
  <value>/tmp/hivesterix</value>
  <description>Location of the default database for the warehouse
  </description>
</property>

<property>
  <name>hive.metastore.connect.retries</name>
  <value>5</value>
  <description>Number of retries while opening a connection to the
    metastore</description>
</property>

<property>
  <name>hive.metastore.rawstore.impl</name>
  <value>org.apache.hadoop.hive.metastore.ObjectStore</value>
  <description>Name of the class that implements the
    org.apache.hadoop.hive.metastore.rawstore interface. This class is
    used to store and retrieve raw metadata objects such as tables and
    databases.</description>
</property>

<property>
  <name>hive.default.fileformat</name>
  <value>TextFile</value>
  <description>Default file format for CREATE TABLE statement. Options
    are TextFile and SequenceFile. Users can explicitly say CREATE TABLE
    ... STORED AS &lt;TEXTFILE|SEQUENCEFILE&gt; to override</description>
</property>

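<!-- For illustration (table and column names are placeholders), overriding
     the default file format for one table:

       CREATE TABLE raw_logs (line STRING) STORED AS SEQUENCEFILE;

     Tables created without a STORED AS clause use the TextFile format
     configured above. -->
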
<property>
  <name>hive.fileformat.check</name>
  <value>true</value>
  <description>Whether to check file format or not when loading data
    files</description>
</property>

<property>
  <name>hive.map.aggr</name>
  <value>true</value>
  <description>Whether to use map-side aggregation in Hive Group By
    queries</description>
</property>

<property>
  <name>hive.groupby.skewindata</name>
  <value>false</value>
  <description>Whether there is skew in the data, to optimize group by
    queries</description>
</property>

<property>
  <name>hive.groupby.mapaggr.checkinterval</name>
  <value>100000</value>
  <description>Number of rows after which the size of the grouping
    keys/aggregation classes is checked</description>
</property>

<property>
  <name>hive.mapred.local.mem</name>
  <value>0</value>
  <description>For local mode, memory of the mappers/reducers
  </description>
</property>

<property>
  <name>hive.map.aggr.hash.percentmemory</name>
  <value>0.5</value>
  <description>Portion of total memory to be used by the map-side group
    aggregation hash table</description>
</property>

<property>
  <name>hive.map.aggr.hash.min.reduction</name>
  <value>0.5</value>
  <description>Hash aggregation will be turned off if the ratio between
    hash table size and input rows is bigger than this number. Set to 1
    to make sure hash aggregation is never turned off.</description>
</property>

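<!-- A worked example of the check above: with
     hive.groupby.mapaggr.checkinterval=100000 and
     hive.map.aggr.hash.min.reduction=0.5, after the first 100000 input rows
     a mapper compares hash table entries to rows seen; if more than
     100000 * 0.5 = 50000 distinct grouping keys have accumulated, map-side
     hash aggregation is abandoned for that task and rows are streamed to
     the reducers instead. -->
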
<property>
  <name>hive.optimize.cp</name>
  <value>true</value>
  <description>Whether to enable the column pruner</description>
</property>

<property>
  <name>hive.optimize.ppd</name>
  <value>true</value>
  <description>Whether to enable predicate pushdown</description>
</property>

<property>
  <name>hive.optimize.pruner</name>
  <value>true</value>
  <description>Whether to enable the new partition pruner, which depends
    on predicate pushdown. If this is disabled, the old partition pruner,
    which is based on the AST, will be enabled.
  </description>
</property>

<property>
  <name>hive.optimize.groupby</name>
  <value>true</value>
  <description>Whether to enable the bucketed group by from bucketed
    partitions/tables.</description>
</property>

<property>
  <name>hive.join.emit.interval</name>
  <value>1000</value>
  <description>How many rows in the right-most join operand Hive should
    buffer before emitting the join result.</description>
</property>

<property>
  <name>hive.join.cache.size</name>
  <value>25000</value>
  <description>How many rows in the joining tables (except the streaming
    table) should be cached in memory.</description>
</property>

<property>
  <name>hive.mapjoin.bucket.cache.size</name>
  <value>100</value>
  <description>How many values for each key in the map-joined table
    should be cached in memory.</description>
</property>

<property>
  <name>hive.mapjoin.maxsize</name>
  <value>100000</value>
  <description>Maximum number of rows of the small table that can be
    handled by map-side join. If this size is reached and
    hive.task.progress is set, a fatal error counter is set and the job
    will be killed.</description>
</property>

<property>
  <name>hive.mapjoin.cache.numrows</name>
  <value>25000</value>
  <description>How many rows should be cached by jdbm for map join.
  </description>
</property>

<property>
  <name>hive.optimize.skewjoin</name>
  <value>false</value>
  <description>Whether to enable skew join optimization.</description>
</property>

<property>
  <name>hive.skewjoin.key</name>
  <value>100000</value>
  <description>Determines whether a key is a skew key in a join. If more
    than the specified number of rows with the same key are seen in the
    join operator, the key is treated as a skew join key.</description>
</property>

<property>
  <name>hive.skewjoin.mapjoin.map.tasks</name>
  <value>10000</value>
  <description>Determines the number of map tasks used in the follow-up
    map join job for a skew join. It should be used together with
    hive.skewjoin.mapjoin.min.split to perform fine-grained control.
  </description>
</property>

<property>
  <name>hive.skewjoin.mapjoin.min.split</name>
  <value>33554432</value>
  <description>Determines the maximum number of map tasks used in the
    follow-up map join job for a skew join, by specifying the minimum
    split size. It should be used together with
    hive.skewjoin.mapjoin.map.tasks to perform fine-grained control.
  </description>
</property>

<property>
  <name>hive.mapred.mode</name>
  <value>nonstrict</value>
  <description>The mode in which the Hive operations are being performed.
    In strict mode, some risky queries are not allowed to run.
  </description>
</property>

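<!-- For illustration: in stock Hive, strict mode rejects, among other
     things, a scan of a partitioned table with no partition predicate in
     the WHERE clause, an ORDER BY without a LIMIT, and a join that
     degenerates into a Cartesian product. With a table sales partitioned
     by ds (names are placeholders):

       SELECT * FROM sales;                          (rejected in strict mode)
       SELECT * FROM sales WHERE ds = '2012-11-01';  (allowed)
  -->
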
<property>
  <name>hive.exec.script.maxerrsize</name>
  <value>100000</value>
  <description>Maximum number of bytes a script is allowed to emit to
    standard error (per map-reduce task). This prevents runaway scripts
    from filling log partitions to capacity.</description>
</property>

<property>
  <name>hive.exec.script.allow.partial.consumption</name>
  <value>false</value>
  <description>When enabled, this option allows a user script to exit
    successfully without consuming all the data from standard input.
  </description>
</property>

<property>
  <name>hive.script.operator.id.env.var</name>
  <value>HIVE_SCRIPT_OPERATOR_ID</value>
  <description>Name of the environment variable that holds the unique
    script operator ID in the user's transform function (the custom
    mapper/reducer that the user has specified in the query)
  </description>
</property>

<property>
  <name>hive.exec.compress.output</name>
  <value>false</value>
  <description>This controls whether the final outputs of a query (to a
    local/HDFS file or a Hive table) are compressed. The compression
    codec and other options are determined from the Hadoop config
    variables mapred.output.compress*</description>
</property>

<property>
  <name>hive.exec.compress.intermediate</name>
  <value>false</value>
  <description>This controls whether intermediate files produced by Hive
    between multiple map-reduce jobs are compressed. The compression
    codec and other options are determined from the Hadoop config
    variables mapred.output.compress*</description>
</property>

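<!-- A minimal sketch of enabling output compression for one session, using
     standard Hadoop codec settings (GzipCodec is just one choice):

       SET hive.exec.compress.output=true;
       SET mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec;
  -->
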
<property>
  <name>hive.exec.parallel</name>
  <value>false</value>
  <description>Whether to execute jobs in parallel</description>
</property>

<property>
  <name>hive.exec.parallel.thread.number</name>
  <value>8</value>
  <description>How many jobs at most can be executed in parallel
  </description>
</property>

<property>
  <name>hive.hwi.war.file</name>
  <value>lib\hive-hwi-0.7.0.war</value>
  <description>This sets the path to the HWI war file, relative to
    ${HIVE_HOME}.</description>
</property>

<property>
  <name>hive.hwi.listen.host</name>
  <value>0.0.0.0</value>
  <description>This is the host address the Hive Web Interface will
    listen on</description>
</property>

<property>
  <name>hive.hwi.listen.port</name>
  <value>9999</value>
  <description>This is the port the Hive Web Interface will listen on
  </description>
</property>

<property>
  <name>hive.exec.pre.hooks</name>
  <value></value>
  <description>Pre Execute Hook for Tests</description>
</property>

<property>
  <name>hive.merge.mapfiles</name>
  <value>true</value>
  <description>Merge small files at the end of a map-only job
  </description>
</property>

<property>
  <name>hive.merge.mapredfiles</name>
  <value>false</value>
  <description>Merge small files at the end of a map-reduce job
  </description>
</property>

<property>
  <name>hive.heartbeat.interval</name>
  <value>1000</value>
  <description>Send a heartbeat after this interval - used by mapjoin
    and filter operators</description>
</property>

<property>
  <name>hive.merge.size.per.task</name>
  <value>256000000</value>
  <description>Size of merged files at the end of the job</description>
</property>

<property>
  <name>hive.merge.size.smallfiles.avgsize</name>
  <value>16000000</value>
  <description>When the average output file size of a job is less than
    this number, Hive will start an additional map-reduce job to merge
    the output files into bigger files. This is only done for map-only
    jobs if hive.merge.mapfiles is true, and for map-reduce jobs if
    hive.merge.mapredfiles is true.</description>
</property>

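<!-- A worked example with the values above: a map-only job that writes
     100 files averaging 8 MB each falls below the 16 MB
     hive.merge.size.smallfiles.avgsize threshold, so (with
     hive.merge.mapfiles=true) Hive schedules a follow-up merge job that
     concatenates the roughly 800 MB of output into files of about
     hive.merge.size.per.task = 256 MB each, i.e. around 4 files. -->
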
<property>
  <name>hive.script.auto.progress</name>
  <value>false</value>
  <description>Whether the Hive Transform/Map/Reduce clause should
    automatically send progress information to the TaskTracker to avoid
    the task getting killed because of inactivity. Hive sends progress
    information when the script is outputting to stderr. This option
    removes the need to periodically produce stderr messages, but users
    should be cautious because it may prevent infinite loops in the
    scripts from being killed by the TaskTracker.</description>
</property>

<property>
  <name>hive.script.serde</name>
  <value>org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe</value>
  <description>The default SerDe for transmitting input data to and
    reading output data from the user scripts.</description>
</property>

<property>
  <name>hive.script.recordreader</name>
  <value>org.apache.hadoop.hive.ql.exec.TextRecordReader</value>
  <description>The default record reader for reading data from the user
    scripts.</description>
</property>

<property>
  <name>hive.script.recordwriter</name>
  <value>org.apache.hadoop.hive.ql.exec.TextRecordWriter</value>
  <description>The default record writer for writing data to the user
    scripts.</description>
</property>

<property>
  <name>hive.input.format</name>
  <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
  <description>The default input format. If it is not specified, the
    system assigns it: it is set to HiveInputFormat for Hadoop versions
    17, 18 and 19, whereas it is set to CombinedHiveInputFormat for
    Hadoop 20. The user can always override it; if there is a bug in
    CombinedHiveInputFormat, it can always be manually set to
    HiveInputFormat.</description>
</property>

<property>
  <name>hive.udtf.auto.progress</name>
  <value>false</value>
  <description>Whether Hive should automatically send progress
    information to the TaskTracker when using UDTFs, to prevent the task
    getting killed because of inactivity. Users should be cautious
    because this may prevent the TaskTracker from killing tasks with
    infinite loops.</description>
</property>

<property>
  <name>hive.mapred.reduce.tasks.speculative.execution</name>
  <value>true</value>
  <description>Whether speculative execution for reducers should be
    turned on.</description>
</property>

<property>
  <name>hive.exec.counters.pull.interval</name>
  <value>1000</value>
  <description>The interval at which to poll the JobTracker for the
    counters of the running job. The smaller it is, the more load there
    will be on the JobTracker; the higher it is, the less granular the
    collected counter data will be.</description>
</property>

<property>
  <name>hive.enforce.bucketing</name>
  <value>false</value>
  <description>Whether bucketing is enforced. If true, while inserting
    into the table, bucketing is enforced.</description>
</property>

<property>
  <name>hive.enforce.sorting</name>
  <value>false</value>
  <description>Whether sorting is enforced. If true, while inserting
    into the table, sorting is enforced.</description>
</property>

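<!-- An illustrative sketch (table and column names are placeholders): with
     hive.enforce.bucketing=true, an INSERT into a table declared as

       CREATE TABLE user_events (id INT, event STRING)
       CLUSTERED BY (id) INTO 32 BUCKETS;

     is forced to use 32 reducers so that each bucket lands in its own file;
     hive.enforce.sorting does the same for a SORTED BY clause. -->
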
<property>
  <name>hive.metastore.ds.connection.url.hook</name>
  <value></value>
  <description>Name of the hook to use for retrieving the JDO connection
    URL. If empty, the value in javax.jdo.option.ConnectionURL is used.
  </description>
</property>

<property>
  <name>hive.metastore.ds.retry.attempts</name>
  <value>1</value>
  <description>The number of times to retry a metastore call if there is
    a connection error</description>
</property>

<property>
  <name>hive.metastore.ds.retry.interval</name>
  <value>1000</value>
  <description>The number of milliseconds between metastore retry
    attempts</description>
</property>

<property>
  <name>hive.metastore.server.min.threads</name>
  <value>200</value>
  <description>Minimum number of worker threads in the Thrift server's
    pool.</description>
</property>

<property>
  <name>hive.metastore.server.max.threads</name>
  <value>100000</value>
  <description>Maximum number of worker threads in the Thrift server's
    pool.</description>
</property>

<property>
  <name>hive.metastore.server.tcp.keepalive</name>
  <value>true</value>
  <description>Whether to enable TCP keepalive for the metastore server.
    Keepalive will prevent accumulation of half-open connections.
  </description>
</property>

<property>
  <name>hive.optimize.reducededuplication</name>
  <value>true</value>
  <description>Remove extra map-reduce jobs if the data is already
    clustered by the same key which needs to be used again. This should
    always be set to true. Since it is a new feature, it has been made
    configurable.</description>
</property>

<property>
  <name>hive.exec.dynamic.partition</name>
  <value>false</value>
  <description>Whether or not to allow dynamic partitions in DML/DDL.
  </description>
</property>

<property>
  <name>hive.exec.dynamic.partition.mode</name>
  <value>strict</value>
  <description>In strict mode, the user must specify at least one static
    partition in case the user accidentally overwrites all partitions.
  </description>
</property>

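<!-- An illustrative sketch (table names are placeholders): with dynamic
     partitioning enabled, strict mode requires at least one static
     partition column in the PARTITION clause:

       SET hive.exec.dynamic.partition=true;
       INSERT OVERWRITE TABLE target PARTITION (country='US', state)
       SELECT id, name, state FROM staging;

     Here country is static and state is dynamic; nonstrict mode would allow
     every partition column to be dynamic. -->
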
<property>
  <name>hive.exec.max.dynamic.partitions</name>
  <value>1000</value>
  <description>Maximum number of dynamic partitions allowed to be
    created in total.</description>
</property>

<property>
  <name>hive.exec.max.dynamic.partitions.pernode</name>
  <value>100</value>
  <description>Maximum number of dynamic partitions allowed to be
    created in each mapper/reducer node.</description>
</property>

<property>
  <name>hive.default.partition.name</name>
  <value>__HIVE_DEFAULT_PARTITION__</value>
  <description>The default partition name used when the dynamic
    partition column value is null/empty string or any other value that
    cannot be escaped. This value must not contain any special character
    used in HDFS URIs (e.g., ':', '%', '/' etc.). The user has to be
    aware that dynamic partition values should not contain this value,
    to avoid confusion.</description>
</property>

<property>
  <name>fs.har.impl</name>
  <value>org.apache.hadoop.hive.shims.HiveHarFileSystem</value>
  <description>The implementation for accessing Hadoop Archives. Note
    that this won't be applicable to Hadoop versions less than 0.20.
  </description>
</property>

<property>
  <name>hive.archive.enabled</name>
  <value>false</value>
  <description>Whether archiving operations are permitted</description>
</property>

<property>
  <name>hive.archive.har.parentdir.settable</name>
  <value>false</value>
  <description>In new Hadoop versions, the parent directory must be set
    while creating a HAR. Because this functionality is hard to detect
    with just version numbers, this conf var needs to be set manually.
  </description>
</property>

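<!-- For illustration (table name and partition spec are placeholders): with
     hive.archive.enabled=true, a partition is archived and restored with

       ALTER TABLE page_views ARCHIVE PARTITION (ds='2012-11-01');
       ALTER TABLE page_views UNARCHIVE PARTITION (ds='2012-11-01');

     which packs the partition's files into a single Hadoop Archive to reduce
     NameNode pressure from many small files. -->
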
<!-- HBase Storage Handler Parameters -->

<property>
  <name>hive.hbase.wal.enabled</name>
  <value>true</value>
  <description>Whether writes to HBase should be forced to the
    write-ahead log. Disabling this improves HBase write performance at
    the risk of lost writes in case of a crash.</description>
</property>

<property>
  <name>hive.exec.drop.ignorenonexistent</name>
  <value>true</value>
  <description>If true, DROP TABLE does not fail when the table does not
    exist.</description>
</property>

</configuration>