<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>

<!-- Hive Configuration can either be stored in this file or in the hadoop -->
<!-- configuration files that are implied by Hadoop setup variables. -->
<!-- Aside from Hadoop setup variables - this file is provided as a -->
<!-- convenience so that Hive users do not have to edit hadoop configuration -->
<!-- files (that may be managed as a centralized resource). -->

<!-- Hive Execution Parameters -->
<property>
  <name>mapred.reduce.tasks</name>
  <value>-1</value>
  <description>The default number of reduce tasks per job. Typically set
    to a prime close to the number of available hosts. Ignored when
    mapred.job.tracker is "local". Hadoop sets this to 1 by default,
    whereas Hive uses -1 as its default value. By setting this property
    to -1, Hive will automatically figure out the number of reducers.
  </description>
</property>
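
<!-- As an illustration (the value 32 below is arbitrary), this default can -->
<!-- also be overridden per session from the Hive CLI: -->
<!-- SET mapred.reduce.tasks=32; -->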

<property>
  <name>hive.hyracks.connectorpolicy</name>
  <value>SEND_SIDE_MAT_PIPELINING</value>
</property>

<property>
  <name>hive.hyracks.host</name>
  <value>127.0.0.1</value>
</property>

<property>
  <name>hive.hyracks.port</name>
  <value>13099</value>
</property>

<property>
  <name>hive.hyracks.app</name>
  <value>hivesterix</value>
</property>

<property>
  <name>hive.hyracks.parrallelism</name>
  <value>2</value>
</property>

<property>
  <name>hive.algebricks.groupby.external</name>
  <value>false</value>
</property>

<property>
  <name>hive.algebricks.groupby.external.memory</name>
  <value>3072</value>
</property>

<property>
  <name>hive.algebricks.sort.memory</name>
  <value>3072</value>
</property>

<property>
  <name>hive.algebricks.framesize</name>
  <value>768</value>
</property>

<property>
  <name>hive.exec.reducers.bytes.per.reducer</name>
  <value>1000000000</value>
  <description>Size per reducer. The default is 1G, i.e. if the input
    size is 10G, it will use 10 reducers.
  </description>
</property>

<property>
  <name>hive.exec.reducers.max</name>
  <value>999</value>
  <description>The maximum number of reducers that will be used. If the
    value specified in the configuration parameter mapred.reduce.tasks is
    negative, Hive will use this as the maximum number of reducers when
    automatically determining the number of reducers.
  </description>
</property>
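
<!-- A worked example of how the two settings above interact (input sizes -->
<!-- are made up): with bytes.per.reducer = 1000000000 (1G), a 5G input -->
<!-- yields ceil(5G / 1G) = 5 reducers, while a 2T input would ask for 2000 -->
<!-- reducers and be capped at hive.exec.reducers.max = 999. -->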

<property>
  <name>hive.exec.scratchdir</name>
  <value>/tmp/hive-${user.name}</value>
  <description>Scratch space for Hive jobs</description>
</property>
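
<!-- Note: ${user.name} is substituted from the JVM system property at -->
<!-- runtime; for a hypothetical user "alice" the scratch space would -->
<!-- resolve to /tmp/hive-alice. -->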

<property>
  <name>hive.test.mode</name>
  <value>false</value>
  <description>whether hive is running in test mode. If yes, it turns on
    sampling and prefixes the output table name
  </description>
</property>

<property>
  <name>hive.test.mode.prefix</name>
  <value>test_</value>
  <description>if hive is running in test mode, prefixes the output
    table by this string
  </description>
</property>

<!-- If the input table is not bucketed, the denominator of the tablesample -->
<!-- is determined by the parameter below -->
<!-- For example, the following query: -->
<!-- INSERT OVERWRITE TABLE dest -->
<!-- SELECT col1 from src -->
<!-- would be converted to -->
<!-- INSERT OVERWRITE TABLE test_dest -->
<!-- SELECT col1 from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1)) -->
<property>
  <name>hive.test.mode.samplefreq</name>
  <value>32</value>
  <description>if hive is running in test mode and the table is not
    bucketed, sampling frequency
  </description>
</property>

<property>
  <name>hive.test.mode.nosamplelist</name>
  <value></value>
  <description>if hive is running in test mode, don't sample the tables
    in this comma-separated list
  </description>
</property>

<property>
  <name>hive.metastore.local</name>
  <value>true</value>
  <description>controls whether to connect to a remote metastore server
    or open a new metastore server in the Hive Client JVM
  </description>
</property>

<property>
  <name>javax.jdo.option.ConnectionURL</name>
  <value>jdbc:derby:;databaseName=metastore_db;create=true</value>
  <description>JDBC connect string for a JDBC metastore</description>
</property>
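
<!-- The Derby URL above embeds the metastore database in the client JVM. -->
<!-- As a sketch only (host and database names are hypothetical), a shared -->
<!-- MySQL-backed metastore would instead use a value such as -->
<!-- jdbc:mysql://dbhost:3306/metastore?createDatabaseIfNotExist=true -->
<!-- with javax.jdo.option.ConnectionDriverName set to com.mysql.jdbc.Driver. -->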

<property>
  <name>javax.jdo.option.ConnectionDriverName</name>
  <value>org.apache.derby.jdbc.EmbeddedDriver</value>
  <description>Driver class name for a JDBC metastore</description>
</property>

<property>
  <name>javax.jdo.PersistenceManagerFactoryClass</name>
  <value>org.datanucleus.jdo.JDOPersistenceManagerFactory</value>
  <description>class implementing the jdo persistence</description>
</property>

<property>
  <name>datanucleus.connectionPoolingType</name>
  <value>DBCP</value>
  <description>Uses a DBCP connection pool for JDBC metastore
  </description>
</property>

<property>
  <name>javax.jdo.option.DetachAllOnCommit</name>
  <value>true</value>
  <description>detaches all objects from session so that they can be
    used after transaction is committed
  </description>
</property>

<property>
  <name>javax.jdo.option.NonTransactionalRead</name>
  <value>true</value>
  <description>reads outside of transactions</description>
</property>

<property>
  <name>javax.jdo.option.ConnectionUserName</name>
  <value>APP</value>
  <description>username to use against metastore database</description>
</property>

<property>
  <name>javax.jdo.option.ConnectionPassword</name>
  <value>mine</value>
  <description>password to use against metastore database</description>
</property>

<property>
  <name>datanucleus.validateTables</name>
  <value>false</value>
  <description>validates existing schema against code. turn this on if
    you want to verify existing schema
  </description>
</property>

<property>
  <name>datanucleus.validateColumns</name>
  <value>false</value>
  <description>validates existing schema against code. turn this on if
    you want to verify existing schema
  </description>
</property>

<property>
  <name>datanucleus.validateConstraints</name>
  <value>false</value>
  <description>validates existing schema against code. turn this on if
    you want to verify existing schema
  </description>
</property>

<property>
  <name>datanucleus.storeManagerType</name>
  <value>rdbms</value>
  <description>metadata store type</description>
</property>

<property>
  <name>datanucleus.autoCreateSchema</name>
  <value>true</value>
  <description>creates the necessary schema on startup if one doesn't
    exist. Set this to false after creating it once
  </description>
</property>

<property>
  <name>datanucleus.autoStartMechanismMode</name>
  <value>checked</value>
  <description>throw exception if metadata tables are incorrect
  </description>
</property>

<property>
  <name>datanucleus.transactionIsolation</name>
  <value>read-committed</value>
  <description>Default transaction isolation level for identity
    generation.
  </description>
</property>

<property>
  <name>datanucleus.cache.level2</name>
  <value>false</value>
  <description>Use a level 2 cache. Turn this off if metadata is changed
    independently of hive metastore server
  </description>
</property>

<property>
  <name>datanucleus.cache.level2.type</name>
  <value>SOFT</value>
  <description>SOFT=soft reference based cache, WEAK=weak reference
    based cache.
  </description>
</property>

<property>
  <name>datanucleus.identifierFactory</name>
  <value>datanucleus</value>
  <description>Name of the identifier factory to use when generating
    table/column names etc. 'datanucleus' is used for backward
    compatibility
  </description>
</property>

<property>
  <name>hive.metastore.warehouse.dir</name>
  <value>/tmp/hivesterix</value>
  <description>location of default database for the warehouse
  </description>
</property>

<property>
  <name>hive.metastore.connect.retries</name>
  <value>5</value>
  <description>Number of retries while opening a connection to metastore
  </description>
</property>

<property>
  <name>hive.metastore.rawstore.impl</name>
  <value>org.apache.hadoop.hive.metastore.ObjectStore</value>
  <description>Name of the class that implements the
    org.apache.hadoop.hive.metastore.rawstore interface. This class is
    used to store and retrieve raw metadata objects such as tables and
    databases
  </description>
</property>

<property>
  <name>hive.default.fileformat</name>
  <value>TextFile</value>
  <description>Default file format for CREATE TABLE statement. Options
    are TextFile and SequenceFile. Users can explicitly say CREATE TABLE
    ... STORED AS &lt;TEXTFILE|SEQUENCEFILE&gt; to override</description>
</property>
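
<!-- For example (table and column names are made up), the per-table -->
<!-- override looks like: -->
<!-- CREATE TABLE logs (line STRING) STORED AS SEQUENCEFILE; -->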

<property>
  <name>hive.fileformat.check</name>
  <value>true</value>
  <description>Whether to check file format or not when loading data
    files
  </description>
</property>

<property>
  <name>hive.map.aggr</name>
  <value>true</value>
  <description>Whether to use map-side aggregation in Hive Group By
    queries
  </description>
</property>

<property>
  <name>hive.groupby.skewindata</name>
  <value>false</value>
  <description>Whether there is skew in data to optimize group by
    queries
  </description>
</property>

<property>
  <name>hive.groupby.mapaggr.checkinterval</name>
  <value>100000</value>
  <description>Number of rows after which the size check of the grouping
    keys/aggregation classes is performed
  </description>
</property>

<property>
  <name>hive.mapred.local.mem</name>
  <value>0</value>
  <description>For local mode, memory of the mappers/reducers
  </description>
</property>

<property>
  <name>hive.map.aggr.hash.percentmemory</name>
  <value>0.5</value>
  <description>Portion of total memory to be used by the map-side group
    aggregation hash table
  </description>
</property>

<property>
  <name>hive.map.aggr.hash.min.reduction</name>
  <value>0.5</value>
  <description>Hash aggregation will be turned off if the ratio between
    hash table size and input rows is bigger than this number. Set to 1
    to make sure hash aggregation is never turned off.
  </description>
</property>

<property>
  <name>hive.optimize.cp</name>
  <value>true</value>
  <description>Whether to enable column pruner</description>
</property>

<property>
  <name>hive.optimize.ppd</name>
  <value>true</value>
  <description>Whether to enable predicate pushdown</description>
</property>

<property>
  <name>hive.optimize.pruner</name>
  <value>true</value>
  <description>Whether to enable the new partition pruner which depends
    on predicate pushdown. If this is disabled, the old partition pruner
    which is based on AST will be enabled.
  </description>
</property>

<property>
  <name>hive.optimize.groupby</name>
  <value>true</value>
  <description>Whether to enable the bucketed group by from bucketed
    partitions/tables.
  </description>
</property>

<property>
  <name>hive.join.emit.interval</name>
  <value>1000</value>
  <description>How many rows in the right-most join operand Hive should
    buffer before emitting the join result.
  </description>
</property>

<property>
  <name>hive.join.cache.size</name>
  <value>25000</value>
  <description>How many rows in the joining tables (except the streaming
    table) should be cached in memory.
  </description>
</property>

<property>
  <name>hive.mapjoin.bucket.cache.size</name>
  <value>100</value>
  <description>How many values for each key in the map-joined table
    should be cached in memory.
  </description>
</property>

<property>
  <name>hive.mapjoin.maxsize</name>
  <value>100000</value>
  <description>Maximum # of rows of the small table that can be handled
    by map-side join. If the size is reached and hive.task.progress is
    set, a fatal error counter is set and the job will be killed.
  </description>
</property>

<property>
  <name>hive.mapjoin.cache.numrows</name>
  <value>25000</value>
  <description>How many rows should be cached by jdbm for map join.
  </description>
</property>

<property>
  <name>hive.optimize.skewjoin</name>
  <value>false</value>
  <description>Whether to enable skew join optimization.</description>
</property>

<property>
  <name>hive.skewjoin.key</name>
  <value>100000</value>
  <description>Determine if we get a skew key in join. If we see more
    than the specified number of rows with the same key in the join
    operator, we treat the key as a skew join key.
  </description>
</property>

<property>
  <name>hive.skewjoin.mapjoin.map.tasks</name>
  <value>10000</value>
  <description>Determine the number of map tasks used in the follow-up
    map join job for a skew join. It should be used together with
    hive.skewjoin.mapjoin.min.split to perform fine-grained control.
  </description>
</property>

<property>
  <name>hive.skewjoin.mapjoin.min.split</name>
  <value>33554432</value>
  <description>Determine the maximum number of map tasks used in the
    follow-up map join job for a skew join by specifying the minimum
    split size. It should be used together with
    hive.skewjoin.mapjoin.map.tasks to perform fine-grained control.
  </description>
</property>

<property>
  <name>hive.mapred.mode</name>
  <value>nonstrict</value>
  <description>The mode in which the hive operations are being
    performed. In strict mode, some risky queries are not allowed to run
  </description>
</property>
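
<!-- In strict mode Hive rejects, among others, queries on a partitioned -->
<!-- table without a partition filter and ORDER BY without LIMIT. A -->
<!-- hypothetical query that strict mode would refuse (no LIMIT clause): -->
<!-- SELECT * FROM sales ORDER BY amount; -->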

<property>
  <name>hive.exec.script.maxerrsize</name>
  <value>100000</value>
  <description>Maximum number of bytes a script is allowed to emit to
    standard error (per map-reduce task). This prevents runaway scripts
    from filling logs partitions to capacity
  </description>
</property>

<property>
  <name>hive.exec.script.allow.partial.consumption</name>
  <value>false</value>
  <description>When enabled, this option allows a user script to exit
    successfully without consuming all the data from the standard input.
  </description>
</property>

<property>
  <name>hive.script.operator.id.env.var</name>
  <value>HIVE_SCRIPT_OPERATOR_ID</value>
  <description>Name of the environment variable that holds the unique
    script operator ID in the user's transform function (the custom
    mapper/reducer that the user has specified in the query)
  </description>
</property>

<property>
  <name>hive.exec.compress.output</name>
  <value>false</value>
  <description>This controls whether the final outputs of a query (to a
    local/hdfs file or a hive table) are compressed. The compression
    codec and other options are determined from the hadoop config
    variables mapred.output.compress*
  </description>
</property>
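
<!-- An illustrative session-level setup (the codec choice is an example, -->
<!-- not a recommendation): -->
<!-- SET hive.exec.compress.output=true; -->
<!-- SET mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec; -->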

<property>
  <name>hive.exec.compress.intermediate</name>
  <value>false</value>
  <description>This controls whether intermediate files produced by
    hive between multiple map-reduce jobs are compressed. The compression
    codec and other options are determined from hadoop config variables
    mapred.output.compress*
  </description>
</property>

<property>
  <name>hive.exec.parallel</name>
  <value>false</value>
  <description>Whether to execute jobs in parallel</description>
</property>

<property>
  <name>hive.exec.parallel.thread.number</name>
  <value>8</value>
  <description>How many jobs at most can be executed in parallel
  </description>
</property>
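
<!-- A sketch of enabling parallel execution for one session (16 is an -->
<!-- arbitrary choice): -->
<!-- SET hive.exec.parallel=true; -->
<!-- SET hive.exec.parallel.thread.number=16; -->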

<property>
  <name>hive.hwi.war.file</name>
  <value>lib/hive-hwi-0.7.0.war</value>
  <description>This sets the path to the HWI war file, relative to
    ${HIVE_HOME}.
  </description>
</property>

<property>
  <name>hive.hwi.listen.host</name>
  <value>0.0.0.0</value>
  <description>This is the host address the Hive Web Interface will
    listen on
  </description>
</property>

<property>
  <name>hive.hwi.listen.port</name>
  <value>9999</value>
  <description>This is the port the Hive Web Interface will listen on
  </description>
</property>

<property>
  <name>hive.exec.pre.hooks</name>
  <value></value>
  <description>Pre Execute Hook for Tests</description>
</property>

<property>
  <name>hive.merge.mapfiles</name>
  <value>true</value>
  <description>Merge small files at the end of a map-only job
  </description>
</property>

<property>
  <name>hive.merge.mapredfiles</name>
  <value>false</value>
  <description>Merge small files at the end of a map-reduce job
  </description>
</property>

<property>
  <name>hive.heartbeat.interval</name>
  <value>1000</value>
  <description>Send a heartbeat after this interval - used by mapjoin
    and filter operators
  </description>
</property>

<property>
  <name>hive.merge.size.per.task</name>
  <value>256000000</value>
  <description>Size of merged files at the end of the job</description>
</property>

<property>
  <name>hive.merge.size.smallfiles.avgsize</name>
  <value>16000000</value>
  <description>When the average output file size of a job is less than
    this number, Hive will start an additional map-reduce job to merge
    the output files into bigger files. This is only done for map-only
    jobs if hive.merge.mapfiles is true, and for map-reduce jobs if
    hive.merge.mapredfiles is true.
  </description>
</property>
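
<!-- To make the interplay concrete (using the default values above): if a -->
<!-- map-reduce job with hive.merge.mapredfiles=true writes files averaging -->
<!-- under 16000000 bytes (16MB), Hive launches an extra merge job that -->
<!-- targets files of about hive.merge.size.per.task = 256000000 bytes (256MB). -->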

<property>
  <name>hive.script.auto.progress</name>
  <value>false</value>
  <description>Whether the Hive Transform/Map/Reduce Clause should
    automatically send progress information to the TaskTracker to avoid
    the task getting killed because of inactivity. Hive sends progress
    information when the script is outputting to stderr. This option
    removes the need to periodically produce stderr messages, but users
    should be cautious because this may prevent infinite loops in the
    scripts from being killed by the TaskTracker.
  </description>
</property>

<property>
  <name>hive.script.serde</name>
  <value>org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe</value>
  <description>The default serde for transmitting input data to and
    reading output data from the user scripts.
  </description>
</property>

<property>
  <name>hive.script.recordreader</name>
  <value>org.apache.hadoop.hive.ql.exec.TextRecordReader</value>
  <description>The default record reader for reading data from the user
    scripts.
  </description>
</property>

<property>
  <name>hive.script.recordwriter</name>
  <value>org.apache.hadoop.hive.ql.exec.TextRecordWriter</value>
  <description>The default record writer for writing data to the user
    scripts.
  </description>
</property>

<property>
  <name>hive.input.format</name>
  <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
  <description>The default input format; if it is not specified, the
    system assigns it. It is set to HiveInputFormat for hadoop versions
    17, 18 and 19, whereas it is set to CombinedHiveInputFormat for
    hadoop 20. The user can always overwrite it - if there is a bug in
    CombinedHiveInputFormat, it can always be manually set to
    HiveInputFormat.
  </description>
</property>

<property>
  <name>hive.udtf.auto.progress</name>
  <value>false</value>
  <description>Whether Hive should automatically send progress
    information to the TaskTracker when using UDTFs to prevent the task
    getting killed because of inactivity. Users should be cautious
    because this may prevent the TaskTracker from killing tasks with
    infinite loops.
  </description>
</property>

<property>
  <name>hive.mapred.reduce.tasks.speculative.execution</name>
  <value>true</value>
  <description>Whether speculative execution for reducers should be
    turned on.
  </description>
</property>

<property>
  <name>hive.exec.counters.pull.interval</name>
  <value>1000</value>
  <description>The interval with which to poll the JobTracker for the
    counters of the running job. The smaller it is, the more load there
    will be on the JobTracker; the higher it is, the less granular the
    data caught will be.
  </description>
</property>

<property>
  <name>hive.enforce.bucketing</name>
  <value>false</value>
  <description>Whether bucketing is enforced. If true, while inserting
    into the table, bucketing is enforced.
  </description>
</property>
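
<!-- A hedged example (table and column names are hypothetical): with -->
<!-- SET hive.enforce.bucketing=true; -->
<!-- an insert into a table declared as -->
<!-- CREATE TABLE users (id INT, name STRING) CLUSTERED BY (id) INTO 32 BUCKETS; -->
<!-- is forced to use 32 reducers so that each bucket is written correctly. -->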

<property>
  <name>hive.enforce.sorting</name>
  <value>false</value>
  <description>Whether sorting is enforced. If true, while inserting
    into the table, sorting is enforced.
  </description>
</property>

<property>
  <name>hive.metastore.ds.connection.url.hook</name>
  <value></value>
  <description>Name of the hook to use for retrieving the JDO connection
    URL. If empty, the value in javax.jdo.option.ConnectionURL is used
  </description>
</property>

<property>
  <name>hive.metastore.ds.retry.attempts</name>
  <value>1</value>
  <description>The number of times to retry a metastore call if there
    was a connection error
  </description>
</property>

<property>
  <name>hive.metastore.ds.retry.interval</name>
  <value>1000</value>
  <description>The number of milliseconds between metastore retry
    attempts
  </description>
</property>

<property>
  <name>hive.metastore.server.min.threads</name>
  <value>200</value>
  <description>Minimum number of worker threads in the Thrift server's
    pool.
  </description>
</property>

<property>
  <name>hive.metastore.server.max.threads</name>
  <value>100000</value>
  <description>Maximum number of worker threads in the Thrift server's
    pool.
  </description>
</property>

<property>
  <name>hive.metastore.server.tcp.keepalive</name>
  <value>true</value>
  <description>Whether to enable TCP keepalive for the metastore server.
    Keepalive will prevent accumulation of half-open connections.
  </description>
</property>

<property>
  <name>hive.optimize.reducededuplication</name>
  <value>true</value>
  <description>Remove extra map-reduce jobs if the data is already
    clustered by the same key which needs to be used again. This should
    always be set to true. Since it is a new feature, it has been made
    configurable.
  </description>
</property>

<property>
  <name>hive.exec.dynamic.partition</name>
  <value>false</value>
  <description>Whether or not to allow dynamic partitions in DML/DDL.
  </description>
</property>

<property>
  <name>hive.exec.dynamic.partition.mode</name>
  <value>strict</value>
  <description>In strict mode, the user must specify at least one static
    partition in case the user accidentally overwrites all partitions.
  </description>
</property>
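
<!-- An illustrative strict-mode insert (table and column names are made -->
<!-- up): ds is static and hr is dynamic, satisfying the "at least one -->
<!-- static partition" rule: -->
<!-- SET hive.exec.dynamic.partition=true; -->
<!-- INSERT OVERWRITE TABLE page_views PARTITION (ds='2012-11-02', hr) -->
<!-- SELECT uid, url, hour FROM raw_views; -->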

<property>
  <name>hive.exec.max.dynamic.partitions</name>
  <value>1000</value>
  <description>Maximum number of dynamic partitions allowed to be
    created in total.
  </description>
</property>

<property>
  <name>hive.exec.max.dynamic.partitions.pernode</name>
  <value>100</value>
  <description>Maximum number of dynamic partitions allowed to be
    created in each mapper/reducer node.
  </description>
</property>

<property>
  <name>hive.default.partition.name</name>
  <value>__HIVE_DEFAULT_PARTITION__</value>
  <description>The default partition name in case the dynamic partition
    column value is null/empty string or any other value that cannot be
    escaped. This value must not contain any special character used in
    HDFS URI (e.g., ':', '%', '/' etc). The user has to be aware that a
    dynamic partition value should not contain this value, to avoid
    confusion.
  </description>
</property>

<property>
  <name>fs.har.impl</name>
  <value>org.apache.hadoop.hive.shims.HiveHarFileSystem</value>
  <description>The implementation for accessing Hadoop Archives. Note
    that this won't be applicable to Hadoop versions less than 0.20
  </description>
</property>

<property>
  <name>hive.archive.enabled</name>
  <value>false</value>
  <description>Whether archiving operations are permitted</description>
</property>

<property>
  <name>hive.archive.har.parentdir.settable</name>
  <value>false</value>
  <description>In new Hadoop versions, the parent directory must be set
    while creating a HAR. Because this functionality is hard to detect
    with just version numbers, this conf var needs to be set manually.
  </description>
</property>

<!-- HBase Storage Handler Parameters -->

<property>
  <name>hive.hbase.wal.enabled</name>
  <value>true</value>
  <description>Whether writes to HBase should be forced to the
    write-ahead log. Disabling this improves HBase write performance at
    the risk of lost writes in case of a crash.
  </description>
</property>

<property>
  <name>hive.exec.drop.ignorenonexistent</name>
  <value>true</value>
  <description>drop table always works, even if the table does not
    exist.
  </description>
</property>

</configuration>