<?xml version="1.0"?>
<!--
 ! Copyright 2009-2013 by The Regents of the University of California
 ! Licensed under the Apache License, Version 2.0 (the "License");
 ! you may not use this file except in compliance with the License.
 ! you may obtain a copy of the License from
 !
 !     http://www.apache.org/licenses/LICENSE-2.0
 !
 ! Unless required by applicable law or agreed to in writing, software
 ! distributed under the License is distributed on an "AS IS" BASIS,
 ! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ! See the License for the specific language governing permissions and
 ! limitations under the License.
 !-->
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>

    <!-- Hive configuration can either be stored in this file or in the Hadoop
        configuration files that are implied by Hadoop setup variables. -->
    <!-- Aside from Hadoop setup variables, this file is provided as a
        convenience so that Hive users do not have to edit Hadoop configuration
        files (that may be managed as a centralized resource). -->

    <!-- Hive Execution Parameters -->
    <property>
        <name>mapred.reduce.tasks</name>
        <value>-1</value>
        <description>The default number of reduce tasks per job. Typically set
            to a prime close to the number of available hosts. Ignored when
            mapred.job.tracker is "local". Hadoop sets this to 1 by default,
            whereas Hive uses -1 as its default value. By setting this
            property to -1, Hive will automatically figure out the number of
            reducers.
        </description>
    </property>

    <property>
        <name>hive.hyracks.connectorpolicy</name>
        <value>PIPELINING</value>
    </property>

    <property>
        <name>hive.hyracks.parrallelism</name>
        <value>4</value>
    </property>

    <property>
        <name>hive.algebricks.groupby.external</name>
        <value>true</value>
    </property>

    <property>
        <name>hive.algebricks.groupby.external.memory</name>
        <value>33554432</value>
    </property>

    <property>
        <name>hive.algebricks.sort.memory</name>
        <value>33554432</value>
    </property>

    <property>
        <name>hive.exec.reducers.bytes.per.reducer</name>
        <value>1000000000</value>
        <description>Size per reducer. The default is 1G, i.e. if the input
            size is 10G, it will use 10 reducers.</description>
    </property>

    <property>
        <name>hive.exec.reducers.max</name>
        <value>999</value>
        <description>The maximum number of reducers that will be used. If the
            value specified in mapred.reduce.tasks is negative, Hive will use
            this as the maximum number of reducers when automatically
            determining the number of reducers.</description>
    </property>
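
    <!-- For illustration (not a setting): with mapred.reduce.tasks = -1, Hive
        derives the reducer count roughly as
            reducers = min(ceil(total input size / hive.exec.reducers.bytes.per.reducer),
                           hive.exec.reducers.max)
        e.g. a 10 GB input with the values above gives
        min(ceil(10e9 / 1e9), 999) = 10 reducers. -->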

    <property>
        <name>hive.exec.scratchdir</name>
        <value>/hive-${user.name}</value>
        <description>Scratch space for Hive jobs</description>
    </property>
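
    <!-- Note: ${user.name} is expanded from the Java system property at run
        time; for a hypothetical user "joe" the scratch directory resolves to
        /hive-joe on the default file system. -->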

    <property>
        <name>hive.test.mode</name>
        <value>false</value>
        <description>Whether Hive is running in test mode. If yes, it turns on
            sampling and prefixes the output table name.</description>
    </property>

    <property>
        <name>hive.test.mode.prefix</name>
        <value>test_</value>
        <description>If Hive is running in test mode, prefixes the output
            table by this string.</description>
    </property>

    <!-- If the input table is not bucketed, the denominator of the tablesample
        is determined by the parameter below -->
    <!-- For example, the following query: -->
    <!-- INSERT OVERWRITE TABLE dest -->
    <!-- SELECT col1 from src -->
    <!-- would be converted to -->
    <!-- INSERT OVERWRITE TABLE test_dest -->
    <!-- SELECT col1 from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1)) -->
    <property>
        <name>hive.test.mode.samplefreq</name>
        <value>32</value>
        <description>If Hive is running in test mode and the table is not
            bucketed, the sampling frequency.</description>
    </property>

    <property>
        <name>hive.test.mode.nosamplelist</name>
        <value></value>
        <description>If Hive is running in test mode, don't sample the above
            comma-separated list of tables.</description>
    </property>

    <property>
        <name>hive.metastore.local</name>
        <value>true</value>
        <description>Controls whether to connect to a remote metastore server
            or open a new metastore server in the Hive client JVM.</description>
    </property>

    <property>
        <name>javax.jdo.option.ConnectionURL</name>
        <value>jdbc:derby:;databaseName=metastore_db;create=true</value>
        <description>JDBC connect string for a JDBC metastore</description>
    </property>

    <property>
        <name>javax.jdo.option.ConnectionDriverName</name>
        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
        <description>Driver class name for a JDBC metastore</description>
    </property>
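
    <!-- A sketch of pointing the metastore at a shared MySQL server instead
        of the embedded Derby database (the host "metastore-host", database
        "hivemeta", and credentials are hypothetical; the MySQL JDBC driver
        jar must be on Hive's classpath):

        <property>
            <name>javax.jdo.option.ConnectionURL</name>
            <value>jdbc:mysql://metastore-host:3306/hivemeta?createDatabaseIfNotExist=true</value>
        </property>
        <property>
            <name>javax.jdo.option.ConnectionDriverName</name>
            <value>com.mysql.jdbc.Driver</value>
        </property>
    -->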

    <property>
        <name>javax.jdo.PersistenceManagerFactoryClass</name>
        <value>org.datanucleus.jdo.JDOPersistenceManagerFactory</value>
        <description>Class implementing the JDO persistence.</description>
    </property>

    <property>
        <name>datanucleus.connectionPoolingType</name>
        <value>DBCP</value>
        <description>Uses a DBCP connection pool for the JDBC metastore.
        </description>
    </property>

    <property>
        <name>javax.jdo.option.DetachAllOnCommit</name>
        <value>true</value>
        <description>Detaches all objects from the session so that they can be
            used after the transaction is committed.</description>
    </property>

    <property>
        <name>javax.jdo.option.NonTransactionalRead</name>
        <value>true</value>
        <description>Reads outside of transactions.</description>
    </property>

    <property>
        <name>javax.jdo.option.ConnectionUserName</name>
        <value>APP</value>
        <description>Username to use against the metastore database.</description>
    </property>

    <property>
        <name>javax.jdo.option.ConnectionPassword</name>
        <value>mine</value>
        <description>Password to use against the metastore database.</description>
    </property>

    <property>
        <name>datanucleus.validateTables</name>
        <value>false</value>
        <description>Validates the existing schema against the code. Turn this
            on if you want to verify the existing schema.</description>
    </property>

    <property>
        <name>datanucleus.validateColumns</name>
        <value>false</value>
        <description>Validates the existing schema against the code. Turn this
            on if you want to verify the existing schema.</description>
    </property>

    <property>
        <name>datanucleus.validateConstraints</name>
        <value>false</value>
        <description>Validates the existing schema against the code. Turn this
            on if you want to verify the existing schema.</description>
    </property>

    <property>
        <name>datanucleus.storeManagerType</name>
        <value>rdbms</value>
        <description>Metadata store type.</description>
    </property>

    <property>
        <name>datanucleus.autoCreateSchema</name>
        <value>true</value>
        <description>Creates the necessary schema on startup if one doesn't
            exist. Set this to false after creating it once.</description>
    </property>

    <property>
        <name>datanucleus.autoStartMechanismMode</name>
        <value>checked</value>
        <description>Throw an exception if metadata tables are incorrect.
        </description>
    </property>

    <property>
        <name>datanucleus.transactionIsolation</name>
        <value>read-committed</value>
        <description>Default transaction isolation level for identity
            generation.</description>
    </property>

    <property>
        <name>datanucleus.cache.level2</name>
        <value>false</value>
        <description>Use a level 2 cache. Turn this off if metadata is changed
            independently of the Hive metastore server.</description>
    </property>

    <property>
        <name>datanucleus.cache.level2.type</name>
        <value>SOFT</value>
        <description>SOFT=soft reference based cache, WEAK=weak reference
            based cache.</description>
    </property>

    <property>
        <name>datanucleus.identifierFactory</name>
        <value>datanucleus</value>
        <description>Name of the identifier factory to use when generating
            table/column names etc. 'datanucleus' is used for backward
            compatibility.</description>
    </property>

    <property>
        <name>hive.metastore.warehouse.dir</name>
        <value>/user/hivesterix</value>
        <description>Location of the default database for the warehouse.
        </description>
    </property>

    <property>
        <name>hive.metastore.connect.retries</name>
        <value>5</value>
        <description>Number of retries while opening a connection to the
            metastore.</description>
    </property>

    <property>
        <name>hive.metastore.rawstore.impl</name>
        <value>org.apache.hadoop.hive.metastore.ObjectStore</value>
        <description>Name of the class that implements the
            org.apache.hadoop.hive.metastore.rawstore interface. This class is
            used for the storage and retrieval of raw metadata objects such as
            tables and databases.</description>
    </property>

    <property>
        <name>hive.default.fileformat</name>
        <value>TextFile</value>
        <description>Default file format for CREATE TABLE statements. Options
            are TextFile and SequenceFile. Users can explicitly say CREATE TABLE
            ... STORED AS &lt;TEXTFILE|SEQUENCEFILE&gt; to override.</description>
    </property>

    <property>
        <name>hive.fileformat.check</name>
        <value>true</value>
        <description>Whether to check the file format or not when loading data
            files.</description>
    </property>

    <property>
        <name>hive.map.aggr</name>
        <value>true</value>
        <description>Whether to use map-side aggregation in Hive GROUP BY
            queries.</description>
    </property>

    <property>
        <name>hive.groupby.skewindata</name>
        <value>false</value>
        <description>Whether there is skew in the data, to optimize GROUP BY
            queries.</description>
    </property>

    <property>
        <name>hive.groupby.mapaggr.checkinterval</name>
        <value>100000</value>
        <description>Number of rows after which the size of the grouping
            keys/aggregation classes is checked.</description>
    </property>

    <property>
        <name>hive.mapred.local.mem</name>
        <value>0</value>
        <description>For local mode, the memory of the mappers/reducers.
        </description>
    </property>

    <property>
        <name>hive.map.aggr.hash.percentmemory</name>
        <value>0.5</value>
        <description>Portion of total memory to be used by the map-side group
            aggregation hash table.</description>
    </property>

    <property>
        <name>hive.map.aggr.hash.min.reduction</name>
        <value>0.5</value>
        <description>Hash aggregation will be turned off if the ratio between
            hash table size and input rows is bigger than this number. Set to
            1 to make sure hash aggregation is never turned off.</description>
    </property>
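
    <!-- For illustration: with the values above, after the first 100000 input
        rows (hive.groupby.mapaggr.checkinterval) the map-side hash table is
        inspected; if it holds more than 0.5 * 100000 = 50000 distinct
        grouping keys, hash aggregation is abandoned for that mapper and rows
        are streamed to the reducers instead. -->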

    <property>
        <name>hive.optimize.cp</name>
        <value>true</value>
        <description>Whether to enable the column pruner.</description>
    </property>

    <property>
        <name>hive.optimize.ppd</name>
        <value>true</value>
        <description>Whether to enable predicate pushdown.</description>
    </property>

    <property>
        <name>hive.optimize.pruner</name>
        <value>true</value>
        <description>Whether to enable the new partition pruner, which depends
            on predicate pushdown. If this is disabled, the old partition
            pruner, which is based on the AST, will be enabled.
        </description>
    </property>

    <property>
        <name>hive.optimize.groupby</name>
        <value>true</value>
        <description>Whether to enable the bucketed group by from bucketed
            partitions/tables.</description>
    </property>

    <property>
        <name>hive.join.emit.interval</name>
        <value>1000</value>
        <description>How many rows in the right-most join operand Hive should
            buffer before emitting the join result.</description>
    </property>

    <property>
        <name>hive.join.cache.size</name>
        <value>25000</value>
        <description>How many rows in the joining tables (except the streaming
            table) should be cached in memory.</description>
    </property>

    <property>
        <name>hive.mapjoin.bucket.cache.size</name>
        <value>100</value>
        <description>How many values for each key in the map-joined table
            should be cached in memory.</description>
    </property>

    <property>
        <name>hive.mapjoin.maxsize</name>
        <value>100000</value>
        <description>Maximum number of rows of the small table that can be
            handled by a map-side join. If the size is reached and
            hive.task.progress is set, a fatal error counter is set and the
            job will be killed.
        </description>
    </property>

    <property>
        <name>hive.mapjoin.cache.numrows</name>
        <value>25000</value>
        <description>How many rows should be cached by jdbm for a map join.
        </description>
    </property>

    <property>
        <name>hive.optimize.skewjoin</name>
        <value>false</value>
        <description>Whether to enable the skew join optimization.</description>
    </property>

    <property>
        <name>hive.skewjoin.key</name>
        <value>100000</value>
        <description>Determines whether we have a skew key in a join. If we
            see more than the specified number of rows with the same key in
            the join operator, we consider the key a skew join key.</description>
    </property>

    <property>
        <name>hive.skewjoin.mapjoin.map.tasks</name>
        <value>10000</value>
        <description>Determines the number of map tasks used in the follow-up
            map join job for a skew join. It should be used together with
            hive.skewjoin.mapjoin.min.split to perform fine-grained control.</description>
    </property>

    <property>
        <name>hive.skewjoin.mapjoin.min.split</name>
        <value>33554432</value>
        <description>Determines the maximum number of map tasks used in the
            follow-up map join job for a skew join by specifying the minimum
            split size. It should be used together with
            hive.skewjoin.mapjoin.map.tasks to perform fine-grained control.</description>
    </property>
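
    <!-- For illustration (a rough reading of the two settings above): the
        mapper count of the follow-up map join job is bounded approximately as
            mappers = min(hive.skewjoin.mapjoin.map.tasks,
                          skewed data size / hive.skewjoin.mapjoin.min.split)
        so with the 32 MB (33554432-byte) minimum split above, 1 GB of skewed
        keys yields at most ~32 mappers. -->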

    <property>
        <name>hive.mapred.mode</name>
        <value>nonstrict</value>
        <description>The mode in which Hive operations are performed. In
            strict mode, some risky queries are not allowed to run.
        </description>
    </property>
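
    <!-- For illustration: in strict mode Hive rejects, among others, scans of
        a partitioned table without a partition filter, ORDER BY without a
        LIMIT clause, and cartesian-product joins; e.g., for a hypothetical
        table partitioned by dt:
            SELECT * FROM partitioned_tbl;                          (rejected)
            SELECT * FROM partitioned_tbl WHERE dt = '2013-01-01';  (allowed)
    -->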

    <property>
        <name>hive.exec.script.maxerrsize</name>
        <value>100000</value>
        <description>Maximum number of bytes a script is allowed to emit to
            standard error (per map-reduce task). This prevents runaway
            scripts from filling log partitions to capacity.</description>
    </property>

    <property>
        <name>hive.exec.script.allow.partial.consumption</name>
        <value>false</value>
        <description>When enabled, this option allows a user script to exit
            successfully without consuming all the data from standard input.
        </description>
    </property>

    <property>
        <name>hive.script.operator.id.env.var</name>
        <value>HIVE_SCRIPT_OPERATOR_ID</value>
        <description>Name of the environment variable that holds the unique
            script operator ID in the user's transform function (the custom
            mapper/reducer that the user has specified in the query).
        </description>
    </property>

    <property>
        <name>hive.exec.compress.output</name>
        <value>false</value>
        <description>This controls whether the final outputs of a query (to a
            local/HDFS file or a Hive table) are compressed. The compression
            codec and other options are determined from the Hadoop config
            variables mapred.output.compress*.</description>
    </property>

    <property>
        <name>hive.exec.compress.intermediate</name>
        <value>false</value>
        <description>This controls whether intermediate files produced by
            Hive between multiple map-reduce jobs are compressed. The
            compression codec and other options are determined from the Hadoop
            config variables mapred.output.compress*.</description>
    </property>
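
    <!-- A sketch of the Hadoop settings the two compression flags above defer
        to (GzipCodec shown as one stock choice; any
        org.apache.hadoop.io.compress.CompressionCodec implementation works):

        <property>
            <name>mapred.output.compress</name>
            <value>true</value>
        </property>
        <property>
            <name>mapred.output.compression.codec</name>
            <value>org.apache.hadoop.io.compress.GzipCodec</value>
        </property>
    -->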

    <property>
        <name>hive.exec.parallel</name>
        <value>false</value>
        <description>Whether to execute jobs in parallel.</description>
    </property>

    <property>
        <name>hive.exec.parallel.thread.number</name>
        <value>8</value>
        <description>How many jobs at most can be executed in parallel.
        </description>
    </property>

    <property>
        <name>hive.hwi.war.file</name>
        <value>lib/hive-hwi-0.7.0.war</value>
        <description>This sets the path to the HWI war file, relative to
            ${HIVE_HOME}.</description>
    </property>

    <property>
        <name>hive.hwi.listen.host</name>
        <value>0.0.0.0</value>
        <description>This is the host address the Hive Web Interface will
            listen on.</description>
    </property>

    <property>
        <name>hive.hwi.listen.port</name>
        <value>9999</value>
        <description>This is the port the Hive Web Interface will listen on.
        </description>
    </property>

    <property>
        <name>hive.exec.pre.hooks</name>
        <value></value>
        <description>Pre-execute hook for tests.</description>
    </property>

    <property>
        <name>hive.merge.mapfiles</name>
        <value>true</value>
        <description>Merge small files at the end of a map-only job.
        </description>
    </property>

    <property>
        <name>hive.merge.mapredfiles</name>
        <value>false</value>
        <description>Merge small files at the end of a map-reduce job.
        </description>
    </property>

    <property>
        <name>hive.heartbeat.interval</name>
        <value>1000</value>
        <description>Send a heartbeat after this interval - used by map join
            and filter operators.</description>
    </property>

    <property>
        <name>hive.merge.size.per.task</name>
        <value>256000000</value>
        <description>Size of merged files at the end of the job.</description>
    </property>

    <property>
        <name>hive.merge.size.smallfiles.avgsize</name>
        <value>16000000</value>
        <description>When the average output file size of a job is less than
            this number, Hive will start an additional map-reduce job to merge
            the output files into bigger files. This is only done for map-only
            jobs if hive.merge.mapfiles is true, and for map-reduce jobs if
            hive.merge.mapredfiles is true.</description>
    </property>
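
    <!-- For illustration: with the two values above, a job whose output
        averages under 16 MB per file triggers a merge pass that concatenates
        the output into files of roughly hive.merge.size.per.task = 256 MB
        each, e.g. 1000 files of 1 MB become about four 256 MB files. -->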

    <property>
        <name>hive.script.auto.progress</name>
        <value>false</value>
        <description>Whether the Hive Transform/Map/Reduce clause should
            automatically send progress information to the TaskTracker to
            avoid the task getting killed because of inactivity. Hive sends
            progress information when the script is outputting to stderr.
            This option removes the need to periodically produce stderr
            messages, but users should be cautious because this may prevent
            the TaskTracker from killing scripts stuck in infinite loops.</description>
    </property>

    <property>
        <name>hive.script.serde</name>
        <value>org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe</value>
        <description>The default SerDe for transmitting input data to and
            reading output data from the user scripts.</description>
    </property>

    <property>
        <name>hive.script.recordreader</name>
        <value>org.apache.hadoop.hive.ql.exec.TextRecordReader</value>
        <description>The default record reader for reading data from the user
            scripts.</description>
    </property>

    <property>
        <name>hive.script.recordwriter</name>
        <value>org.apache.hadoop.hive.ql.exec.TextRecordWriter</value>
        <description>The default record writer for writing data to the user
            scripts.</description>
    </property>

    <property>
        <name>hive.input.format</name>
        <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
        <description>The default input format; if it is not specified, the
            system assigns it. It is set to HiveInputFormat for Hadoop
            versions 17, 18 and 19, whereas it is set to
            CombinedHiveInputFormat for Hadoop 20. The user can always
            override it - if there is a bug in CombinedHiveInputFormat, it
            can always be manually set to HiveInputFormat.</description>
    </property>

    <property>
        <name>hive.udtf.auto.progress</name>
        <value>false</value>
        <description>Whether Hive should automatically send progress
            information to the TaskTracker when using UDTFs, to prevent the
            task getting killed because of inactivity. Users should be
            cautious because this may prevent the TaskTracker from killing
            tasks with infinite loops.</description>
    </property>

    <property>
        <name>hive.mapred.reduce.tasks.speculative.execution</name>
        <value>true</value>
        <description>Whether speculative execution for reducers should be
            turned on.</description>
    </property>

    <property>
        <name>hive.exec.counters.pull.interval</name>
        <value>1000</value>
        <description>The interval at which to poll the JobTracker for the
            counters of the running job. The smaller it is, the more load
            there will be on the JobTracker; the higher it is, the less
            granular the retrieved counters will be.</description>
    </property>

    <property>
        <name>hive.enforce.bucketing</name>
        <value>false</value>
        <description>Whether bucketing is enforced. If true, bucketing is
            enforced while inserting into the table.</description>
    </property>

    <property>
        <name>hive.enforce.sorting</name>
        <value>false</value>
        <description>Whether sorting is enforced. If true, sorting is
            enforced while inserting into the table.</description>
    </property>
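
    <!-- For illustration (hypothetical table): the two settings above matter
        for tables declared with bucketing/sorting metadata, e.g.

        CREATE TABLE user_events (uid INT, ts STRING)
        CLUSTERED BY (uid) SORTED BY (uid) INTO 32 BUCKETS;

        With both flags set to true, INSERTs into user_events get the reducer
        count and sort order needed to actually produce 32 sorted buckets. -->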

    <property>
        <name>hive.metastore.ds.connection.url.hook</name>
        <value></value>
        <description>Name of the hook to use for retrieving the JDO connection
            URL. If empty, the value in javax.jdo.option.ConnectionURL is used.
        </description>
    </property>

    <property>
        <name>hive.metastore.ds.retry.attempts</name>
        <value>1</value>
        <description>The number of times to retry a metastore call if there
            was a connection error.</description>
    </property>

    <property>
        <name>hive.metastore.ds.retry.interval</name>
        <value>1000</value>
        <description>The number of milliseconds between metastore retry
            attempts.</description>
    </property>

    <property>
        <name>hive.metastore.server.min.threads</name>
        <value>200</value>
        <description>Minimum number of worker threads in the Thrift server's
            pool.</description>
    </property>

    <property>
        <name>hive.metastore.server.max.threads</name>
        <value>100000</value>
        <description>Maximum number of worker threads in the Thrift server's
            pool.</description>
    </property>

    <property>
        <name>hive.metastore.server.tcp.keepalive</name>
        <value>true</value>
        <description>Whether to enable TCP keepalive for the metastore server.
            Keepalive will prevent the accumulation of half-open connections.
        </description>
    </property>

    <property>
        <name>hive.optimize.reducededuplication</name>
        <value>true</value>
        <description>Remove extra map-reduce jobs if the data is already
            clustered by the same key that needs to be used again. This should
            always be set to true. Since it is a new feature, it has been made
            configurable.</description>
    </property>

    <property>
        <name>hive.exec.dynamic.partition</name>
        <value>false</value>
        <description>Whether or not to allow dynamic partitions in DML/DDL.
        </description>
    </property>

    <property>
        <name>hive.exec.dynamic.partition.mode</name>
        <value>strict</value>
        <description>In strict mode, the user must specify at least one static
            partition, in case the user accidentally overwrites all partitions.
        </description>
    </property>
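
    <!-- For illustration (hypothetical tables): with dynamic partitions
        enabled, strict mode still requires at least one static partition
        spec, e.g.

        INSERT OVERWRITE TABLE sales PARTITION (country = 'US', dt)
        SELECT s.item, s.amount, s.dt FROM staging_sales s;

        Here country is static and dt is dynamic; in nonstrict mode both
        could be dynamic. -->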

    <property>
        <name>hive.exec.max.dynamic.partitions</name>
        <value>1000</value>
        <description>Maximum number of dynamic partitions allowed to be
            created in total.</description>
    </property>

    <property>
        <name>hive.exec.max.dynamic.partitions.pernode</name>
        <value>100</value>
        <description>Maximum number of dynamic partitions allowed to be
            created in each mapper/reducer node.</description>
    </property>

    <property>
        <name>hive.default.partition.name</name>
        <value>__HIVE_DEFAULT_PARTITION__</value>
        <description>The default partition name in case the dynamic partition
            column value is a null/empty string or any other value that cannot
            be escaped. This value must not contain any special character used
            in HDFS URIs (e.g., ':', '%', '/' etc.). The user has to be aware
            that the dynamic partition value should not contain this value, to
            avoid confusion.</description>
    </property>

    <property>
        <name>fs.har.impl</name>
        <value>org.apache.hadoop.hive.shims.HiveHarFileSystem</value>
        <description>The implementation for accessing Hadoop Archives. Note
            that this won't be applicable to Hadoop versions less than 0.20.
        </description>
    </property>

    <property>
        <name>hive.archive.enabled</name>
        <value>false</value>
        <description>Whether archiving operations are permitted.</description>
    </property>

    <property>
        <name>hive.archive.har.parentdir.settable</name>
        <value>false</value>
        <description>In new Hadoop versions, the parent directory must be set
            while creating a HAR. Because this functionality is hard to detect
            with just version numbers, this conf var needs to be set manually.</description>
    </property>

    <!-- HBase Storage Handler Parameters -->

    <property>
        <name>hive.hbase.wal.enabled</name>
        <value>true</value>
        <description>Whether writes to HBase should be forced to the
            write-ahead log. Disabling this improves HBase write performance
            at the risk of lost writes in case of a crash.</description>
    </property>

</configuration>