Make LSM bulkload append-only and write-once.
Allows LSM indexes to be used with underlying storage that is append-only.
This also results in a small improvement for LSM component bulk load speed.
- Tree metadata (filters, etc.) now lies at the back of the tree file in
append-only mode.
-- Note that you should *not* ever pass the append-only flag on bulk load
if the tree is ever to be modified in place.
- Append-only operations bypass the buffer cache for writes, but utilize
the buffer cache for memory allocation and reads.
- Addresses ASTERIXDB-1059
Change-Id: I80fb891b5310252143854a336b591bf3f8cd4ba7
Reviewed-on: https://asterix-gerrit.ics.uci.edu/255
Tested-by: Jenkins <jenkins@fulliautomatix.ics.uci.edu>
Reviewed-by: Young-Seok Kim <kisskys@gmail.com>
Reviewed-by: Murtadha Hubail <hubailmor@gmail.com>
diff --git a/hyracks/hyracks-api/src/main/java/org/apache/hyracks/api/io/IIOManager.java b/hyracks/hyracks-api/src/main/java/org/apache/hyracks/api/io/IIOManager.java
index a0cca95..b5c7aa0 100644
--- a/hyracks/hyracks-api/src/main/java/org/apache/hyracks/api/io/IIOManager.java
+++ b/hyracks/hyracks-api/src/main/java/org/apache/hyracks/api/io/IIOManager.java
@@ -55,5 +55,7 @@
public void setExecutor(Executor executor);
+ public long getSize(IFileHandle fileHandle);
+
public void deleteWorkspaceFiles();
-}
\ No newline at end of file
+}
diff --git a/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/ClusterControllerService.java b/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/ClusterControllerService.java
index e311903..1c27376 100644
--- a/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/ClusterControllerService.java
+++ b/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/ClusterControllerService.java
@@ -637,11 +637,10 @@
deploymentRunMap.remove(deploymentKey);
}
- public synchronized void setShutdownRun(ShutdownRun sRun) {
+ public synchronized void setShutdownRun(ShutdownRun sRun) {
shutdownCallback = sRun;
}
-
- public synchronized ShutdownRun getShutdownRun() {
+ public synchronized ShutdownRun getShutdownRun() {
return shutdownCallback;
}
diff --git a/hyracks/hyracks-control/hyracks-control-nc/src/main/java/org/apache/hyracks/control/nc/NodeControllerService.java b/hyracks/hyracks-control/hyracks-control-nc/src/main/java/org/apache/hyracks/control/nc/NodeControllerService.java
index ab0f16b..b96abf8 100644
--- a/hyracks/hyracks-control/hyracks-control-nc/src/main/java/org/apache/hyracks/control/nc/NodeControllerService.java
+++ b/hyracks/hyracks-control/hyracks-control-nc/src/main/java/org/apache/hyracks/control/nc/NodeControllerService.java
@@ -38,6 +38,7 @@
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -333,6 +334,9 @@
if (!shuttedDown) {
LOGGER.log(Level.INFO, "Stopping NodeControllerService");
executor.shutdownNow();
+ if (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
+ LOGGER.log(Level.SEVERE, "Some jobs failed to exit, continuing shutdown abnormally");
+ }
partitionManager.close();
datasetPartitionManager.close();
heartbeatTask.cancel();
diff --git a/hyracks/hyracks-control/hyracks-control-nc/src/main/java/org/apache/hyracks/control/nc/io/IOManager.java b/hyracks/hyracks-control/hyracks-control-nc/src/main/java/org/apache/hyracks/control/nc/io/IOManager.java
index f673a07..6881f73 100644
--- a/hyracks/hyracks-control/hyracks-control-nc/src/main/java/org/apache/hyracks/control/nc/io/IOManager.java
+++ b/hyracks/hyracks-control/hyracks-control-nc/src/main/java/org/apache/hyracks/control/nc/io/IOManager.java
@@ -260,6 +260,11 @@
}
@Override
+ public long getSize(IFileHandle fileHandle) {
+ return ((FileHandle) fileHandle).getFileReference().getFile().length();
+ }
+
+ @Override
public void deleteWorkspaceFiles() {
for (IODeviceHandle ioDevice : workAreaIODevices) {
File workspaceFolder = new File(ioDevice.getPath(), ioDevice.getWorkAreaPath());
@@ -277,4 +282,4 @@
return name.endsWith(WORKSPACE_FILE_SUFFIX);
}
};
-}
\ No newline at end of file
+}
diff --git a/hyracks/hyracks-storage-am-bloomfilter/src/main/java/org/apache/hyracks/storage/am/bloomfilter/impls/BloomFilter.java b/hyracks/hyracks-storage-am-bloomfilter/src/main/java/org/apache/hyracks/storage/am/bloomfilter/impls/BloomFilter.java
index b62e483..ed65902 100644
--- a/hyracks/hyracks-storage-am-bloomfilter/src/main/java/org/apache/hyracks/storage/am/bloomfilter/impls/BloomFilter.java
+++ b/hyracks/hyracks-storage-am-bloomfilter/src/main/java/org/apache/hyracks/storage/am/bloomfilter/impls/BloomFilter.java
@@ -28,6 +28,7 @@
import org.apache.hyracks.storage.am.common.api.IndexException;
import org.apache.hyracks.storage.common.buffercache.IBufferCache;
import org.apache.hyracks.storage.common.buffercache.ICachedPage;
+import org.apache.hyracks.storage.common.buffercache.IFIFOPageQueue;
import org.apache.hyracks.storage.common.file.BufferedFileHandle;
import org.apache.hyracks.storage.common.file.IFileMapProvider;
@@ -73,7 +74,7 @@
public int getNumPages() throws HyracksDataException {
if (!isActivated) {
- throw new HyracksDataException("The bloom filter is not activated.");
+ activate();
}
return numPages;
}
@@ -141,17 +142,6 @@
throw new HyracksDataException("Failed to create the bloom filter since it is activated.");
}
prepareFile();
- ICachedPage metaPage = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, METADATA_PAGE_ID), true);
- metaPage.acquireWriteLatch();
- try {
- metaPage.getBuffer().putInt(NUM_PAGES_OFFSET, 0);
- metaPage.getBuffer().putInt(NUM_HASHES_USED_OFFSET, 0);
- metaPage.getBuffer().putLong(NUM_ELEMENTS_OFFSET, 0L);
- metaPage.getBuffer().putLong(NUM_BITS_OFFSET, 0L);
- } finally {
- metaPage.releaseWriteLatch(true);
- bufferCache.unpin(metaPage);
- }
bufferCache.closeFile(fileId);
}
@@ -166,6 +156,13 @@
}
private void readBloomFilterMetaData() throws HyracksDataException {
+ if (bufferCache.getNumPagesOfFile(fileId) == 0) {
+ numPages = 0;
+ numHashes = 0;
+ numElements = 0;
+ numBits = 0;
+ return;
+ }
ICachedPage metaPage = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, METADATA_PAGE_ID), false);
metaPage.acquireReadLatch();
try {
@@ -211,12 +208,15 @@
private final int numHashes;
private final long numBits;
private final int numPages;
+ private IFIFOPageQueue queue;
+ private ICachedPage[] pages;
+ private ICachedPage metaDataPage = null;
public BloomFilterBuilder(long numElements, int numHashes, int numBitsPerElement) throws HyracksDataException {
if (!isActivated) {
throw new HyracksDataException("Failed to create the bloom filter builder since it is not activated.");
}
-
+ queue = bufferCache.createFIFOQueue();
this.numElements = numElements;
this.numHashes = numHashes;
numBits = this.numElements * numBitsPerElement;
@@ -225,18 +225,12 @@
throw new HyracksDataException("Cannot create a bloom filter with his huge number of pages.");
}
numPages = (int) tmp;
- persistBloomFilterMetaData();
- readBloomFilterMetaData();
+ pages = new ICachedPage[numPages];
int currentPageId = 1;
while (currentPageId <= numPages) {
- ICachedPage page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, currentPageId), true);
- page.acquireWriteLatch();
- try {
- initPage(page.getBuffer().array());
- } finally {
- page.releaseWriteLatch(true);
- bufferCache.unpin(page);
- }
+ ICachedPage page = bufferCache.confiscatePage(BufferedFileHandle.getDiskPageId(fileId, currentPageId));
+ initPage(page.getBuffer().array());
+ pages[currentPageId - 1] = page;
++currentPageId;
}
}
@@ -254,18 +248,14 @@
}
}
- private void persistBloomFilterMetaData() throws HyracksDataException {
- ICachedPage metaPage = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, METADATA_PAGE_ID), false);
- metaPage.acquireWriteLatch();
- try {
- metaPage.getBuffer().putInt(NUM_PAGES_OFFSET, numPages);
- metaPage.getBuffer().putInt(NUM_HASHES_USED_OFFSET, numHashes);
- metaPage.getBuffer().putLong(NUM_ELEMENTS_OFFSET, numElements);
- metaPage.getBuffer().putLong(NUM_BITS_OFFSET, numBits);
- } finally {
- metaPage.releaseWriteLatch(true);
- bufferCache.unpin(metaPage);
+ private void allocateAndInitMetaDataPage() throws HyracksDataException {
+ if (metaDataPage == null) {
+ metaDataPage = bufferCache.confiscatePage(BufferedFileHandle.getDiskPageId(fileId, METADATA_PAGE_ID));
}
+ metaDataPage.getBuffer().putInt(NUM_PAGES_OFFSET, numPages);
+ metaDataPage.getBuffer().putInt(NUM_HASHES_USED_OFFSET, numHashes);
+ metaDataPage.getBuffer().putLong(NUM_ELEMENTS_OFFSET, numElements);
+ metaDataPage.getBuffer().putLong(NUM_BITS_OFFSET, numBits);
}
@Override
@@ -277,28 +267,42 @@
MurmurHash128Bit.hash3_x64_128(tuple, keyFields, SEED, hashes);
for (int i = 0; i < numHashes; ++i) {
long hash = Math.abs((hashes[0] + i * hashes[1]) % numBits);
+ ICachedPage page = pages[((int) (hash / numBitsPerPage))];
+ ByteBuffer buffer = page.getBuffer();
+ int byteIndex = (int) (hash % numBitsPerPage) >> 3; // divide by 8
+ byte b = buffer.get(byteIndex);
+ int bitIndex = (int) (hash % numBitsPerPage) & 0x07; // mod 8
+ b = (byte) (b | (1 << bitIndex));
- // we increment the page id by one, since the metadata page id of the filter is 0.
- ICachedPage page = bufferCache.pin(
- BufferedFileHandle.getDiskPageId(fileId, (int) (hash / numBitsPerPage) + 1), false);
- page.acquireWriteLatch();
- try {
- ByteBuffer buffer = page.getBuffer();
- int byteIndex = (int) (hash % numBitsPerPage) >> 3; // divide by 8
- byte b = buffer.array()[byteIndex];
- int bitIndex = (int) (hash % numBitsPerPage) & 0x07; // mod 8
- b = (byte) (b | (1 << bitIndex));
- buffer.array()[byteIndex] = b;
- } finally {
- page.releaseWriteLatch(true);
- bufferCache.unpin(page);
- }
+ buffer.put(byteIndex, b);
}
}
@Override
public void end() throws HyracksDataException, IndexException {
+ allocateAndInitMetaDataPage();
+ queue.put(metaDataPage);
+ for (ICachedPage p : pages) {
+ queue.put(p);
+ }
+ bufferCache.finishQueue();
+ BloomFilter.this.numBits = numBits;
+ BloomFilter.this.numHashes = numHashes;
+ BloomFilter.this.numElements = numElements;
+ BloomFilter.this.numPages = numPages;
+ }
+
+ @Override
+ public void abort() throws HyracksDataException {
+ for (ICachedPage p : pages) {
+ if (p != null) {
+ bufferCache.returnPage(p, false);
+ }
+ }
+ if (metaDataPage != null) {
+ bufferCache.returnPage(metaDataPage, false);
+ }
}
}
-}
\ No newline at end of file
+}
diff --git a/hyracks/hyracks-storage-am-btree/src/main/java/org/apache/hyracks/storage/am/btree/frames/BTreeFieldPrefixNSMLeafFrame.java b/hyracks/hyracks-storage-am-btree/src/main/java/org/apache/hyracks/storage/am/btree/frames/BTreeFieldPrefixNSMLeafFrame.java
index f4fd394..e908c1b 100644
--- a/hyracks/hyracks-storage-am-btree/src/main/java/org/apache/hyracks/storage/am/btree/frames/BTreeFieldPrefixNSMLeafFrame.java
+++ b/hyracks/hyracks-storage-am-btree/src/main/java/org/apache/hyracks/storage/am/btree/frames/BTreeFieldPrefixNSMLeafFrame.java
@@ -91,7 +91,7 @@
}
@Override
- public int getBytesRequriedToWriteTuple(ITupleReference tuple) {
+ public int getBytesRequiredToWriteTuple(ITupleReference tuple) {
return tupleWriter.bytesRequired(tuple) + slotManager.getSlotSize();
}
diff --git a/hyracks/hyracks-storage-am-btree/src/main/java/org/apache/hyracks/storage/am/btree/frames/BTreeNSMInteriorFrame.java b/hyracks/hyracks-storage-am-btree/src/main/java/org/apache/hyracks/storage/am/btree/frames/BTreeNSMInteriorFrame.java
index ab5e55f..5b2bdfc 100644
--- a/hyracks/hyracks-storage-am-btree/src/main/java/org/apache/hyracks/storage/am/btree/frames/BTreeNSMInteriorFrame.java
+++ b/hyracks/hyracks-storage-am-btree/src/main/java/org/apache/hyracks/storage/am/btree/frames/BTreeNSMInteriorFrame.java
@@ -59,7 +59,7 @@
}
@Override
- public int getBytesRequriedToWriteTuple(ITupleReference tuple) {
+ public int getBytesRequiredToWriteTuple(ITupleReference tuple) {
return tupleWriter.bytesRequired(tuple) + childPtrSize + slotManager.getSlotSize();
}
diff --git a/hyracks/hyracks-storage-am-btree/src/main/java/org/apache/hyracks/storage/am/btree/frames/BTreeNSMLeafFrame.java b/hyracks/hyracks-storage-am-btree/src/main/java/org/apache/hyracks/storage/am/btree/frames/BTreeNSMLeafFrame.java
index c96a2cf..5172a92 100644
--- a/hyracks/hyracks-storage-am-btree/src/main/java/org/apache/hyracks/storage/am/btree/frames/BTreeNSMLeafFrame.java
+++ b/hyracks/hyracks-storage-am-btree/src/main/java/org/apache/hyracks/storage/am/btree/frames/BTreeNSMLeafFrame.java
@@ -50,7 +50,7 @@
}
@Override
- public int getBytesRequriedToWriteTuple(ITupleReference tuple) {
+ public int getBytesRequiredToWriteTuple(ITupleReference tuple) {
return tupleWriter.bytesRequired(tuple) + slotManager.getSlotSize();
}
diff --git a/hyracks/hyracks-storage-am-btree/src/main/java/org/apache/hyracks/storage/am/btree/impls/BTree.java b/hyracks/hyracks-storage-am-btree/src/main/java/org/apache/hyracks/storage/am/btree/impls/BTree.java
index 3d02065..8c192a1 100644
--- a/hyracks/hyracks-storage-am-btree/src/main/java/org/apache/hyracks/storage/am/btree/impls/BTree.java
+++ b/hyracks/hyracks-storage-am-btree/src/main/java/org/apache/hyracks/storage/am/btree/impls/BTree.java
@@ -31,7 +31,6 @@
import org.apache.hyracks.api.io.FileReference;
import org.apache.hyracks.data.std.primitive.IntegerPointable;
import org.apache.hyracks.dataflow.common.data.accessors.ITupleReference;
-import org.apache.hyracks.dataflow.common.data.marshalling.IntegerSerializerDeserializer;
import org.apache.hyracks.dataflow.common.util.TupleUtils;
import org.apache.hyracks.storage.am.btree.api.IBTreeFrame;
import org.apache.hyracks.storage.am.btree.api.IBTreeInteriorFrame;
@@ -41,22 +40,22 @@
import org.apache.hyracks.storage.am.btree.exceptions.BTreeNotUpdateableException;
import org.apache.hyracks.storage.am.btree.frames.BTreeNSMInteriorFrame;
import org.apache.hyracks.storage.am.btree.impls.BTreeOpContext.PageValidationInfo;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
-import org.apache.hyracks.storage.am.common.api.IIndexAccessor;
-import org.apache.hyracks.storage.am.common.api.IIndexBulkLoader;
-import org.apache.hyracks.storage.am.common.api.IIndexCursor;
-import org.apache.hyracks.storage.am.common.api.IModificationOperationCallback;
-import org.apache.hyracks.storage.am.common.api.ISearchOperationCallback;
-import org.apache.hyracks.storage.am.common.api.ISearchPredicate;
-import org.apache.hyracks.storage.am.common.api.ISplitKey;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexAccessor;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexCursor;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexFrame;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexFrameFactory;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexTupleReference;
-import org.apache.hyracks.storage.am.common.api.IndexException;
-import org.apache.hyracks.storage.am.common.api.TreeIndexException;
-import org.apache.hyracks.storage.am.common.api.UnsortedInputException;
+import org.apache.hyracks.storage.am.common.api.IIndexAccessor;
+import org.apache.hyracks.storage.am.common.api.IIndexBulkLoader;
+import org.apache.hyracks.storage.am.common.api.IIndexCursor;
+import org.apache.hyracks.storage.am.common.api.IMetaDataPageManager;
+import org.apache.hyracks.storage.am.common.api.IModificationOperationCallback;
+import org.apache.hyracks.storage.am.common.api.ISearchOperationCallback;
+import org.apache.hyracks.storage.am.common.api.ISearchPredicate;
+import org.apache.hyracks.storage.am.common.api.ISplitKey;
+import org.apache.hyracks.storage.am.common.api.ITreeIndexAccessor;
+import org.apache.hyracks.storage.am.common.api.ITreeIndexCursor;
+import org.apache.hyracks.storage.am.common.api.ITreeIndexFrame;
+import org.apache.hyracks.storage.am.common.api.ITreeIndexFrameFactory;
+import org.apache.hyracks.storage.am.common.api.ITreeIndexTupleReference;
+import org.apache.hyracks.storage.am.common.api.IndexException;
+import org.apache.hyracks.storage.am.common.api.TreeIndexException;
+import org.apache.hyracks.storage.am.common.api.UnsortedInputException;
import org.apache.hyracks.storage.am.common.exceptions.TreeIndexDuplicateKeyException;
import org.apache.hyracks.storage.am.common.exceptions.TreeIndexDuplicateKeyException;
import org.apache.hyracks.storage.am.common.exceptions.TreeIndexNonExistentKeyException;
import org.apache.hyracks.storage.am.common.frames.FrameOpSpaceStatus;
@@ -66,6 +51,7 @@
import org.apache.hyracks.storage.am.common.impls.TreeIndexDiskOrderScanCursor;
import org.apache.hyracks.storage.am.common.ophelpers.IndexOperation;
import org.apache.hyracks.storage.am.common.ophelpers.MultiComparator;
+import org.apache.hyracks.storage.common.buffercache.BufferCache;
import org.apache.hyracks.storage.common.buffercache.IBufferCache;
import org.apache.hyracks.storage.common.buffercache.ICachedPage;
import org.apache.hyracks.storage.common.file.BufferedFileHandle;
@@ -83,7 +69,7 @@
private final ReadWriteLock treeLatch;
private final int maxTupleSize;
- public BTree(IBufferCache bufferCache, IFileMapProvider fileMapProvider, IFreePageManager freePageManager,
+ public BTree(IBufferCache bufferCache, IFileMapProvider fileMapProvider, IMetaDataPageManager freePageManager,
ITreeIndexFrameFactory interiorFrameFactory, ITreeIndexFrameFactory leafFrameFactory,
IBinaryComparatorFactory[] cmpFactories, int fieldCount, FileReference file) {
super(bufferCache, fileMapProvider, freePageManager, interiorFrameFactory, leafFrameFactory, cmpFactories,
@@ -100,8 +86,8 @@
TreeIndexDiskOrderScanCursor cursor = (TreeIndexDiskOrderScanCursor) icursor;
ctx.reset();
RangePredicate diskOrderScanPred = new RangePredicate(null, null, true, true, ctx.cmp, ctx.cmp);
- int currentPageId = rootPage;
int maxPageId = freePageManager.getMaxPage(ctx.metaFrame);
+ int currentPageId = bulkloadLeafStart;
ICachedPage page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, currentPageId), false);
page.acquireReadLatch();
try {
@@ -128,7 +114,9 @@
NoOpOperationCallback.INSTANCE);
PageValidationInfo pvi = accessor.ctx.createPageValidationInfo(null);
accessor.ctx.validationInfos.addFirst(pvi);
- validate(accessor.ctx, rootPage);
+ if (isActive) {
+ validate(accessor.ctx, rootPage);
+ }
}
private void validate(BTreeOpContext ctx, int pageId) throws HyracksDataException {
@@ -316,8 +304,8 @@
}
private void insert(ITupleReference tuple, BTreeOpContext ctx) throws HyracksDataException, TreeIndexException {
- int tupleSize = Math.max(ctx.leafFrame.getBytesRequriedToWriteTuple(tuple),
- ctx.interiorFrame.getBytesRequriedToWriteTuple(tuple));
+ int tupleSize = Math.max(ctx.leafFrame.getBytesRequiredToWriteTuple(tuple),
+ ctx.interiorFrame.getBytesRequiredToWriteTuple(tuple));
if (tupleSize > maxTupleSize) {
throw new TreeIndexException("Space required for record (" + tupleSize
+ ") larger than maximum acceptable size (" + maxTupleSize + ")");
@@ -327,8 +315,8 @@
}
private void upsert(ITupleReference tuple, BTreeOpContext ctx) throws HyracksDataException, TreeIndexException {
- int tupleSize = Math.max(ctx.leafFrame.getBytesRequriedToWriteTuple(tuple),
- ctx.interiorFrame.getBytesRequriedToWriteTuple(tuple));
+ int tupleSize = Math.max(ctx.leafFrame.getBytesRequiredToWriteTuple(tuple),
+ ctx.interiorFrame.getBytesRequiredToWriteTuple(tuple));
if (tupleSize > maxTupleSize) {
throw new TreeIndexException("Space required for record (" + tupleSize
+ ") larger than maximum acceptable size (" + maxTupleSize + ")");
@@ -344,8 +332,8 @@
if (fieldCount == ctx.cmp.getKeyFieldCount()) {
throw new BTreeNotUpdateableException("Cannot perform updates when the entire tuple forms the key.");
}
- int tupleSize = Math.max(ctx.leafFrame.getBytesRequriedToWriteTuple(tuple),
- ctx.interiorFrame.getBytesRequriedToWriteTuple(tuple));
+ int tupleSize = Math.max(ctx.leafFrame.getBytesRequiredToWriteTuple(tuple),
+ ctx.interiorFrame.getBytesRequiredToWriteTuple(tuple));
if (tupleSize > maxTupleSize) {
throw new TreeIndexException("Space required for record (" + tupleSize
+ ") larger than maximum acceptable size (" + maxTupleSize + ")");
@@ -756,7 +744,7 @@
bufferCache.unpin(node);
}
if (restartOp) {
- // Wait for the SMO to finish before restarting.
+ // Wait for the SMO to finish before restarting.
treeLatch.readLock().lock();
treeLatch.readLock().unlock();
ctx.pageLsns.removeLast();
@@ -942,8 +930,13 @@
@Override
public IIndexBulkLoader createBulkLoader(float fillFactor, boolean verifyInput, long numElementsHint,
boolean checkIfEmptyIndex) throws TreeIndexException {
+ return createBulkLoader(fillFactor, verifyInput, numElementsHint, checkIfEmptyIndex, false);
+ }
+
+ public IIndexBulkLoader createBulkLoader(float fillFactor, boolean verifyInput, long numElementsHint,
+ boolean checkIfEmptyIndex, boolean appendOnly) throws TreeIndexException {
try {
- return new BTreeBulkLoader(fillFactor, verifyInput);
+ return new BTreeBulkLoader(fillFactor, verifyInput, appendOnly);
} catch (HyracksDataException e) {
throw new TreeIndexException(e);
}
@@ -952,19 +945,22 @@
public class BTreeBulkLoader extends AbstractTreeIndex.AbstractTreeIndexBulkLoader {
protected final ISplitKey splitKey;
protected final boolean verifyInput;
+ protected List<ICachedPage> pagesToWrite;
- public BTreeBulkLoader(float fillFactor, boolean verifyInput) throws TreeIndexException, HyracksDataException {
- super(fillFactor);
+ public BTreeBulkLoader(float fillFactor, boolean verifyInput, boolean appendOnly) throws TreeIndexException,
+ HyracksDataException {
+ super(fillFactor, appendOnly);
this.verifyInput = verifyInput;
splitKey = new BTreeSplitKey(leafFrame.getTupleWriter().createTupleReference());
splitKey.getTuple().setFieldCount(cmp.getKeyFieldCount());
+ pagesToWrite = new ArrayList<ICachedPage>();
}
@Override
public void add(ITupleReference tuple) throws IndexException, HyracksDataException {
try {
- int tupleSize = Math.max(leafFrame.getBytesRequriedToWriteTuple(tuple),
- interiorFrame.getBytesRequriedToWriteTuple(tuple));
+ int tupleSize = Math.max(leafFrame.getBytesRequiredToWriteTuple(tuple),
+ interiorFrame.getBytesRequiredToWriteTuple(tuple));
if (tupleSize > maxTupleSize) {
throw new TreeIndexException("Space required for record (" + tupleSize
+ ") larger than maximum acceptable size (" + maxTupleSize + ")");
@@ -980,7 +976,7 @@
leafFrame.compress();
spaceUsed = leafFrame.getBuffer().capacity() - leafFrame.getTotalFreeSpace();
}
-
+ //full, allocate new page
if (spaceUsed + spaceNeeded > leafMaxBytes) {
leafFrontier.lastTuple.resetByTupleIndex(leafFrame, leafFrame.getTupleCount() - 1);
if (verifyInput) {
@@ -992,18 +988,20 @@
.getBuffer().array(), 0);
splitKey.getTuple().resetByTupleOffset(splitKey.getBuffer(), 0);
splitKey.setLeftPage(leafFrontier.pageId);
+
+ pagesToWrite.clear();
+ propagateBulk(1, pagesToWrite);
leafFrontier.pageId = freePageManager.getFreePage(metaFrame);
((IBTreeLeafFrame) leafFrame).setNextLeaf(leafFrontier.pageId);
- leafFrontier.page.releaseWriteLatch(true);
- bufferCache.unpin(leafFrontier.page);
+ queue.put(leafFrontier.page);
+ for (ICachedPage c : pagesToWrite) {
+ queue.put(c);
+ }
splitKey.setRightPage(leafFrontier.pageId);
- propagateBulk(1);
-
- leafFrontier.page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, leafFrontier.pageId),
- true);
- leafFrontier.page.acquireWriteLatch();
+ leafFrontier.page = bufferCache.confiscatePage(BufferedFileHandle.getDiskPageId(fileId,
+ leafFrontier.pageId));
leafFrame.setPage(leafFrontier.page);
leafFrame.initBuffer((byte) 0);
} else {
@@ -1039,7 +1037,7 @@
}
}
- protected void propagateBulk(int level) throws HyracksDataException {
+ protected void propagateBulk(int level, List<ICachedPage> pagesToWrite) throws HyracksDataException {
if (splitKey.getBuffer() == null)
return;
@@ -1063,25 +1061,73 @@
tupleWriter.writeTupleFields(frontier.lastTuple, 0, cmp.getKeyFieldCount(), splitKey.getBuffer()
.array(), 0);
splitKey.getTuple().resetByTupleOffset(splitKey.getBuffer(), 0);
- splitKey.setLeftPage(frontier.pageId);
((IBTreeInteriorFrame) interiorFrame).deleteGreatest();
+ int finalPageId = freePageManager.getFreePage(metaFrame);
+ bufferCache.setPageDiskId(frontier.page, BufferedFileHandle.getDiskPageId(fileId, finalPageId));
+ pagesToWrite.add(frontier.page);
+ splitKey.setLeftPage(finalPageId);
- frontier.page.releaseWriteLatch(true);
- bufferCache.unpin(frontier.page);
- frontier.pageId = freePageManager.getFreePage(metaFrame);
-
- splitKey.setRightPage(frontier.pageId);
- propagateBulk(level + 1);
-
- frontier.page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, frontier.pageId), true);
- frontier.page.acquireWriteLatch();
+ propagateBulk(level + 1, pagesToWrite);
+ frontier.page = bufferCache.confiscatePage(BufferCache.INVALID_DPID);
interiorFrame.setPage(frontier.page);
interiorFrame.initBuffer((byte) level);
}
((IBTreeInteriorFrame) interiorFrame).insertSorted(tuple);
}
+ private void persistFrontiers(int level, int rightPage) throws HyracksDataException {
+ if (level >= nodeFrontiers.size()) {
+ //at root
+ if (appendOnly) {
+ rootPage = nodeFrontiers.get(level - 1).pageId;
+ }
+ releasedLatches = true;
+ return;
+ }
+ if (level < 1) {
+ ICachedPage lastLeaf = nodeFrontiers.get(level).page;
+ int lastLeafPage = nodeFrontiers.get(level).pageId;
+ setPageDpid(lastLeaf, nodeFrontiers.get(level).pageId);
+ queue.put(lastLeaf);
+ nodeFrontiers.get(level).page = null;
+ persistFrontiers(level + 1, lastLeafPage);
+ return;
+ }
+ NodeFrontier frontier = nodeFrontiers.get(level);
+ interiorFrame.setPage(frontier.page);
+ //just finalize = the layer right above the leaves has correct righthand pointers already
+ if (rightPage < 0) {
+ throw new HyracksDataException("Error in index creation. Internal node appears to have no rightmost guide");
+ }
+ ((IBTreeInteriorFrame) interiorFrame).setRightmostChildPageId(rightPage);
+ int finalPageId = freePageManager.getFreePage(metaFrame);
+ setPageDpid(frontier.page, finalPageId);
+ queue.put(frontier.page);
+ frontier.pageId = finalPageId;
+
+ persistFrontiers(level + 1, finalPageId);
+ }
+
+ @Override
+ protected void handleException() throws HyracksDataException {
+ super.handleException();
+ }
+
+ @Override
+ public void end() throws HyracksDataException {
+ persistFrontiers(0, -1);
+ super.end();
+ }
+
+ @Override
+ public void abort() throws HyracksDataException {
+ super.handleException();
+ }
+
+ private void setPageDpid(ICachedPage page, int pageId) {
+ bufferCache.setPageDiskId(page, BufferedFileHandle.getDiskPageId(fileId, pageId));
+ }
}
@SuppressWarnings("rawtypes")
@@ -1109,7 +1155,8 @@
tuple.resetByTupleIndex(interiorFrame, i);
// Print child pointer.
int numFields = tuple.getFieldCount();
- int childPageId = IntegerPointable.getInteger(tuple.getFieldData(numFields - 1), tuple.getFieldStart(numFields - 1) + tuple.getFieldLength(numFields - 1));
+ int childPageId = IntegerPointable.getInteger(tuple.getFieldData(numFields - 1),
+ tuple.getFieldStart(numFields - 1) + tuple.getFieldLength(numFields - 1));
strBuilder.append("(" + childPageId + ") ");
String tupleString = TupleUtils.printTuple(tuple, fieldSerdes);
strBuilder.append(tupleString + " | ");
diff --git a/hyracks/hyracks-storage-am-btree/src/main/java/org/apache/hyracks/storage/am/btree/util/BTreeUtils.java b/hyracks/hyracks-storage-am-btree/src/main/java/org/apache/hyracks/storage/am/btree/util/BTreeUtils.java
index 5aab7ae..7c2abb1 100644
--- a/hyracks/hyracks-storage-am-btree/src/main/java/org/apache/hyracks/storage/am/btree/util/BTreeUtils.java
+++ b/hyracks/hyracks-storage-am-btree/src/main/java/org/apache/hyracks/storage/am/btree/util/BTreeUtils.java
@@ -29,12 +29,12 @@
import org.apache.hyracks.storage.am.btree.frames.BTreeNSMInteriorFrameFactory;
import org.apache.hyracks.storage.am.btree.frames.BTreeNSMLeafFrameFactory;
import org.apache.hyracks.storage.am.btree.impls.BTree;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
+import org.apache.hyracks.storage.am.common.api.IMetaDataPageManager;
import org.apache.hyracks.storage.am.common.api.ITreeIndexFrameFactory;
import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrameFactory;
import org.apache.hyracks.storage.am.common.api.ITreeIndexTupleWriterFactory;
import org.apache.hyracks.storage.am.common.frames.LIFOMetaDataFrameFactory;
-import org.apache.hyracks.storage.am.common.freepage.LinkedListFreePageManager;
+import org.apache.hyracks.storage.am.common.freepage.LinkedMetaDataPageManager;
import org.apache.hyracks.storage.am.common.ophelpers.MultiComparator;
import org.apache.hyracks.storage.am.common.tuples.TypeAwareTupleWriterFactory;
import org.apache.hyracks.storage.common.buffercache.IBufferCache;
@@ -48,13 +48,14 @@
ITreeIndexFrameFactory leafFrameFactory = getLeafFrameFactory(tupleWriterFactory, leafType);
ITreeIndexFrameFactory interiorFrameFactory = new BTreeNSMInteriorFrameFactory(tupleWriterFactory);
ITreeIndexMetaDataFrameFactory metaFrameFactory = new LIFOMetaDataFrameFactory();
- IFreePageManager freePageManager = new LinkedListFreePageManager(bufferCache, 0, metaFrameFactory);
+ IMetaDataPageManager freePageManager;
+ freePageManager = new LinkedMetaDataPageManager(bufferCache, metaFrameFactory);
BTree btree = new BTree(bufferCache, fileMapProvider, freePageManager, interiorFrameFactory, leafFrameFactory,
cmpFactories, typeTraits.length, file);
return btree;
}
- public static BTree createBTree(IBufferCache bufferCache, IFreePageManager freePageManager,
+ public static BTree createBTree(IBufferCache bufferCache, IMetaDataPageManager freePageManager,
IFileMapProvider fileMapProvider, ITypeTraits[] typeTraits, IBinaryComparatorFactory[] cmpFactories,
BTreeLeafFrameType leafType, FileReference file) throws BTreeException {
TypeAwareTupleWriterFactory tupleWriterFactory = new TypeAwareTupleWriterFactory(typeTraits);
diff --git a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IFreePageManager.java b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IFreePageManager.java
deleted file mode 100644
index 57bcd6c..0000000
--- a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IFreePageManager.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hyracks.storage.am.common.api;
-
-import org.apache.hyracks.api.exceptions.HyracksDataException;
-
-public interface IFreePageManager {
- public void open(int fileId);
-
- public void close();
-
- public int getFreePage(ITreeIndexMetaDataFrame metaFrame)
- throws HyracksDataException;
-
- public void addFreePage(ITreeIndexMetaDataFrame metaFrame, int freePage)
- throws HyracksDataException;
-
- public int getMaxPage(ITreeIndexMetaDataFrame metaFrame)
- throws HyracksDataException;
-
- public void init(ITreeIndexMetaDataFrame metaFrame, int currentMaxPage)
- throws HyracksDataException;
-
- public ITreeIndexMetaDataFrameFactory getMetaDataFrameFactory();
-
- // required to return negative values
- public byte getMetaPageLevelIndicator();
-
- public byte getFreePageLevelIndicator();
-
- // determined by examining level indicator
- public boolean isMetaPage(ITreeIndexMetaDataFrame metaFrame);
-
- public boolean isFreePage(ITreeIndexMetaDataFrame metaFrame);
-
- public int getFirstMetadataPage();
-}
diff --git a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IIndex.java b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IIndex.java
index 9fe9f06..e670390 100644
--- a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IIndex.java
+++ b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IIndex.java
@@ -126,9 +126,11 @@
*/
public IIndexBulkLoader createBulkLoader(float fillFactor, boolean verifyInput, long numElementsHint,
boolean checkIfEmptyIndex) throws IndexException;
-
+
+ public IIndexBulkLoader createBulkLoader(float fillFactor, boolean verifyInput, long numElementsHint,
+ boolean checkIfEmptyIndex, boolean appendOnly) throws IndexException;
+
/**
- *
* @return true if the index needs memory components
*/
public boolean hasMemoryComponents();
diff --git a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IIndexBulkLoader.java b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IIndexBulkLoader.java
index d390154..b837022 100644
--- a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IIndexBulkLoader.java
+++ b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IIndexBulkLoader.java
@@ -16,31 +16,36 @@
* specific language governing permissions and limitations
* under the License.
*/
-package org.apache.hyracks.storage.am.common.api;
-
-import org.apache.hyracks.api.exceptions.HyracksDataException;
-import org.apache.hyracks.dataflow.common.data.accessors.ITupleReference;
-
-public interface IIndexBulkLoader {
- /**
- * Append a tuple to the index in the context of a bulk load.
- *
- * @param tuple
- * Tuple to be inserted.
- * @throws IndexException
- * If the input stream is invalid for bulk loading (e.g., is not sorted).
- * @throws HyracksDataException
- * If the BufferCache throws while un/pinning or un/latching.
- */
- public void add(ITupleReference tuple) throws IndexException, HyracksDataException;
-
- /**
- * Finalize the bulk loading operation in the given context.
- *
- * @throws IndexException
- * @throws HyracksDataException
- * If the BufferCache throws while un/pinning or un/latching.
- */
- public void end() throws IndexException, HyracksDataException;
-
-}
+package org.apache.hyracks.storage.am.common.api;
+
+import org.apache.hyracks.api.exceptions.HyracksDataException;
+import org.apache.hyracks.dataflow.common.data.accessors.ITupleReference;
+
+public interface IIndexBulkLoader {
+ /**
+ * Append a tuple to the index in the context of a bulk load.
+ *
+ * @param tuple
+ * Tuple to be inserted.
+ * @throws IndexException
+ * If the input stream is invalid for bulk loading (e.g., is not sorted).
+ * @throws HyracksDataException
+ * If the BufferCache throws while un/pinning or un/latching.
+ */
+ public void add(ITupleReference tuple) throws IndexException, HyracksDataException;
+
+ /**
+ * Finalize the bulk loading operation in the given context.
+ *
+ * @throws IndexException
+ * @throws HyracksDataException
+ * If the BufferCache throws while un/pinning or un/latching.
+ */
+ public void end() throws IndexException, HyracksDataException;
+
+ /**
+ * Release all resources held by this bulkloader, with no guarantee of
+ * persisted content.
+ */
+ void abort() throws HyracksDataException;
+}
diff --git a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IMetaDataPageManager.java b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IMetaDataPageManager.java
new file mode 100644
index 0000000..432eb59
--- /dev/null
+++ b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IMetaDataPageManager.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hyracks.storage.am.common.api;
+
+import org.apache.hyracks.api.exceptions.HyracksDataException;
+import org.apache.hyracks.storage.common.buffercache.ICachedPage;
+
+public interface IMetaDataPageManager {
+ /**
+ * This is the class through which one interfaces with index metadata.
+ * The index metadata contains information such as the LSN of the index, free page information,
+ * and filter page locations.
+ */
+ /**
+ * Open an index file's metadata
+ * @param fileId The file which to open the metadata of
+ */
+ public void open(int fileId);
+
+ /**
+ * Close an index file's metadata.
+ * @throws HyracksDataException
+ */
+
+ public void close() throws HyracksDataException;
+
+ /**
+     * Get the location of a free page to use for index operations
+ * @param metaFrame A metadata frame to use to wrap the raw page
+ * @return A page location, or -1 if no free page could be found or allocated
+ * @throws HyracksDataException
+ */
+
+ public int getFreePage(ITreeIndexMetaDataFrame metaFrame) throws HyracksDataException;
+
+ /**
+ * Add a page back to the pool of free pages within an index file
+ * @param metaFrame A metadata frame to use to wrap the raw page
+ * @param freePage The page which to return to the free space
+ * @throws HyracksDataException
+ */
+
+ public void addFreePage(ITreeIndexMetaDataFrame metaFrame, int freePage) throws HyracksDataException;
+
+ /**
+ * Gets the highest page offset according to the metadata
+ * @param metaFrame A metadata frame to use to wrap the raw page
+     * @return The location of the highest offset page
+ * @throws HyracksDataException
+ */
+
+ public int getMaxPage(ITreeIndexMetaDataFrame metaFrame) throws HyracksDataException;
+
+ /**
+ * Initializes the index metadata
+     * @param metaFrame A metadata frame to use to wrap the raw page
+ * @param currentMaxPage The highest page offset to consider valid
+ * @throws HyracksDataException
+ */
+
+ public void init(ITreeIndexMetaDataFrame metaFrame, int currentMaxPage) throws HyracksDataException;
+
+ public ITreeIndexMetaDataFrameFactory getMetaDataFrameFactory();
+
+ // required to return negative values
+ public byte getMetaPageLevelIndicator();
+
+ public byte getFreePageLevelIndicator();
+
+ // determined by examining level indicator
+
+ public boolean isMetaPage(ITreeIndexMetaDataFrame metaFrame);
+
+ public boolean isFreePage(ITreeIndexMetaDataFrame metaFrame);
+
+ /**
+ * Determines where the metadata page is located in an index file
+     * @return The location of the metadata page, or -1 if the file appears to be corrupt
+ * @throws HyracksDataException
+ */
+
+ public int getFirstMetadataPage() throws HyracksDataException;
+
+ /**
+ * Initializes the metadata manager on an open index file
+ * @param metaFrame A metadata frame used to wrap the raw page
+ * @throws HyracksDataException
+ */
+
+ void init(ITreeIndexMetaDataFrame metaFrame) throws HyracksDataException;
+
+ /**
+ * Locate the filter page in an index file
+ * @return The offset of the filter page if it exists, or less than zero if no filter page exists yet
+ * @throws HyracksDataException
+ */
+
+ int getFilterPageId() throws HyracksDataException;
+
+ void setFilterPageId(int filterPageId) throws HyracksDataException;
+
+ long getLSN() throws HyracksDataException;
+
+ void setLSN(long lsn) throws HyracksDataException;
+
+ /**
+ * Set the cached page to manage for filter data
+ * @param page The page to manage
+ */
+
+ void setFilterPage(ICachedPage page);
+
+ ICachedPage getFilterPage();
+
+ boolean appendOnlyMode();
+
+}
diff --git a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IFreePageManagerFactory.java b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IMetadataManagerFactory.java
similarity index 81%
rename from hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IFreePageManagerFactory.java
rename to hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IMetadataManagerFactory.java
index 3dbe0ab..e9dd674 100644
--- a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IFreePageManagerFactory.java
+++ b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IMetadataManagerFactory.java
@@ -18,6 +18,8 @@
*/
package org.apache.hyracks.storage.am.common.api;
-public interface IFreePageManagerFactory {
- public IFreePageManager createFreePageManager();
+import org.apache.hyracks.api.exceptions.HyracksDataException;
+
+public interface IMetadataManagerFactory {
+ public IMetaDataPageManager createFreePageManager() throws HyracksDataException;
}
diff --git a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/ITreeIndex.java b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/ITreeIndex.java
index d3a7c6d..a29f329 100644
--- a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/ITreeIndex.java
+++ b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/ITreeIndex.java
@@ -41,7 +41,7 @@
/**
* @return The index's free page manager.
*/
- public IFreePageManager getFreePageManager();
+ public IMetaDataPageManager getMetaManager();
/**
* @return The number of fields tuples of this index have.
diff --git a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/ITreeIndexFrame.java b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/ITreeIndexFrame.java
index 18fe4c0..9ac09a3 100644
--- a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/ITreeIndexFrame.java
+++ b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/ITreeIndexFrame.java
@@ -65,7 +65,7 @@
public int getMaxTupleSize(int pageSize);
- public int getBytesRequriedToWriteTuple(ITupleReference tuple);
+ public int getBytesRequiredToWriteTuple(ITupleReference tuple);
// for debugging
public String printHeader();
diff --git a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IVirtualFreePageManager.java b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IVirtualMetaDataPageManager.java
similarity index 92%
rename from hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IVirtualFreePageManager.java
rename to hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IVirtualMetaDataPageManager.java
index 4247e64..0073f59 100644
--- a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IVirtualFreePageManager.java
+++ b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IVirtualMetaDataPageManager.java
@@ -18,7 +18,7 @@
*/
package org.apache.hyracks.storage.am.common.api;
-public interface IVirtualFreePageManager extends IFreePageManager {
+public interface IVirtualMetaDataPageManager extends IMetaDataPageManager {
public int getCapacity();
public void reset();
diff --git a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/dataflow/TreeIndexStatsOperatorNodePushable.java b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/dataflow/TreeIndexStatsOperatorNodePushable.java
index fff3d57..48e65bb 100644
--- a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/dataflow/TreeIndexStatsOperatorNodePushable.java
+++ b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/dataflow/TreeIndexStatsOperatorNodePushable.java
@@ -68,11 +68,11 @@
IBufferCache bufferCache = opDesc.getStorageManager().getBufferCache(ctx);
IFileMapProvider fileMapProvider = opDesc.getStorageManager().getFileMapProvider(ctx);
int indexFileId = fileMapProvider.lookupFileId(treeIndexHelper.getFileReference());
- statsGatherer = new TreeIndexStatsGatherer(bufferCache, treeIndex.getFreePageManager(), indexFileId,
+ statsGatherer = new TreeIndexStatsGatherer(bufferCache, treeIndex.getMetaManager(), indexFileId,
treeIndex.getRootPageId());
- TreeIndexStats stats = statsGatherer.gatherStats(treeIndex.getLeafFrameFactory().createFrame(),
- treeIndex.getInteriorFrameFactory().createFrame(),
- treeIndex.getFreePageManager().getMetaDataFrameFactory().createFrame());
+ TreeIndexStats stats = statsGatherer.gatherStats(treeIndex.getLeafFrameFactory().createFrame(), treeIndex
+ .getInteriorFrameFactory().createFrame(), treeIndex.getMetaManager().getMetaDataFrameFactory()
+ .createFrame());
// Write the stats output as a single string field.
FrameTupleAppender appender = new FrameTupleAppender(new VSizeFrame(ctx));
ArrayTupleBuilder tb = new ArrayTupleBuilder(1);
diff --git a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/frames/AbstractSlotManager.java b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/frames/AbstractSlotManager.java
index 667f76c..7d61659 100644
--- a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/frames/AbstractSlotManager.java
+++ b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/frames/AbstractSlotManager.java
@@ -24,8 +24,8 @@
public abstract class AbstractSlotManager implements ISlotManager {
- protected final int GREATEST_KEY_INDICATOR = -1;
- protected final int ERROR_INDICATOR = -2;
+ public static final int GREATEST_KEY_INDICATOR = -1;
+ public static final int ERROR_INDICATOR = -2;
protected static final int slotSize = 4;
protected ITreeIndexFrame frame;
diff --git a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/frames/LIFOMetaDataFrame.java b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/frames/LIFOMetaDataFrame.java
index 2d1ab42..16fdecd 100644
--- a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/frames/LIFOMetaDataFrame.java
+++ b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/frames/LIFOMetaDataFrame.java
@@ -112,7 +112,7 @@
public void initBuffer(byte level) {
buf.putInt(tupleCountOff, 0);
buf.putInt(freeSpaceOff, lsnOff + 8);
- //buf.putInt(maxPageOff, -1);
+ buf.putInt(maxPageOff, 0);
buf.put(levelOff, level);
buf.putInt(nextPageOff, -1);
buf.putInt(additionalFilteringPageOff, -1);
diff --git a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/freepage/LinkedListFreePageManager.java b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/freepage/LinkedListFreePageManager.java
deleted file mode 100644
index 55ddbb7..0000000
--- a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/freepage/LinkedListFreePageManager.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hyracks.storage.am.common.freepage;
-
-import org.apache.hyracks.api.exceptions.HyracksDataException;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrame;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrameFactory;
-import org.apache.hyracks.storage.common.buffercache.IBufferCache;
-import org.apache.hyracks.storage.common.buffercache.ICachedPage;
-import org.apache.hyracks.storage.common.file.BufferedFileHandle;
-
-public class LinkedListFreePageManager implements IFreePageManager {
-
- private static final byte META_PAGE_LEVEL_INDICATOR = -1;
- private static final byte FREE_PAGE_LEVEL_INDICATOR = -2;
- private final IBufferCache bufferCache;
- private final int headPage;
- private int fileId = -1;
- private final ITreeIndexMetaDataFrameFactory metaDataFrameFactory;
-
- public LinkedListFreePageManager(IBufferCache bufferCache,
- int headPage, ITreeIndexMetaDataFrameFactory metaDataFrameFactory) {
- this.bufferCache = bufferCache;
- this.headPage = headPage;
- this.metaDataFrameFactory = metaDataFrameFactory;
- }
-
- @Override
- public void addFreePage(ITreeIndexMetaDataFrame metaFrame, int freePage)
- throws HyracksDataException {
-
- ICachedPage metaNode = bufferCache.pin(
- BufferedFileHandle.getDiskPageId(fileId, headPage), false);
- metaNode.acquireWriteLatch();
-
- try {
- metaFrame.setPage(metaNode);
-
- if (metaFrame.hasSpace()) {
- metaFrame.addFreePage(freePage);
- } else {
- // allocate a new page in the chain of meta pages
- int newPage = metaFrame.getFreePage();
- if (newPage < 0) {
- throw new Exception(
- "Inconsistent Meta Page State. It has no space, but it also has no entries.");
- }
-
- ICachedPage newNode = bufferCache.pin(
- BufferedFileHandle.getDiskPageId(fileId, newPage),
- false);
- newNode.acquireWriteLatch();
-
- try {
- int metaMaxPage = metaFrame.getMaxPage();
-
- // copy metaDataPage to newNode
- System.arraycopy(metaNode.getBuffer().array(), 0, newNode
- .getBuffer().array(), 0, metaNode.getBuffer()
- .capacity());
-
- metaFrame.initBuffer(META_PAGE_LEVEL_INDICATOR);
- metaFrame.setNextPage(newPage);
- metaFrame.setMaxPage(metaMaxPage);
- metaFrame.addFreePage(freePage);
- } finally {
- newNode.releaseWriteLatch(true);
- bufferCache.unpin(newNode);
- }
- }
- } catch (Exception e) {
- e.printStackTrace();
- } finally {
- metaNode.releaseWriteLatch(true);
- bufferCache.unpin(metaNode);
- }
- }
-
- @Override
- public int getFreePage(ITreeIndexMetaDataFrame metaFrame)
- throws HyracksDataException {
- ICachedPage metaNode = bufferCache.pin(
- BufferedFileHandle.getDiskPageId(fileId, headPage), false);
-
- metaNode.acquireWriteLatch();
-
- int freePage = -1;
- try {
- metaFrame.setPage(metaNode);
- freePage = metaFrame.getFreePage();
- if (freePage < 0) { // no free page entry on this page
- int nextPage = metaFrame.getNextPage();
- if (nextPage > 0) { // sibling may have free pages
- ICachedPage nextNode = bufferCache.pin(
- BufferedFileHandle.getDiskPageId(fileId, nextPage),
- false);
-
- nextNode.acquireWriteLatch();
- // we copy over the free space entries of nextpage into the
- // first meta page (metaDataPage)
- // we need to link the first page properly to the next page
- // of nextpage
- try {
- // remember entries that remain unchanged
- int maxPage = metaFrame.getMaxPage();
-
- // copy entire page (including sibling pointer, free
- // page entries, and all other info)
- // after this copy nextPage is considered a free page
- System.arraycopy(nextNode.getBuffer().array(), 0,
- metaNode.getBuffer().array(), 0, nextNode
- .getBuffer().capacity());
-
- // reset unchanged entry
- metaFrame.setMaxPage(maxPage);
-
- freePage = metaFrame.getFreePage();
- // sibling also has no free pages, this "should" not
- // happen, but we deal with it anyway just to be safe
- if (freePage < 0) {
- freePage = nextPage;
- } else {
- metaFrame.addFreePage(nextPage);
- }
- } finally {
- nextNode.releaseWriteLatch(true);
- bufferCache.unpin(nextNode);
- }
- } else {
- freePage = metaFrame.getMaxPage();
- freePage++;
- metaFrame.setMaxPage(freePage);
- }
- }
- } finally {
- metaNode.releaseWriteLatch(true);
- bufferCache.unpin(metaNode);
- }
-
- return freePage;
- }
-
- @Override
- public int getMaxPage(ITreeIndexMetaDataFrame metaFrame)
- throws HyracksDataException {
- ICachedPage metaNode = bufferCache.pin(
- BufferedFileHandle.getDiskPageId(fileId, headPage), false);
- metaNode.acquireWriteLatch();
- int maxPage = -1;
- try {
- metaFrame.setPage(metaNode);
- maxPage = metaFrame.getMaxPage();
- } finally {
- metaNode.releaseWriteLatch(true);
- bufferCache.unpin(metaNode);
- }
- return maxPage;
- }
-
- @Override
- public void init(ITreeIndexMetaDataFrame metaFrame, int currentMaxPage)
- throws HyracksDataException {
- // initialize meta data page
- ICachedPage metaNode = bufferCache.pin(
- BufferedFileHandle.getDiskPageId(fileId, headPage), true);
-
- metaNode.acquireWriteLatch();
- try {
- metaFrame.setPage(metaNode);
- metaFrame.initBuffer(META_PAGE_LEVEL_INDICATOR);
- metaFrame.setMaxPage(currentMaxPage);
- } finally {
- metaNode.releaseWriteLatch(true);
- bufferCache.unpin(metaNode);
- }
- }
-
- @Override
- public ITreeIndexMetaDataFrameFactory getMetaDataFrameFactory() {
- return metaDataFrameFactory;
- }
-
- @Override
- public byte getFreePageLevelIndicator() {
- return FREE_PAGE_LEVEL_INDICATOR;
- }
-
- @Override
- public byte getMetaPageLevelIndicator() {
- return META_PAGE_LEVEL_INDICATOR;
- }
-
- @Override
- public boolean isFreePage(ITreeIndexMetaDataFrame metaFrame) {
- return metaFrame.getLevel() == FREE_PAGE_LEVEL_INDICATOR;
- }
-
- @Override
- public boolean isMetaPage(ITreeIndexMetaDataFrame metaFrame) {
- return metaFrame.getLevel() == META_PAGE_LEVEL_INDICATOR;
- }
-
- @Override
- public int getFirstMetadataPage() {
- return headPage;
- }
-
- @Override
- public void open(int fileId) {
- this.fileId = fileId;
- }
-
- @Override
- public void close() {
- fileId = -1;
- }
-}
diff --git a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/freepage/LinkedListFreePageManagerFactory.java b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/freepage/LinkedListMetadataManagerFactory.java
similarity index 67%
rename from hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/freepage/LinkedListFreePageManagerFactory.java
rename to hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/freepage/LinkedListMetadataManagerFactory.java
index 16b5c02..a911413 100644
--- a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/freepage/LinkedListFreePageManagerFactory.java
+++ b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/freepage/LinkedListMetadataManagerFactory.java
@@ -19,23 +19,24 @@
package org.apache.hyracks.storage.am.common.freepage;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
-import org.apache.hyracks.storage.am.common.api.IFreePageManagerFactory;
+import org.apache.hyracks.api.exceptions.HyracksDataException;
+import org.apache.hyracks.storage.am.common.api.IMetaDataPageManager;
+import org.apache.hyracks.storage.am.common.api.IMetadataManagerFactory;
import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrameFactory;
import org.apache.hyracks.storage.common.buffercache.IBufferCache;
-public class LinkedListFreePageManagerFactory implements IFreePageManagerFactory {
+public class LinkedListMetadataManagerFactory implements IMetadataManagerFactory {
private final ITreeIndexMetaDataFrameFactory metaDataFrameFactory;
private final IBufferCache bufferCache;
- public LinkedListFreePageManagerFactory(IBufferCache bufferCache,
- ITreeIndexMetaDataFrameFactory metaDataFrameFactory) {
+ public LinkedListMetadataManagerFactory(IBufferCache bufferCache,
+ ITreeIndexMetaDataFrameFactory metaDataFrameFactory) {
this.metaDataFrameFactory = metaDataFrameFactory;
this.bufferCache = bufferCache;
}
- public IFreePageManager createFreePageManager() {
- return new LinkedListFreePageManager(bufferCache, 0, metaDataFrameFactory);
+ public IMetaDataPageManager createFreePageManager() throws HyracksDataException {
+ return new LinkedMetaDataPageManager(bufferCache, metaDataFrameFactory);
}
}
diff --git a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/freepage/LinkedMetaDataPageManager.java b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/freepage/LinkedMetaDataPageManager.java
new file mode 100644
index 0000000..096908c
--- /dev/null
+++ b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/freepage/LinkedMetaDataPageManager.java
@@ -0,0 +1,458 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hyracks.storage.am.common.freepage;
+
+import java.util.logging.Logger;
+
+import org.apache.hyracks.api.exceptions.HyracksDataException;
+import org.apache.hyracks.storage.am.common.api.IMetaDataPageManager;
+import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrame;
+import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrameFactory;
+import org.apache.hyracks.storage.common.buffercache.BufferCache;
+import org.apache.hyracks.storage.common.buffercache.IBufferCache;
+import org.apache.hyracks.storage.common.buffercache.ICachedPage;
+import org.apache.hyracks.storage.common.buffercache.IFIFOPageQueue;
+import org.apache.hyracks.storage.common.file.BufferedFileHandle;
+
+public class LinkedMetaDataPageManager implements IMetaDataPageManager {
+
+ private static final byte META_PAGE_LEVEL_INDICATOR = -1;
+ private static final byte FREE_PAGE_LEVEL_INDICATOR = -2;
+ public static final int NO_FILTER_IN_PLACE = -1;
+ public static final int NO_FILTER_APPEND_ONLY = -2;
+ private final IBufferCache bufferCache;
+ private int headPage = -1;
+ private int fileId = -1;
+ private final ITreeIndexMetaDataFrameFactory metaDataFrameFactory;
+ private boolean appendOnly = false;
+ ICachedPage confiscatedMetaNode;
+ ICachedPage filterPage;
+ private static Logger LOGGER = Logger
+ .getLogger(LinkedMetaDataPageManager.class.getName());
+
+ public LinkedMetaDataPageManager(IBufferCache bufferCache, ITreeIndexMetaDataFrameFactory metaDataFrameFactory) {
+ this.bufferCache = bufferCache;
+ this.metaDataFrameFactory = metaDataFrameFactory;
+ }
+
+ @Override
+ public void addFreePage(ITreeIndexMetaDataFrame metaFrame, int freePage) throws HyracksDataException {
+
+ ICachedPage metaNode = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, getFirstMetadataPage()), false);
+ metaNode.acquireWriteLatch();
+
+ try {
+ metaFrame.setPage(metaNode);
+
+ if (metaFrame.hasSpace()) {
+ metaFrame.addFreePage(freePage);
+ } else {
+ // allocate a new page in the chain of meta pages
+ int newPage = metaFrame.getFreePage();
+ if (newPage < 0) {
+ throw new Exception("Inconsistent Meta Page State. It has no space, but it also has no entries.");
+ }
+
+ ICachedPage newNode = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, newPage), false);
+ newNode.acquireWriteLatch();
+
+ try {
+ int metaMaxPage = metaFrame.getMaxPage();
+
+ // copy metaDataPage to newNode
+ System.arraycopy(metaNode.getBuffer().array(), 0, newNode.getBuffer().array(), 0, metaNode
+ .getBuffer().capacity());
+
+ metaFrame.initBuffer(META_PAGE_LEVEL_INDICATOR);
+ metaFrame.setNextPage(newPage);
+ metaFrame.setMaxPage(metaMaxPage);
+ metaFrame.addFreePage(freePage);
+ } finally {
+ newNode.releaseWriteLatch(true);
+ bufferCache.unpin(newNode);
+ }
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ } finally {
+ metaNode.releaseWriteLatch(true);
+ bufferCache.unpin(metaNode);
+ }
+ }
+
+ @Override
+ public int getFreePage(ITreeIndexMetaDataFrame metaFrame) throws HyracksDataException {
+ ICachedPage metaNode;
+ if (!appendOnly) {
+ metaNode = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, getFirstMetadataPage()), false);
+ } else {
+ metaNode = confiscatedMetaNode;
+ }
+
+ metaNode.acquireWriteLatch();
+
+ int freePage = IBufferCache.INVALID_PAGEID;
+ try {
+ metaFrame.setPage(metaNode);
+ freePage = metaFrame.getFreePage();
+ if (freePage < 0) { // no free page entry on this page
+ int nextPage = metaFrame.getNextPage();
+ if (nextPage > 0) { // sibling may have free pages
+ ICachedPage nextNode = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, nextPage), false);
+
+ nextNode.acquireWriteLatch();
+ // we copy over the free space entries of nextpage into the
+ // first meta page (metaDataPage)
+ // we need to link the first page properly to the next page
+ // of nextpage
+ try {
+ // remember entries that remain unchanged
+ int maxPage = metaFrame.getMaxPage();
+
+ // copy entire page (including sibling pointer, free
+ // page entries, and all other info)
+ // after this copy nextPage is considered a free page
+ System.arraycopy(nextNode.getBuffer().array(), 0, metaNode.getBuffer().array(), 0, nextNode
+ .getBuffer().capacity());
+
+ // reset unchanged entry
+ metaFrame.setMaxPage(maxPage);
+
+ freePage = metaFrame.getFreePage();
+ // sibling also has no free pages, this "should" not
+ // happen, but we deal with it anyway just to be safe
+ if (freePage < 0) {
+ freePage = nextPage;
+ } else {
+ metaFrame.addFreePage(nextPage);
+ }
+ } finally {
+ nextNode.releaseWriteLatch(true);
+ bufferCache.unpin(nextNode);
+ }
+ } else {
+ freePage = metaFrame.getMaxPage();
+ freePage++;
+ metaFrame.setMaxPage(freePage);
+ }
+ }
+ } finally {
+ if (!appendOnly) {
+ metaNode.releaseWriteLatch(true);
+ bufferCache.unpin(metaNode);
+ } else {
+ metaNode.releaseWriteLatch(false);
+ }
+ }
+
+ return freePage;
+ }
+
+ @Override
+ public int getMaxPage(ITreeIndexMetaDataFrame metaFrame) throws HyracksDataException {
+ ICachedPage metaNode;
+ if (!appendOnly || (appendOnly && confiscatedMetaNode == null)) {
+ int mdPage = getFirstMetadataPage();
+ if( mdPage <0 ){
+ return IBufferCache.INVALID_PAGEID;
+ }
+ metaNode = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, mdPage), false);
+ } else {
+ metaNode = confiscatedMetaNode;
+ }
+ metaNode.acquireReadLatch();
+ int maxPage = -1;
+ try {
+ metaFrame.setPage(metaNode);
+ maxPage = metaFrame.getMaxPage();
+ } finally {
+ metaNode.releaseReadLatch();
+ if (!appendOnly || (appendOnly && confiscatedMetaNode == null)) {
+ bufferCache.unpin(metaNode);
+ }
+ }
+ return maxPage;
+ }
+
+ @Override
+ public void setFilterPageId(int filterPageId) throws HyracksDataException {
+ ICachedPage metaNode;
+ if (!appendOnly) {
+ int mdPage = getFirstMetadataPage();
+ if( mdPage <0 ){
+ return;
+ }
+ metaNode = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, mdPage), false);
+ } else {
+ metaNode = confiscatedMetaNode;
+ }
+ ITreeIndexMetaDataFrame metaFrame = metaDataFrameFactory.createFrame();
+ metaNode.acquireWriteLatch();
+ try {
+ metaFrame.setPage(metaNode);
+ metaFrame.setLSMComponentFilterPageId(filterPageId);
+ } finally {
+ if (!appendOnly) {
+ metaNode.releaseWriteLatch(true);
+ bufferCache.unpin(metaNode);
+ } else {
+ metaNode.releaseWriteLatch(false);
+ }
+ }
+ }
+
+ @Override
+ public int getFilterPageId() throws HyracksDataException {
+ ICachedPage metaNode;
+ int filterPageId = NO_FILTER_IN_PLACE;
+ if (!appendOnly || (appendOnly && confiscatedMetaNode == null)) {
+ metaNode = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, getFirstMetadataPage()), false);
+ } else {
+ metaNode = confiscatedMetaNode;
+ }
+ ITreeIndexMetaDataFrame metaFrame = metaDataFrameFactory.createFrame();
+ metaNode.acquireReadLatch();
+ try {
+ metaFrame.setPage(metaNode);
+ filterPageId = metaFrame.getLSMComponentFilterPageId();
+ if(appendOnly && filterPageId == -1){
+ //hint to filter manager that we are in append-only mode
+ filterPageId = NO_FILTER_APPEND_ONLY;
+ }
+ } finally {
+ metaNode.releaseReadLatch();
+ if (!appendOnly || (appendOnly && confiscatedMetaNode == null)) {
+ bufferCache.unpin(metaNode);
+ }
+ }
+ return filterPageId;
+ }
+
+ @Override
+ public void init(ITreeIndexMetaDataFrame metaFrame, int currentMaxPage) throws HyracksDataException {
+ // initialize meta data page
+ int metaPage = getFirstMetadataPage();
+ if(metaPage == IBufferCache.INVALID_PAGEID){
+ throw new HyracksDataException("No valid metadata found in this file.");
+ }
+ ICachedPage metaNode = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, getFirstMetadataPage()), true);
+
+ metaNode.acquireWriteLatch();
+ try {
+ metaFrame.setPage(metaNode);
+ metaFrame.initBuffer(META_PAGE_LEVEL_INDICATOR);
+ metaFrame.setMaxPage(currentMaxPage);
+ } finally {
+ metaNode.releaseWriteLatch(true);
+ bufferCache.unpin(metaNode);
+ }
+ }
+
+ @Override
+ public void init(ITreeIndexMetaDataFrame metaFrame) throws HyracksDataException {
+ if (confiscatedMetaNode != null) { // don't init twice
+ return;
+ }
+ ICachedPage metaNode = bufferCache.confiscatePage(BufferCache.INVALID_DPID);
+ try {
+ metaFrame.setPage(metaNode);
+ metaFrame.initBuffer(META_PAGE_LEVEL_INDICATOR);
+ metaFrame.setMaxPage(0);
+ } finally {
+ confiscatedMetaNode = metaNode;
+ appendOnly = true;
+ }
+ }
+
+ @Override
+ public ITreeIndexMetaDataFrameFactory getMetaDataFrameFactory() {
+ return metaDataFrameFactory;
+ }
+
+ @Override
+ public byte getFreePageLevelIndicator() {
+ return FREE_PAGE_LEVEL_INDICATOR;
+ }
+
+ @Override
+ public byte getMetaPageLevelIndicator() {
+ return META_PAGE_LEVEL_INDICATOR;
+ }
+
+ @Override
+ public boolean isFreePage(ITreeIndexMetaDataFrame metaFrame) {
+ return metaFrame.getLevel() == FREE_PAGE_LEVEL_INDICATOR;
+ }
+
+ @Override
+ public boolean isMetaPage(ITreeIndexMetaDataFrame metaFrame) {
+ return (metaFrame.getLevel() == META_PAGE_LEVEL_INDICATOR);
+ }
+
+ @Override
+ public void open(int fileId) {
+ this.fileId = fileId;
+ }
+
+ @Override
+ public void close() throws HyracksDataException {
+ if (appendOnly && fileId >= 0 && confiscatedMetaNode != null) {
+ IFIFOPageQueue queue = bufferCache.createFIFOQueue();
+ writeFilterPage(queue);
+ ITreeIndexMetaDataFrame metaFrame = metaDataFrameFactory.createFrame();
+ metaFrame.setPage(confiscatedMetaNode);
+ metaFrame.setValid(true);
+ int finalMetaPage = getMaxPage(metaFrame) + 1;
+ bufferCache.setPageDiskId(confiscatedMetaNode, BufferedFileHandle.getDiskPageId(fileId,finalMetaPage));
+ queue.put(confiscatedMetaNode);
+ bufferCache.finishQueue();
+ confiscatedMetaNode = null;
+ }
+ }
+
+ private void writeFilterPage(IFIFOPageQueue queue) throws HyracksDataException {
+ if(filterPage != null) {
+ ITreeIndexMetaDataFrame metaFrame = metaDataFrameFactory.createFrame();
+ metaFrame.setPage(confiscatedMetaNode);
+ int finalFilterPage = getFreePage(metaFrame);
+ bufferCache.setPageDiskId(filterPage, BufferedFileHandle.getDiskPageId(fileId, finalFilterPage));
+ queue.put(filterPage);
+ }
+ }
+
+ /**
+ * For storage on append-only media (such as HDFS), the meta data page has to be written last.
+ * However, some implementations still write the meta data to the front. To deal with this as well
+ * as to provide downward compatibility, this method tries to find the meta data page first at the
+ * front and then at the back of the file.
+ *
+ * @return The Id of the page holding the meta data
+ * @throws HyracksDataException
+ */
+ @Override
+ public int getFirstMetadataPage() throws HyracksDataException {
+ if (headPage != IBufferCache.INVALID_PAGEID)
+ return headPage;
+
+ ITreeIndexMetaDataFrame metaFrame = metaDataFrameFactory.createFrame();
+
+ int pages = bufferCache.getNumPagesOfFile(fileId);
+ //if there are no pages in the file yet, we're just initializing
+ if(pages == 0){
+ return 0;
+ }
+ //look at the front (modify in-place index)
+ int page = 0;
+ ICachedPage metaNode = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, page), false);
+ try {
+ metaNode.acquireReadLatch();
+ metaFrame.setPage(metaNode);
+
+ if (isMetaPage(metaFrame)) {
+ headPage = page;
+ return headPage;
+ }
+ } finally {
+ metaNode.releaseReadLatch();
+ bufferCache.unpin(metaNode);
+ }
+ //otherwise, look at the back. (append-only index)
+ page = pages-1 >0 ? pages -1 : 0;
+ metaNode = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, page), false);
+ try {
+ metaNode.acquireReadLatch();
+ metaFrame.setPage(metaNode);
+
+ if (isMetaPage(metaFrame)) {
+ headPage = page;
+ return headPage;
+ }
+ } finally {
+ metaNode.releaseReadLatch();
+ bufferCache.unpin(metaNode);
+ }
+ //if we find nothing, this isn't a tree (or isn't one yet).
+ if(pages>0){
+ return IBufferCache.INVALID_PAGEID;
+ }
+ else{
+ return 0;
+ }
+ }
+
+ @Override
+ public long getLSN() throws HyracksDataException {
+ ICachedPage metaNode;
+ if (!appendOnly || (appendOnly && confiscatedMetaNode == null)) {
+ metaNode = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, getFirstMetadataPage()), false);
+ } else {
+ metaNode = confiscatedMetaNode;
+ }
+ ITreeIndexMetaDataFrame metaFrame = metaDataFrameFactory.createFrame();
+ metaNode.acquireReadLatch();
+ try {
+ metaFrame.setPage(metaNode);
+ return metaFrame.getLSN();
+ } finally {
+ metaNode.releaseReadLatch();
+ if (!appendOnly || (appendOnly && confiscatedMetaNode == null)) {
+ bufferCache.unpin(metaNode);
+ }
+ }
+ }
+
+ @Override
+ public void setLSN(long lsn) throws HyracksDataException {
+ ICachedPage metaNode;
+ if (!appendOnly) {
+ metaNode = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, getFirstMetadataPage()), false);
+ } else {
+ metaNode = confiscatedMetaNode;
+ }
+ ITreeIndexMetaDataFrame metaFrame = metaDataFrameFactory.createFrame();
+ metaNode.acquireWriteLatch();
+ try {
+ metaFrame.setPage(metaNode);
+ metaFrame.setLSN(lsn);
+ } finally {
+ if (!appendOnly) {
+ metaNode.releaseWriteLatch(true);
+ bufferCache.unpin(metaNode);
+ } else {
+ metaNode.releaseWriteLatch(false);
+ }
+ }
+ }
+
+ @Override
+ public void setFilterPage(ICachedPage filterPage) {
+ this.filterPage = filterPage;
+ }
+
+ @Override
+ public ICachedPage getFilterPage() {
+ return this.filterPage;
+ }
+
+ @Override
+ public boolean appendOnlyMode() {
+ return appendOnly;
+ }
+}
+
diff --git a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/impls/AbstractTreeIndex.java b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/impls/AbstractTreeIndex.java
index e7b535f..015404d 100644
--- a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/impls/AbstractTreeIndex.java
+++ b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/impls/AbstractTreeIndex.java
@@ -25,29 +25,22 @@
import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.api.io.FileReference;
import org.apache.hyracks.dataflow.common.data.accessors.ITupleReference;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
-import org.apache.hyracks.storage.am.common.api.IIndexBulkLoader;
-import org.apache.hyracks.storage.am.common.api.ITreeIndex;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexAccessor;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexFrame;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexFrameFactory;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrame;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexTupleWriter;
-import org.apache.hyracks.storage.am.common.api.IndexException;
-import org.apache.hyracks.storage.am.common.api.TreeIndexException;
+import org.apache.hyracks.storage.am.common.api.*;
import org.apache.hyracks.storage.am.common.ophelpers.MultiComparator;
import org.apache.hyracks.storage.common.buffercache.IBufferCache;
import org.apache.hyracks.storage.common.buffercache.ICachedPage;
+import org.apache.hyracks.storage.common.buffercache.IFIFOPageQueue;
import org.apache.hyracks.storage.common.file.BufferedFileHandle;
import org.apache.hyracks.storage.common.file.IFileMapProvider;
public abstract class AbstractTreeIndex implements ITreeIndex {
- protected final static int rootPage = 1;
+ public static final int MINIMAL_TREE_PAGE_COUNT = 2;
+ protected int rootPage = 1;
protected final IBufferCache bufferCache;
protected final IFileMapProvider fileMapProvider;
- protected final IFreePageManager freePageManager;
+ protected final IMetaDataPageManager freePageManager;
protected final ITreeIndexFrameFactory interiorFrameFactory;
protected final ITreeIndexFrameFactory leafFrameFactory;
@@ -58,10 +51,17 @@
protected FileReference file;
protected int fileId = -1;
- private boolean isActivated = false;
+ protected boolean isActive = false;
+ //hasEverBeenActivated prevents throwing an exception when deactivating an index that
+ //was never activated or failed to activate in try/finally blocks, as there's no way to know if
+ //an index is activated or not from the outside.
+ protected boolean hasEverBeenActivated = false;
+ protected boolean appendOnly = false;
+
+ protected int bulkloadLeafStart = 0;
public AbstractTreeIndex(IBufferCache bufferCache, IFileMapProvider fileMapProvider,
- IFreePageManager freePageManager, ITreeIndexFrameFactory interiorFrameFactory,
+ IMetaDataPageManager freePageManager, ITreeIndexFrameFactory interiorFrameFactory,
ITreeIndexFrameFactory leafFrameFactory, IBinaryComparatorFactory[] cmpFactories, int fieldCount,
FileReference file) {
this.bufferCache = bufferCache;
@@ -75,7 +75,11 @@
}
public synchronized void create() throws HyracksDataException {
- if (isActivated) {
+ create(false);
+ }
+
+ private synchronized void create(boolean appendOnly) throws HyracksDataException {
+ if (isActive) {
throw new HyracksDataException("Failed to create the index since it is activated.");
}
@@ -99,8 +103,14 @@
}
freePageManager.open(fileId);
- initEmptyTree();
- freePageManager.close();
+ setRootAndMetadataPages(appendOnly);
+ if (!appendOnly) {
+ initEmptyTree();
+ freePageManager.close();
+ } else {
+ this.appendOnly = true;
+ initCachedMetadataPage();
+ }
bufferCache.closeFile(fileId);
}
@@ -108,7 +118,6 @@
ITreeIndexFrame frame = leafFrameFactory.createFrame();
ITreeIndexMetaDataFrame metaFrame = freePageManager.getMetaDataFrameFactory().createFrame();
freePageManager.init(metaFrame, rootPage);
-
ICachedPage rootNode = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, rootPage), true);
rootNode.acquireWriteLatch();
try {
@@ -120,8 +129,28 @@
}
}
+ private void setRootAndMetadataPages(boolean appendOnly) throws HyracksDataException{
+ if (!appendOnly) {
+ // regular or empty tree
+ rootPage = 1;
+ bulkloadLeafStart = 2;
+ } else {
+ // bulkload-only tree (used e.g. for HDFS). -1 is meta page, -2 is root page
+ int numPages = bufferCache.getNumPagesOfFile(fileId);
+ //the root page is the last page before the metadata page
+ rootPage = numPages > MINIMAL_TREE_PAGE_COUNT ? numPages - MINIMAL_TREE_PAGE_COUNT : 0;
+ //leaves start from the very beginning of the file.
+ bulkloadLeafStart = 0;
+ }
+ }
+
+ private void initCachedMetadataPage() throws HyracksDataException {
+ ITreeIndexMetaDataFrame metaFrame = freePageManager.getMetaDataFrameFactory().createFrame();
+ freePageManager.init(metaFrame);
+ }
+
public synchronized void activate() throws HyracksDataException {
- if (isActivated) {
+ if (isActive) {
throw new HyracksDataException("Failed to activate the index since it is already activated.");
}
@@ -144,26 +173,44 @@
}
}
freePageManager.open(fileId);
+ int mdPageLoc = freePageManager.getFirstMetadataPage();
+ ITreeIndexMetaDataFrame metaFrame = freePageManager.getMetaDataFrameFactory().createFrame();
+ int numPages = freePageManager.getMaxPage(metaFrame);
+ if(mdPageLoc > 1 || (mdPageLoc == 1 && numPages <= MINIMAL_TREE_PAGE_COUNT -1 )){ //md page doesn't count itself
+ appendOnly = true;
+ }
+ else{
+ appendOnly = false;
+ }
+ setRootAndMetadataPages(appendOnly);
// TODO: Should probably have some way to check that the tree is physically consistent
// or that the file we just opened actually is a tree
- isActivated = true;
+ isActive = true;
+ hasEverBeenActivated = true;
}
public synchronized void deactivate() throws HyracksDataException {
- if (!isActivated) {
+ if (!isActive && hasEverBeenActivated) {
throw new HyracksDataException("Failed to deactivate the index since it is already deactivated.");
}
+ if (isActive) {
+ freePageManager.close();
+ bufferCache.closeFile(fileId);
+ }
- bufferCache.closeFile(fileId);
- freePageManager.close();
+ isActive = false;
+ }
- isActivated = false;
+ public synchronized void deactivateCloseHandle() throws HyracksDataException {
+ deactivate();
+ bufferCache.purgeHandle(fileId);
+
}
public synchronized void destroy() throws HyracksDataException {
- if (isActivated) {
+ if (isActive) {
throw new HyracksDataException("Failed to destroy the index since it is activated.");
}
@@ -176,13 +223,19 @@
}
public synchronized void clear() throws HyracksDataException {
- if (!isActivated) {
+ if (!isActive) {
throw new HyracksDataException("Failed to clear the index since it is not activated.");
}
initEmptyTree();
}
public boolean isEmptyTree(ITreeIndexFrame frame) throws HyracksDataException {
+ if (rootPage == -1) {
+ return true;
+ }
+ if(freePageManager.appendOnlyMode() && bufferCache.getNumPagesOfFile(fileId) <= MINIMAL_TREE_PAGE_COUNT){
+ return true;
+ }
ICachedPage rootNode = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, rootPage), false);
rootNode.acquireReadLatch();
try {
@@ -198,6 +251,8 @@
}
}
+
+
public byte getTreeHeight(ITreeIndexFrame frame) throws HyracksDataException {
ICachedPage rootNode = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, rootPage), false);
rootNode.acquireReadLatch();
@@ -234,7 +289,7 @@
return cmpFactories;
}
- public IFreePageManager getFreePageManager() {
+ public IMetaDataPageManager getMetaManager() {
return freePageManager;
}
@@ -256,13 +311,28 @@
protected final ITreeIndexTupleWriter tupleWriter;
protected ITreeIndexFrame leafFrame;
protected ITreeIndexFrame interiorFrame;
- private boolean releasedLatches;
+ // Immutable bulk loaders write their root page at page -2, as needed e.g. by append-only file systems such as HDFS.
+ // Since loading this tree relies on the root page actually being at that point, no further inserts into that tree are allowed.
+ // Currently, this is not enforced.
+ protected boolean releasedLatches;
+ public boolean appendOnly = false;
+ protected final IFIFOPageQueue queue;
- public AbstractTreeIndexBulkLoader(float fillFactor) throws TreeIndexException, HyracksDataException {
+ public AbstractTreeIndexBulkLoader(float fillFactor, boolean appendOnly) throws TreeIndexException,
+ HyracksDataException {
+ //Initialize the tree
+ if (appendOnly) {
+ create(appendOnly);
+ this.appendOnly = appendOnly;
+ activate();
+ }
+
leafFrame = leafFrameFactory.createFrame();
interiorFrame = interiorFrameFactory.createFrame();
metaFrame = freePageManager.getMetaDataFrameFactory().createFrame();
+ queue = bufferCache.createFIFOQueue();
+
if (!isEmptyTree(leafFrame)) {
throw new TreeIndexException("Cannot bulk-load a non-empty tree.");
}
@@ -276,8 +346,8 @@
NodeFrontier leafFrontier = new NodeFrontier(leafFrame.createTupleReference());
leafFrontier.pageId = freePageManager.getFreePage(metaFrame);
- leafFrontier.page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, leafFrontier.pageId), true);
- leafFrontier.page.acquireWriteLatch();
+ leafFrontier.page = bufferCache.confiscatePage(BufferedFileHandle
+ .getDiskPageId(fileId, leafFrontier.pageId));
interiorFrame.setPage(leafFrontier.page);
interiorFrame.initBuffer((byte) 0);
@@ -294,48 +364,50 @@
public abstract void add(ITupleReference tuple) throws IndexException, HyracksDataException;
protected void handleException() throws HyracksDataException {
- // Unlatch and unpin pages.
+ // Unlatch and unpin pages that weren't in the queue to avoid leaking memory.
for (NodeFrontier nodeFrontier : nodeFrontiers) {
- nodeFrontier.page.releaseWriteLatch(true);
- bufferCache.unpin(nodeFrontier.page);
+ ICachedPage frontierPage = nodeFrontier.page;
+ if (frontierPage.confiscated()) {
+ bufferCache.returnPage(frontierPage,false);
+ }
}
releasedLatches = true;
}
@Override
public void end() throws HyracksDataException {
- // copy the root generated from the bulk-load to *the* root page location
- ICachedPage newRoot = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, rootPage), true);
- newRoot.acquireWriteLatch();
- NodeFrontier lastNodeFrontier = nodeFrontiers.get(nodeFrontiers.size() - 1);
- try {
- System.arraycopy(lastNodeFrontier.page.getBuffer().array(), 0, newRoot.getBuffer().array(), 0,
- lastNodeFrontier.page.getBuffer().capacity());
- } finally {
- newRoot.releaseWriteLatch(true);
- bufferCache.unpin(newRoot);
+ //move the root page to the first data page if necessary
+ bufferCache.finishQueue();
+ if (!appendOnly) {
+ ICachedPage newRoot = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, rootPage), true);
+ newRoot.acquireWriteLatch();
+ //root will be the highest frontier
+ NodeFrontier lastNodeFrontier = nodeFrontiers.get(nodeFrontiers.size() - 1);
+ ICachedPage oldRoot = bufferCache.pin(
+ BufferedFileHandle.getDiskPageId(fileId, lastNodeFrontier.pageId), false);
+ oldRoot.acquireReadLatch();
+ lastNodeFrontier.page = oldRoot;
+ try {
+ System.arraycopy(lastNodeFrontier.page.getBuffer().array(), 0, newRoot.getBuffer().array(), 0,
+ lastNodeFrontier.page.getBuffer().capacity());
+ } finally {
+ newRoot.releaseWriteLatch(true);
+ bufferCache.flushDirtyPage(newRoot);
+ bufferCache.unpin(newRoot);
+ oldRoot.releaseReadLatch();
+ bufferCache.unpin(oldRoot);
- // register old root as a free page
- freePageManager.addFreePage(metaFrame, lastNodeFrontier.pageId);
+ // register old root as a free page
+ freePageManager.addFreePage(metaFrame, lastNodeFrontier.pageId);
- if (!releasedLatches) {
- for (int i = 0; i < nodeFrontiers.size(); i++) {
- try {
- nodeFrontiers.get(i).page.releaseWriteLatch(true);
- } catch (Exception e) {
- //ignore illegal monitor state exception
- }
- bufferCache.unpin(nodeFrontiers.get(i).page);
- }
}
}
}
protected void addLevel() throws HyracksDataException {
NodeFrontier frontier = new NodeFrontier(tupleWriter.createTupleReference());
- frontier.pageId = freePageManager.getFreePage(metaFrame);
- frontier.page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, frontier.pageId), true);
- frontier.page.acquireWriteLatch();
+ frontier.page = bufferCache.confiscatePage(IBufferCache.INVALID_DPID);
+ frontier.pageId = -1;
frontier.lastTuple.setFieldCount(cmp.getKeyFieldCount());
interiorFrame.setPage(frontier.page);
interiorFrame.initBuffer((byte) nodeFrontiers.size());
@@ -349,6 +421,7 @@
public void setLeafFrame(ITreeIndexFrame leafFrame) {
this.leafFrame = leafFrame;
}
+
}
public class TreeIndexInsertBulkLoader implements IIndexBulkLoader {
@@ -373,6 +446,11 @@
// do nothing
}
+ @Override
+ public void abort() {
+
+ }
+
}
@Override
@@ -383,7 +461,7 @@
public IBinaryComparatorFactory[] getCmpFactories() {
return cmpFactories;
}
-
+
@Override
public boolean hasMemoryComponents() {
return true;
diff --git a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/util/TreeIndexBufferCacheWarmup.java b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/util/TreeIndexBufferCacheWarmup.java
index 14c1ad9..398a3f3 100644
--- a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/util/TreeIndexBufferCacheWarmup.java
+++ b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/util/TreeIndexBufferCacheWarmup.java
@@ -22,7 +22,7 @@
import java.util.Random;
import org.apache.hyracks.api.exceptions.HyracksDataException;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
+import org.apache.hyracks.storage.am.common.api.IMetaDataPageManager;
import org.apache.hyracks.storage.am.common.api.ITreeIndexFrame;
import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrame;
import org.apache.hyracks.storage.am.common.ophelpers.IntArrayList;
@@ -32,13 +32,13 @@
public class TreeIndexBufferCacheWarmup {
private final IBufferCache bufferCache;
- private final IFreePageManager freePageManager;
+ private final IMetaDataPageManager freePageManager;
private final int fileId;
private final ArrayList<IntArrayList> pagesByLevel = new ArrayList<IntArrayList>();
private final Random rnd = new Random();
public TreeIndexBufferCacheWarmup(IBufferCache bufferCache,
- IFreePageManager freePageManager, int fileId) {
+ IMetaDataPageManager freePageManager, int fileId) {
this.bufferCache = bufferCache;
this.freePageManager = freePageManager;
this.fileId = fileId;
diff --git a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/util/TreeIndexStats.java b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/util/TreeIndexStats.java
index ad1d888..8251c6f 100644
--- a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/util/TreeIndexStats.java
+++ b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/util/TreeIndexStats.java
@@ -20,7 +20,7 @@
import java.text.DecimalFormat;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
+import org.apache.hyracks.storage.am.common.api.IMetaDataPageManager;
import org.apache.hyracks.storage.am.common.api.ITreeIndexFrame;
import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrame;
@@ -57,7 +57,7 @@
}
public void add(ITreeIndexMetaDataFrame metaFrame,
- IFreePageManager freePageManager) {
+ IMetaDataPageManager freePageManager) {
if (freePageManager.isFreePage(metaFrame)) {
freePages++;
} else if (freePageManager.isMetaPage(metaFrame)) {
diff --git a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/util/TreeIndexStatsGatherer.java b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/util/TreeIndexStatsGatherer.java
index a12df46..3c15b23 100644
--- a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/util/TreeIndexStatsGatherer.java
+++ b/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/util/TreeIndexStatsGatherer.java
@@ -19,7 +19,7 @@
package org.apache.hyracks.storage.am.common.util;
import org.apache.hyracks.api.exceptions.HyracksDataException;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
+import org.apache.hyracks.storage.am.common.api.IMetaDataPageManager;
import org.apache.hyracks.storage.am.common.api.ITreeIndexFrame;
import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrame;
import org.apache.hyracks.storage.common.buffercache.IBufferCache;
@@ -30,12 +30,12 @@
private final TreeIndexStats treeIndexStats = new TreeIndexStats();
private final IBufferCache bufferCache;
- private final IFreePageManager freePageManager;
+ private final IMetaDataPageManager freePageManager;
private final int fileId;
private final int rootPage;
public TreeIndexStatsGatherer(IBufferCache bufferCache,
- IFreePageManager freePageManager, int fileId, int rootPage) {
+ IMetaDataPageManager freePageManager, int fileId, int rootPage) {
this.bufferCache = bufferCache;
this.freePageManager = freePageManager;
this.fileId = fileId;
diff --git a/hyracks/hyracks-storage-am-lsm-btree/src/main/java/org/apache/hyracks/storage/am/lsm/btree/impls/ExternalBTree.java b/hyracks/hyracks-storage-am-lsm-btree/src/main/java/org/apache/hyracks/storage/am/lsm/btree/impls/ExternalBTree.java
index 3ade4fa..7861730 100644
--- a/hyracks/hyracks-storage-am-lsm-btree/src/main/java/org/apache/hyracks/storage/am/lsm/btree/impls/ExternalBTree.java
+++ b/hyracks/hyracks-storage-am-lsm-btree/src/main/java/org/apache/hyracks/storage/am/lsm/btree/impls/ExternalBTree.java
@@ -32,19 +32,8 @@
import org.apache.hyracks.storage.am.bloomfilter.impls.BloomFilterSpecification;
import org.apache.hyracks.storage.am.btree.impls.BTree;
import org.apache.hyracks.storage.am.btree.impls.BTree.BTreeBulkLoader;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
-import org.apache.hyracks.storage.am.common.api.IIndexBulkLoader;
-import org.apache.hyracks.storage.am.common.api.IIndexCursor;
-import org.apache.hyracks.storage.am.common.api.IIndexOperationContext;
-import org.apache.hyracks.storage.am.common.api.IModificationOperationCallback;
-import org.apache.hyracks.storage.am.common.api.ISearchOperationCallback;
-import org.apache.hyracks.storage.am.common.api.ISearchPredicate;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexCursor;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexFrameFactory;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexTupleWriterFactory;
-import org.apache.hyracks.storage.am.common.api.ITwoPCIndexBulkLoader;
-import org.apache.hyracks.storage.am.common.api.IndexException;
-import org.apache.hyracks.storage.am.common.api.TreeIndexException;
+import org.apache.hyracks.storage.am.common.api.*;
+import org.apache.hyracks.storage.am.common.api.IMetaDataPageManager;
import org.apache.hyracks.storage.am.common.impls.NoOpOperationCallback;
import org.apache.hyracks.storage.am.common.ophelpers.IndexOperation;
import org.apache.hyracks.storage.am.lsm.btree.tuples.LSMBTreeRefrencingTupleWriterFactory;
@@ -510,7 +499,7 @@
frameTupleWriterFactory = ((LSMBTreeDiskComponent) component).getBTree().getLeafFrameFactory()
.getTupleWriterFactory();
bulkLoader = (BTreeBulkLoader) ((LSMBTreeDiskComponent) component).getBTree().createBulkLoader(fillFactor,
- verifyInput, numElementsHint, false);
+ verifyInput, numElementsHint, false, true);
int maxBucketsPerElement = BloomCalculations.maxBucketsPerElement(numElementsHint);
BloomFilterSpecification bloomFilterSpec = BloomCalculations.computeBloomSpec(maxBucketsPerElement,
@@ -652,7 +641,7 @@
}
@Override
- public IFreePageManager getFreePageManager() {
+ public IMetaDataPageManager getMetaManager() {
return null;
}
diff --git a/hyracks/hyracks-storage-am-lsm-btree/src/main/java/org/apache/hyracks/storage/am/lsm/btree/impls/ExternalBTreeWithBuddy.java b/hyracks/hyracks-storage-am-lsm-btree/src/main/java/org/apache/hyracks/storage/am/lsm/btree/impls/ExternalBTreeWithBuddy.java
index bf0040a..4b5e5c8 100644
--- a/hyracks/hyracks-storage-am-lsm-btree/src/main/java/org/apache/hyracks/storage/am/lsm/btree/impls/ExternalBTreeWithBuddy.java
+++ b/hyracks/hyracks-storage-am-lsm-btree/src/main/java/org/apache/hyracks/storage/am/lsm/btree/impls/ExternalBTreeWithBuddy.java
@@ -35,19 +35,7 @@
import org.apache.hyracks.storage.am.btree.impls.BTree;
import org.apache.hyracks.storage.am.btree.impls.BTree.BTreeBulkLoader;
import org.apache.hyracks.storage.am.btree.impls.RangePredicate;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
-import org.apache.hyracks.storage.am.common.api.IIndexBulkLoader;
-import org.apache.hyracks.storage.am.common.api.IIndexCursor;
-import org.apache.hyracks.storage.am.common.api.IIndexOperationContext;
-import org.apache.hyracks.storage.am.common.api.IModificationOperationCallback;
-import org.apache.hyracks.storage.am.common.api.ISearchOperationCallback;
-import org.apache.hyracks.storage.am.common.api.ISearchPredicate;
-import org.apache.hyracks.storage.am.common.api.ITreeIndex;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexCursor;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexFrameFactory;
-import org.apache.hyracks.storage.am.common.api.ITwoPCIndexBulkLoader;
-import org.apache.hyracks.storage.am.common.api.IndexException;
-import org.apache.hyracks.storage.am.common.api.TreeIndexException;
+import org.apache.hyracks.storage.am.common.api.*;
import org.apache.hyracks.storage.am.common.impls.NoOpOperationCallback;
import org.apache.hyracks.storage.am.common.ophelpers.IndexOperation;
import org.apache.hyracks.storage.am.common.ophelpers.MultiComparator;
@@ -299,7 +287,8 @@
public IIndexBulkLoader createBulkLoader(float fillLevel, boolean verifyInput, long numElementsHint,
boolean checkIfEmptyIndex) throws TreeIndexException {
try {
- return new LSMTwoPCBTreeWithBuddyBulkLoader(fillLevel, verifyInput, 0, checkIfEmptyIndex, false);
+ return new LSMTwoPCBTreeWithBuddyBulkLoader(fillLevel, verifyInput, 0, checkIfEmptyIndex,
+ false);
} catch (HyracksDataException e) {
throw new TreeIndexException(e);
}
@@ -503,14 +492,8 @@
public void markAsValid(ILSMComponent lsmComponent) throws HyracksDataException {
LSMBTreeWithBuddyDiskComponent component = (LSMBTreeWithBuddyDiskComponent) lsmComponent;
// Flush the bloom filter first.
- int fileId = component.getBloomFilter().getFileId();
- IBufferCache bufferCache = component.getBTree().getBufferCache();
- int startPage = 0;
- int maxPage = component.getBloomFilter().getNumPages();
- forceFlushDirtyPages(bufferCache, fileId, startPage, maxPage);
- forceFlushDirtyPages(component.getBTree());
+ markAsValidInternal(component.getBTree().getBufferCache(),component.getBloomFilter());
markAsValidInternal(component.getBTree());
- forceFlushDirtyPages(component.getBuddyBTree());
markAsValidInternal(component.getBuddyBTree());
}
@@ -590,7 +573,7 @@
}
@Override
- public IFreePageManager getFreePageManager() {
+ public IMetaDataPageManager getMetaManager() {
// This method should never be called for disk only indexes
return null;
}
@@ -625,13 +608,9 @@
.createLSMComponentInstance(new LSMComponentFileReferences(insertFileRef, deleteFileRef,
bloomFilterFileRef));
if (createComponent) {
- component.getBTree().create();
- component.getBuddyBTree().create();
component.getBloomFilter().create();
}
- component.getBTree().activate();
- component.getBuddyBTree().activate();
component.getBloomFilter().activate();
return component;
}
@@ -696,9 +675,9 @@
// Create the three loaders
btreeBulkLoader = (BTreeBulkLoader) ((LSMBTreeWithBuddyDiskComponent) component).getBTree()
- .createBulkLoader(fillFactor, verifyInput, numElementsHint, false);
+ .createBulkLoader(fillFactor, verifyInput, numElementsHint, false,true);
buddyBtreeBulkLoader = (BTreeBulkLoader) ((LSMBTreeWithBuddyDiskComponent) component).getBuddyBTree()
- .createBulkLoader(fillFactor, verifyInput, numElementsHint, false);
+ .createBulkLoader(fillFactor, verifyInput, numElementsHint, false, true);
int maxBucketsPerElement = BloomCalculations.maxBucketsPerElement(numElementsHint);
BloomFilterSpecification bloomFilterSpec = BloomCalculations.computeBloomSpec(maxBucketsPerElement,
bloomFilterFalsePositiveRate);
@@ -886,7 +865,7 @@
public boolean isPrimaryIndex() {
return false;
}
-
+
@Override
public Set<String> getLSMComponentPhysicalFiles(ILSMComponent lsmComponent) {
Set<String> files = new HashSet<String>();
@@ -900,7 +879,18 @@
}
@Override
+ public IIndexBulkLoader createBulkLoader(float fillFactor, boolean verifyInput, long numElementsHint,
+ boolean checkIfEmptyIndex, boolean appendOnly) throws IndexException {
+ if(!appendOnly){
+ throw new IndexException("LSM Indices do not support in-place inserts");
+ }
+ else{
+ return createBulkLoader(fillFactor, verifyInput,numElementsHint,checkIfEmptyIndex);
+ }
+ }
+
+ @Override
public void allocateMemoryComponents() throws HyracksDataException {
//do nothing since external index never use memory components
}
-}
\ No newline at end of file
+}
diff --git a/hyracks/hyracks-storage-am-lsm-btree/src/main/java/org/apache/hyracks/storage/am/lsm/btree/impls/LSMBTree.java b/hyracks/hyracks-storage-am-lsm-btree/src/main/java/org/apache/hyracks/storage/am/lsm/btree/impls/LSMBTree.java
index 822b320..04f76d5 100644
--- a/hyracks/hyracks-storage-am-lsm-btree/src/main/java/org/apache/hyracks/storage/am/lsm/btree/impls/LSMBTree.java
+++ b/hyracks/hyracks-storage-am-lsm-btree/src/main/java/org/apache/hyracks/storage/am/lsm/btree/impls/LSMBTree.java
@@ -39,19 +39,8 @@
import org.apache.hyracks.storage.am.btree.impls.BTree.BTreeBulkLoader;
import org.apache.hyracks.storage.am.btree.impls.BTreeRangeSearchCursor;
import org.apache.hyracks.storage.am.btree.impls.RangePredicate;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
-import org.apache.hyracks.storage.am.common.api.IIndexAccessor;
-import org.apache.hyracks.storage.am.common.api.IIndexBulkLoader;
-import org.apache.hyracks.storage.am.common.api.IIndexCursor;
-import org.apache.hyracks.storage.am.common.api.IIndexOperationContext;
-import org.apache.hyracks.storage.am.common.api.IModificationOperationCallback;
-import org.apache.hyracks.storage.am.common.api.ISearchOperationCallback;
-import org.apache.hyracks.storage.am.common.api.ISearchPredicate;
-import org.apache.hyracks.storage.am.common.api.ITreeIndex;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexCursor;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexFrameFactory;
-import org.apache.hyracks.storage.am.common.api.IndexException;
-import org.apache.hyracks.storage.am.common.api.TreeIndexException;
+import org.apache.hyracks.storage.am.common.api.*;
+import org.apache.hyracks.storage.am.common.api.IMetaDataPageManager;
import org.apache.hyracks.storage.am.common.exceptions.TreeIndexDuplicateKeyException;
import org.apache.hyracks.storage.am.common.impls.AbstractSearchPredicate;
import org.apache.hyracks.storage.am.common.impls.NoOpOperationCallback;
@@ -73,7 +62,7 @@
import org.apache.hyracks.storage.am.lsm.common.api.ILSMMergePolicy;
import org.apache.hyracks.storage.am.lsm.common.api.ILSMOperationTracker;
import org.apache.hyracks.storage.am.lsm.common.api.IVirtualBufferCache;
-import org.apache.hyracks.storage.am.lsm.common.freepage.VirtualFreePageManager;
+import org.apache.hyracks.storage.am.lsm.common.freepage.VirtualMetaDataPageManager;
import org.apache.hyracks.storage.am.lsm.common.impls.AbstractLSMIndex;
import org.apache.hyracks.storage.am.lsm.common.impls.BlockingIOOperationCallbackWrapper;
import org.apache.hyracks.storage.am.lsm.common.impls.LSMComponentFileReferences;
@@ -119,7 +108,7 @@
int i = 0;
for (IVirtualBufferCache virtualBufferCache : virtualBufferCaches) {
LSMBTreeMemoryComponent mutableComponent = new LSMBTreeMemoryComponent(new BTree(virtualBufferCache,
- virtualBufferCache.getFileMapProvider(), new VirtualFreePageManager(
+ virtualBufferCache.getFileMapProvider(), new VirtualMetaDataPageManager(
virtualBufferCache.getNumPages()), interiorFrameFactory, insertLeafFrameFactory,
cmpFactories, fieldCount, new FileReference(new File(fileManager.getBaseDir() + "_virtual_" + i))),
virtualBufferCache, i == 0 ? true : false, filterFactory == null ? null
@@ -213,7 +202,7 @@
LSMBTreeDiskComponent component = (LSMBTreeDiskComponent) c;
BTree btree = component.getBTree();
BloomFilter bloomFilter = component.getBloomFilter();
- btree.deactivate();
+ btree.deactivateCloseHandle();
bloomFilter.deactivate();
}
deallocateMemoryComponents();
@@ -449,7 +438,7 @@
LSMBTreeDiskComponent component = createDiskComponent(componentFactory, flushOp.getBTreeFlushTarget(),
flushOp.getBloomFilterFlushTarget(), true);
- IIndexBulkLoader bulkLoader = component.getBTree().createBulkLoader(1.0f, false, numElements, false);
+ IIndexBulkLoader bulkLoader = component.getBTree().createBulkLoader(1.0f, false, numElements, false, true);
IIndexBulkLoader builder = component.getBloomFilter().createBuilder(numElements,
bloomFilterSpec.getNumHashes(), bloomFilterSpec.getNumBucketsPerElements());
@@ -465,15 +454,18 @@
scanCursor.close();
builder.end();
}
- bulkLoader.end();
if (component.getLSMComponentFilter() != null) {
List<ITupleReference> filterTuples = new ArrayList<ITupleReference>();
filterTuples.add(flushingComponent.getLSMComponentFilter().getMinTuple());
filterTuples.add(flushingComponent.getLSMComponentFilter().getMaxTuple());
filterManager.updateFilterInfo(component.getLSMComponentFilter(), filterTuples);
- filterManager.writeFilterInfo(component.getLSMComponentFilter(), component.getBTree());
+ filterManager.writeFilterInfo(component.getLSMComponentFilter(), component.getBTree()
+ );
}
+
+ bulkLoader.end();
+
return component;
}
@@ -522,7 +514,8 @@
LSMBTreeDiskComponent mergedComponent = createDiskComponent(componentFactory, mergeOp.getBTreeMergeTarget(),
mergeOp.getBloomFilterMergeTarget(), true);
- IIndexBulkLoader bulkLoader = mergedComponent.getBTree().createBulkLoader(1.0f, false, numElements, false);
+ IIndexBulkLoader bulkLoader = mergedComponent.getBTree()
+ .createBulkLoader(1.0f, false, numElements, false, true);
IIndexBulkLoader builder = mergedComponent.getBloomFilter().createBuilder(numElements,
bloomFilterSpec.getNumHashes(), bloomFilterSpec.getNumBucketsPerElements());
try {
@@ -536,8 +529,6 @@
cursor.close();
builder.end();
}
- bulkLoader.end();
-
if (mergedComponent.getLSMComponentFilter() != null) {
List<ITupleReference> filterTuples = new ArrayList<ITupleReference>();
for (int i = 0; i < mergeOp.getMergingComponents().size(); ++i) {
@@ -545,9 +536,12 @@
filterTuples.add(mergeOp.getMergingComponents().get(i).getLSMComponentFilter().getMaxTuple());
}
filterManager.updateFilterInfo(mergedComponent.getLSMComponentFilter(), filterTuples);
- filterManager.writeFilterInfo(mergedComponent.getLSMComponentFilter(), mergedComponent.getBTree());
+ filterManager.writeFilterInfo(mergedComponent.getLSMComponentFilter(), mergedComponent.getBTree()
+ );
}
+ bulkLoader.end();
+
return mergedComponent;
}
@@ -557,14 +551,12 @@
// Create new BTree instance.
LSMBTreeDiskComponent component = (LSMBTreeDiskComponent) factory
.createLSMComponentInstance(new LSMComponentFileReferences(btreeFileRef, null, bloomFilterFileRef));
- if (createComponent) {
- component.getBTree().create();
- component.getBloomFilter().create();
- }
// BTree will be closed during cleanup of merge().
- component.getBTree().activate();
+ if (!createComponent) {
+ component.getBTree().activate();
+ }
component.getBloomFilter().activate();
- if (component.getLSMComponentFilter() != null) {
+ if (component.getLSMComponentFilter() != null && !createComponent) {
filterManager.readFilterInfo(component.getLSMComponentFilter(), component.getBTree());
}
return component;
@@ -591,13 +583,7 @@
// The order of forcing the dirty page to be flushed is critical. The
// bloom filter must be always done first.
LSMBTreeDiskComponent component = (LSMBTreeDiskComponent) lsmComponent;
- // Flush the bloom filter first.
- int fileId = component.getBloomFilter().getFileId();
- IBufferCache bufferCache = component.getBTree().getBufferCache();
- int startPage = 0;
- int maxPage = component.getBloomFilter().getNumPages();
- forceFlushDirtyPages(bufferCache, fileId, startPage, maxPage);
- forceFlushDirtyPages(component.getBTree());
+ markAsValidInternal(component.getBTree().getBufferCache(),component.getBloomFilter());
markAsValidInternal(component.getBTree());
}
@@ -623,7 +609,7 @@
throw new TreeIndexException(e);
}
bulkLoader = (BTreeBulkLoader) ((LSMBTreeDiskComponent) component).getBTree().createBulkLoader(fillFactor,
- verifyInput, numElementsHint, false);
+ verifyInput, numElementsHint, false, true);
int maxBucketsPerElement = BloomCalculations.maxBucketsPerElement(numElementsHint);
BloomFilterSpecification bloomFilterSpec = BloomCalculations.computeBloomSpec(maxBucketsPerElement,
@@ -672,10 +658,9 @@
protected void cleanupArtifacts() throws HyracksDataException, IndexException {
if (!cleanedUpArtifacts) {
cleanedUpArtifacts = true;
- // We make sure to end the bloom filter load to release latches.
if (!endedBloomFilterLoad) {
- builder.end();
- endedBloomFilterLoad = true;
+ builder.abort();
+ endedBloomFilterLoad = true;
}
((LSMBTreeDiskComponent) component).getBTree().deactivate();
((LSMBTreeDiskComponent) component).getBTree().destroy();
@@ -691,13 +676,14 @@
builder.end();
endedBloomFilterLoad = true;
}
- bulkLoader.end();
if (component.getLSMComponentFilter() != null) {
filterManager.writeFilterInfo(component.getLSMComponentFilter(),
((LSMBTreeDiskComponent) component).getBTree());
}
+ bulkLoader.end();
+
if (isEmptyComponent) {
cleanupArtifacts();
} else {
@@ -705,6 +691,18 @@
}
}
}
+
+ @Override
+ public void abort() throws HyracksDataException {
+ if(bulkLoader != null){
+ bulkLoader.abort();
+ }
+
+ if(builder != null){
+ builder.abort();
+ }
+
+ }
}
public LSMBTreeOpContext createOpContext(IModificationOperationCallback modificationCallback,
@@ -768,10 +766,10 @@
}
@Override
- public IFreePageManager getFreePageManager() {
+ public IMetaDataPageManager getMetaManager() {
LSMBTreeMemoryComponent mutableComponent = (LSMBTreeMemoryComponent) memoryComponents
.get(currentMutableComponentId.get());
- return mutableComponent.getBTree().getFreePageManager();
+ return mutableComponent.getBTree().getMetaManager();
}
@Override
@@ -820,6 +818,16 @@
}
@Override
+ public IIndexBulkLoader createBulkLoader(float fillFactor, boolean verifyInput, long numElementsHint,
+ boolean checkIfEmptyIndex, boolean appendOnly) throws IndexException {
+ try {
+ return new LSMBTreeBulkLoader(fillFactor, verifyInput, numElementsHint, checkIfEmptyIndex);
+ } catch (HyracksDataException e) {
+ throw new TreeIndexException(e);
+ }
+ }
+
+ @Override
public Set<String> getLSMComponentPhysicalFiles(ILSMComponent lsmComponent) {
Set<String> files = new HashSet<String>();
LSMBTreeDiskComponent component = (LSMBTreeDiskComponent) lsmComponent;
diff --git a/hyracks/hyracks-storage-am-lsm-btree/src/main/java/org/apache/hyracks/storage/am/lsm/btree/util/LSMBTreeUtils.java b/hyracks/hyracks-storage-am-lsm-btree/src/main/java/org/apache/hyracks/storage/am/lsm/btree/util/LSMBTreeUtils.java
index e8e54ff..c175336 100644
--- a/hyracks/hyracks-storage-am-lsm-btree/src/main/java/org/apache/hyracks/storage/am/lsm/btree/util/LSMBTreeUtils.java
+++ b/hyracks/hyracks-storage-am-lsm-btree/src/main/java/org/apache/hyracks/storage/am/lsm/btree/util/LSMBTreeUtils.java
@@ -28,11 +28,11 @@
import org.apache.hyracks.storage.am.btree.frames.BTreeNSMInteriorFrameFactory;
import org.apache.hyracks.storage.am.btree.frames.BTreeNSMLeafFrameFactory;
import org.apache.hyracks.storage.am.btree.impls.BTree;
-import org.apache.hyracks.storage.am.common.api.IFreePageManagerFactory;
+import org.apache.hyracks.storage.am.common.api.IMetadataManagerFactory;
import org.apache.hyracks.storage.am.common.api.ITreeIndexFrameFactory;
import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrameFactory;
import org.apache.hyracks.storage.am.common.frames.LIFOMetaDataFrameFactory;
-import org.apache.hyracks.storage.am.common.freepage.LinkedListFreePageManagerFactory;
+import org.apache.hyracks.storage.am.common.freepage.LinkedListMetadataManagerFactory;
import org.apache.hyracks.storage.am.common.tuples.TypeAwareTupleWriterFactory;
import org.apache.hyracks.storage.am.lsm.btree.impls.ExternalBTree;
import org.apache.hyracks.storage.am.lsm.btree.impls.ExternalBTreeWithBuddy;
@@ -74,7 +74,7 @@
ITreeIndexFrameFactory deleteLeafFrameFactory = new BTreeNSMLeafFrameFactory(deleteTupleWriterFactory);
ITreeIndexFrameFactory interiorFrameFactory = new BTreeNSMInteriorFrameFactory(insertTupleWriterFactory);
ITreeIndexMetaDataFrameFactory metaFrameFactory = new LIFOMetaDataFrameFactory();
- IFreePageManagerFactory freePageManagerFactory = new LinkedListFreePageManagerFactory(diskBufferCache,
+ IMetadataManagerFactory freePageManagerFactory = new LinkedListMetadataManagerFactory(diskBufferCache,
metaFrameFactory);
TreeIndexFactory<BTree> diskBTreeFactory = new BTreeFactory(diskBufferCache, diskFileMapProvider,
@@ -123,7 +123,7 @@
ITreeIndexFrameFactory deleteLeafFrameFactory = new BTreeNSMLeafFrameFactory(deleteTupleWriterFactory);
ITreeIndexFrameFactory interiorFrameFactory = new BTreeNSMInteriorFrameFactory(insertTupleWriterFactory);
ITreeIndexMetaDataFrameFactory metaFrameFactory = new LIFOMetaDataFrameFactory();
- IFreePageManagerFactory freePageManagerFactory = new LinkedListFreePageManagerFactory(diskBufferCache,
+ IMetadataManagerFactory freePageManagerFactory = new LinkedListMetadataManagerFactory(diskBufferCache,
metaFrameFactory);
// This is the tuple writer that can do both inserts and deletes
LSMBTreeRefrencingTupleWriterFactory referencingTupleWriterFactory = new LSMBTreeRefrencingTupleWriterFactory(
@@ -181,7 +181,7 @@
ITreeIndexFrameFactory copyTupleLeafFrameFactory = new BTreeNSMLeafFrameFactory(copyTupleWriterFactory);
ITreeIndexFrameFactory interiorFrameFactory = new BTreeNSMInteriorFrameFactory(insertTupleWriterFactory);
ITreeIndexMetaDataFrameFactory metaFrameFactory = new LIFOMetaDataFrameFactory();
- IFreePageManagerFactory freePageManagerFactory = new LinkedListFreePageManagerFactory(diskBufferCache,
+ IMetadataManagerFactory freePageManagerFactory = new LinkedListMetadataManagerFactory(diskBufferCache,
metaFrameFactory);
TreeIndexFactory<BTree> diskBTreeFactory = new BTreeFactory(diskBufferCache, diskFileMapProvider,
diff --git a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/api/ILSMComponentFilterManager.java b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/api/ILSMComponentFilterManager.java
index 1a64865..20598ca 100644
--- a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/api/ILSMComponentFilterManager.java
+++ b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/api/ILSMComponentFilterManager.java
@@ -22,7 +22,9 @@
import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.dataflow.common.data.accessors.ITupleReference;
+import org.apache.hyracks.storage.am.common.api.IIndexBulkLoader;
import org.apache.hyracks.storage.am.common.api.ITreeIndex;
+import org.apache.hyracks.storage.am.common.impls.AbstractTreeIndex.AbstractTreeIndexBulkLoader;
public interface ILSMComponentFilterManager {
@@ -31,6 +33,7 @@
public boolean readFilterInfo(ILSMComponentFilter filter, ITreeIndex treeIndex) throws HyracksDataException;
- public void writeFilterInfo(ILSMComponentFilter filter, ITreeIndex treeIndex) throws HyracksDataException;
+ public void writeFilterInfo(ILSMComponentFilter filter, ITreeIndex treeIndex)
+ throws HyracksDataException;
}
diff --git a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/api/ILSMIndexInternal.java b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/api/ILSMIndexInternal.java
index 08be21d..40d32a0 100644
--- a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/api/ILSMIndexInternal.java
+++ b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/api/ILSMIndexInternal.java
@@ -63,7 +63,7 @@
/**
* Populates the context's component holder with a snapshot of the components involved in the operation.
- *
+ *
* @param ctx
* - the operation's context
* @throws HyracksDataException
@@ -75,8 +75,8 @@
public void addInactiveDiskComponent(ILSMComponent diskComponent);
/**
- * Persistent the LSM component
- *
+ * Persist the LSM component
+ *
* @param lsmComponent
* , the component to be persistent
* @throws HyracksDataException
diff --git a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/freepage/VirtualFreePageManager.java b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/freepage/VirtualMetaDataPageManager.java
similarity index 72%
rename from hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/freepage/VirtualFreePageManager.java
rename to hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/freepage/VirtualMetaDataPageManager.java
index d62bc84..08ac2f8 100644
--- a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/freepage/VirtualFreePageManager.java
+++ b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/freepage/VirtualMetaDataPageManager.java
@@ -22,15 +22,16 @@
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hyracks.api.exceptions.HyracksDataException;
-import org.apache.hyracks.storage.am.common.api.IVirtualFreePageManager;
+import org.apache.hyracks.storage.am.common.api.IVirtualMetaDataPageManager;
import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrame;
import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrameFactory;
+import org.apache.hyracks.storage.common.buffercache.ICachedPage;
-public class VirtualFreePageManager implements IVirtualFreePageManager {
+public class VirtualMetaDataPageManager implements IVirtualMetaDataPageManager {
protected final int capacity;
protected final AtomicInteger currentPageId = new AtomicInteger();
- public VirtualFreePageManager(int capacity) {
+ public VirtualMetaDataPageManager(int capacity) {
// We start the currentPageId from 1, because the BTree uses
// the first page as metadata page, and the second page as root page.
// (when returning free pages we first increment, then get)
@@ -94,8 +95,8 @@
@Override
public int getFirstMetadataPage() {
- // Method doesn't make sense for this free page manager.
- return -1;
+ //MD page in a virtual context is always 0, because it is by nature an in-place modification tree
+ return 0;
}
@Override
@@ -117,4 +118,45 @@
}
}
+
+ @Override
+ public void init(ITreeIndexMetaDataFrame metaFrame) throws HyracksDataException {
+ // Method doesn't make sense for this free page manager.
+ }
+
+ @Override
+ public int getFilterPageId() throws HyracksDataException {
+ // Method doesn't make sense for this free page manager.
+ return 0;
+ }
+
+ @Override
+ public void setFilterPageId(int filterPageId) throws HyracksDataException {
+ // Method doesn't make sense for this free page manager.
+ }
+
+ @Override
+ public long getLSN() throws HyracksDataException {
+ // Method doesn't make sense for this free page manager.
+ return -1;
+ }
+
+ @Override
+ public void setLSN(long lsn) throws HyracksDataException {
+ // Method doesn't make sense for this free page manager.
+ }
+
+ public void setFilterPage(ICachedPage page) {
+ // Method doesn't make sense for this free page manager.
+ }
+
+ public ICachedPage getFilterPage(){
+ return null;
+ }
+
+ @Override
+ public boolean appendOnlyMode() {
+ return false;
+ }
+
}
diff --git a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/AbstractLSMIndex.java b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/AbstractLSMIndex.java
index adfadaa..441dda1 100644
--- a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/AbstractLSMIndex.java
+++ b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/AbstractLSMIndex.java
@@ -31,6 +31,7 @@
import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.api.replication.IReplicationJob.ReplicationExecutionType;
import org.apache.hyracks.api.replication.IReplicationJob.ReplicationOperation;
+import org.apache.hyracks.storage.am.bloomfilter.impls.BloomFilter;
import org.apache.hyracks.storage.am.common.api.ITreeIndex;
import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrame;
import org.apache.hyracks.storage.am.lsm.common.api.ILSMComponent;
@@ -134,67 +135,12 @@
filterFields = null;
}
- protected void forceFlushDirtyPages(ITreeIndex treeIndex) throws HyracksDataException {
- int fileId = treeIndex.getFileId();
- IBufferCache bufferCache = treeIndex.getBufferCache();
- // Flush all dirty pages of the tree.
- // By default, metadata and data are flushed asynchronously in the buffercache.
- // This means that the flush issues writes to the OS, but the data may still lie in filesystem buffers.
- ITreeIndexMetaDataFrame metadataFrame = treeIndex.getFreePageManager().getMetaDataFrameFactory().createFrame();
- int startPage = 0;
- int maxPage = treeIndex.getFreePageManager().getMaxPage(metadataFrame);
- forceFlushDirtyPages(bufferCache, fileId, startPage, maxPage);
- }
-
- protected void forceFlushDirtyPages(IBufferCache bufferCache, int fileId, int startPageId, int endPageId)
- throws HyracksDataException {
- for (int i = startPageId; i <= endPageId; i++) {
- ICachedPage page = bufferCache.tryPin(BufferedFileHandle.getDiskPageId(fileId, i));
- // If tryPin returns null, it means the page is not cached, and therefore cannot be dirty.
- if (page == null) {
- continue;
- }
- try {
- bufferCache.flushDirtyPage(page);
- } finally {
- bufferCache.unpin(page);
- }
- }
- // Forces all pages of given file to disk. This guarantees the data makes it to disk.
- // If the index is not durable, then the flush is not necessary.
- if (durable) {
- bufferCache.force(fileId, true);
- }
- }
-
protected void markAsValidInternal(ITreeIndex treeIndex) throws HyracksDataException {
int fileId = treeIndex.getFileId();
IBufferCache bufferCache = treeIndex.getBufferCache();
- ITreeIndexMetaDataFrame metadataFrame = treeIndex.getFreePageManager().getMetaDataFrameFactory().createFrame();
- // Mark the component as a valid component by flushing the metadata page to disk
- int metadataPageId = treeIndex.getFreePageManager().getFirstMetadataPage();
- ICachedPage metadataPage = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, metadataPageId), false);
- metadataPage.acquireWriteLatch();
- try {
- metadataFrame.setPage(metadataPage);
- metadataFrame.setValid(true);
- } finally {
- metadataPage.releaseWriteLatch(true);
- bufferCache.unpin(metadataPage);
- }
-
+ treeIndex.getMetaManager().close();
// WARNING: flushing the metadata page should be done after releasing the write latch; otherwise, the page
// won't be flushed to disk because it won't be dirty until the write latch has been released.
- metadataPage = bufferCache.tryPin(BufferedFileHandle.getDiskPageId(fileId, metadataPageId));
- if (metadataPage != null) {
- try {
- // Flush the single modified page to disk.
- bufferCache.flushDirtyPage(metadataPage);
- } finally {
- bufferCache.unpin(metadataPage);
- }
- }
-
// Force modified metadata page to disk.
// If the index is not durable, then the flush is not necessary.
if (durable) {
@@ -202,6 +148,12 @@
}
}
+ protected void markAsValidInternal(IBufferCache bufferCache, BloomFilter filter) throws HyracksDataException {
+ if(durable){
+ bufferCache.force(filter.getFileId(),true);
+ }
+ }
+
@Override
public void addComponent(ILSMComponent c) throws HyracksDataException {
diskComponents.add(0, c);
diff --git a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/AbstractLSMIndexFileManager.java b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/AbstractLSMIndexFileManager.java
index 3ad1396..c5fccdc 100644
--- a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/AbstractLSMIndexFileManager.java
+++ b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/AbstractLSMIndexFileManager.java
@@ -82,8 +82,11 @@
IBufferCache bufferCache = treeIndex.getBufferCache();
treeIndex.activate();
try {
- int metadataPage = treeIndex.getFreePageManager().getFirstMetadataPage();
- ITreeIndexMetaDataFrame metadataFrame = treeIndex.getFreePageManager().getMetaDataFrameFactory()
+ int metadataPage = treeIndex.getMetaManager().getFirstMetadataPage();
+ if(metadataPage <0 ){
+ return false;
+ }
+ ITreeIndexMetaDataFrame metadataFrame = treeIndex.getMetaManager().getMetaDataFrameFactory()
.createFrame();
ICachedPage page = bufferCache.pin(BufferedFileHandle.getDiskPageId(treeIndex.getFileId(), metadataPage),
false);
diff --git a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/BTreeFactory.java b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/BTreeFactory.java
index a6838f5..5b36adf 100644
--- a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/BTreeFactory.java
+++ b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/BTreeFactory.java
@@ -20,9 +20,10 @@
package org.apache.hyracks.storage.am.lsm.common.impls;
import org.apache.hyracks.api.dataflow.value.IBinaryComparatorFactory;
+import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.api.io.FileReference;
import org.apache.hyracks.storage.am.btree.impls.BTree;
-import org.apache.hyracks.storage.am.common.api.IFreePageManagerFactory;
+import org.apache.hyracks.storage.am.common.api.IMetadataManagerFactory;
import org.apache.hyracks.storage.am.common.api.ITreeIndexFrameFactory;
import org.apache.hyracks.storage.am.common.api.IndexException;
import org.apache.hyracks.storage.common.buffercache.IBufferCache;
@@ -31,7 +32,7 @@
public class BTreeFactory extends TreeIndexFactory<BTree> {
public BTreeFactory(IBufferCache bufferCache, IFileMapProvider fileMapProvider,
- IFreePageManagerFactory freePageManagerFactory, ITreeIndexFrameFactory interiorFrameFactory,
+ IMetadataManagerFactory freePageManagerFactory, ITreeIndexFrameFactory interiorFrameFactory,
ITreeIndexFrameFactory leafFrameFactory, IBinaryComparatorFactory[] cmpFactories, int fieldCount) {
super(bufferCache, fileMapProvider, freePageManagerFactory, interiorFrameFactory, leafFrameFactory,
cmpFactories, fieldCount);
@@ -39,8 +40,12 @@
@Override
public BTree createIndexInstance(FileReference file) throws IndexException {
- return new BTree(bufferCache, fileMapProvider, freePageManagerFactory.createFreePageManager(),
- interiorFrameFactory, leafFrameFactory, cmpFactories, fieldCount, file);
+ try {
+ return new BTree(bufferCache, fileMapProvider, freePageManagerFactory.createFreePageManager(),
+ interiorFrameFactory, leafFrameFactory, cmpFactories, fieldCount, file);
+ } catch (HyracksDataException e) {
+ throw new IndexException(e);
+ }
}
}
\ No newline at end of file
diff --git a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/IndexFactory.java b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/IndexFactory.java
index 9afcee7..c14b9c4 100644
--- a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/IndexFactory.java
+++ b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/IndexFactory.java
@@ -20,7 +20,7 @@
package org.apache.hyracks.storage.am.lsm.common.impls;
import org.apache.hyracks.api.io.FileReference;
-import org.apache.hyracks.storage.am.common.api.IFreePageManagerFactory;
+import org.apache.hyracks.storage.am.common.api.IMetadataManagerFactory;
import org.apache.hyracks.storage.am.common.api.IIndex;
import org.apache.hyracks.storage.am.common.api.IndexException;
import org.apache.hyracks.storage.common.buffercache.IBufferCache;
@@ -30,10 +30,10 @@
protected final IBufferCache bufferCache;
protected final IFileMapProvider fileMapProvider;
- protected final IFreePageManagerFactory freePageManagerFactory;
+ protected final IMetadataManagerFactory freePageManagerFactory;
public IndexFactory(IBufferCache bufferCache, IFileMapProvider fileMapProvider,
- IFreePageManagerFactory freePageManagerFactory) {
+ IMetadataManagerFactory freePageManagerFactory) {
this.bufferCache = bufferCache;
this.fileMapProvider = fileMapProvider;
this.freePageManagerFactory = freePageManagerFactory;
diff --git a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/LSMComponentFilterManager.java b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/LSMComponentFilterManager.java
index a9961c9..d0d7e69 100644
--- a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/LSMComponentFilterManager.java
+++ b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/LSMComponentFilterManager.java
@@ -23,8 +23,10 @@
import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.dataflow.common.data.accessors.ITupleReference;
-import org.apache.hyracks.storage.am.common.api.ITreeIndex;
import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrame;
+import org.apache.hyracks.storage.am.common.api.IMetaDataPageManager;
+import org.apache.hyracks.storage.am.common.api.ITreeIndex;
+import org.apache.hyracks.storage.am.common.freepage.LinkedMetaDataPageManager;
import org.apache.hyracks.storage.am.common.ophelpers.MultiComparator;
import org.apache.hyracks.storage.am.lsm.common.api.ILSMComponentFilter;
import org.apache.hyracks.storage.am.lsm.common.api.ILSMComponentFilterFrame;
@@ -54,29 +56,39 @@
}
@Override
- public void writeFilterInfo(ILSMComponentFilter filter, ITreeIndex treeIndex) throws HyracksDataException {
+ public void writeFilterInfo(ILSMComponentFilter filter, ITreeIndex treeIndex ) throws HyracksDataException {
+ IMetaDataPageManager treeMetaManager = treeIndex.getMetaManager();
+ ICachedPage filterPage = null;
+ int componentFilterPageId = treeMetaManager.getFilterPageId();
+ boolean appendOnly = false;
int fileId = treeIndex.getFileId();
- ITreeIndexMetaDataFrame metadataFrame = treeIndex.getFreePageManager().getMetaDataFrameFactory().createFrame();
-
- // Read the filter page from the first metadata page of the tree.
- // If it is has not been created yet, then create a new one.
- int metadataPageId = treeIndex.getFreePageManager().getFirstMetadataPage();
- ICachedPage metadataPage = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, metadataPageId), false);
- metadataPage.acquireWriteLatch();
- int componentFilterPageId;
- try {
- metadataFrame.setPage(metadataPage);
- componentFilterPageId = metadataFrame.getLSMComponentFilterPageId();
- if (componentFilterPageId == -1) {
- componentFilterPageId = treeIndex.getFreePageManager().getFreePage(metadataFrame);
+        if (componentFilterPageId == LinkedMetaDataPageManager.NO_FILTER_IN_PLACE) { // in-place mode, no filter page yet
+ ITreeIndexMetaDataFrame metadataFrame = treeIndex.getMetaManager().getMetaDataFrameFactory().createFrame();
+ int metaPageId = treeMetaManager.getFirstMetadataPage();
+ ICachedPage metadataPage = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, metaPageId), false);
+ metadataPage.acquireWriteLatch();
+ try{
+ metadataFrame.setPage(metadataPage);
+ componentFilterPageId = treeIndex.getMetaManager().getFreePage(metadataFrame);
metadataFrame.setLSMComponentFilterPageId(componentFilterPageId);
}
- } finally {
- metadataPage.releaseWriteLatch(true);
- bufferCache.unpin(metadataPage);
+ finally{
+ metadataPage.releaseWriteLatch(true);
+ bufferCache.unpin(metadataPage);
+ }
+ }
+ else if (componentFilterPageId <= LinkedMetaDataPageManager.NO_FILTER_APPEND_ONLY){
+ appendOnly = true;
+ filterPage = treeMetaManager.getFilterPage();
+ if(filterPage == null){
+ treeMetaManager.setFilterPage(bufferCache.confiscatePage(IBufferCache.INVALID_DPID));
+ filterPage = treeMetaManager.getFilterPage();
+ }
+ }
+ else{// in place, not a new filter page
+ filterPage = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, componentFilterPageId), true);
}
- ICachedPage filterPage = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, componentFilterPageId), true);
filterPage.acquireWriteLatch();
try {
ILSMComponentFilterFrame filterFrame = filterFrameFactory.createFrame();
@@ -90,32 +102,28 @@
}
} finally {
- filterPage.releaseWriteLatch(true);
- bufferCache.unpin(filterPage);
+            if (!appendOnly) {
+                filterPage.releaseWriteLatch(true);
+                bufferCache.unpin(filterPage);
+            }
+ else{
+ filterPage.releaseWriteLatch(false);
+ }
}
}
@Override
public boolean readFilterInfo(ILSMComponentFilter filter, ITreeIndex treeIndex) throws HyracksDataException {
int fileId = treeIndex.getFileId();
- ITreeIndexMetaDataFrame metadataFrame = treeIndex.getFreePageManager().getMetaDataFrameFactory().createFrame();
- int metadataPageId = treeIndex.getFreePageManager().getFirstMetadataPage();
- ICachedPage metadataPage = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, metadataPageId), false);
- metadataPage.acquireReadLatch();
- int componentFilterPageId;
- try {
- metadataFrame.setPage(metadataPage);
- componentFilterPageId = metadataFrame.getLSMComponentFilterPageId();
- if (componentFilterPageId == -1) {
- return false;
- }
- } finally {
- metadataPage.releaseReadLatch();
- bufferCache.unpin(metadataPage);
- }
+ IMetaDataPageManager treeMetaManager = treeIndex.getMetaManager();
+
+ int componentFilterPageId = treeMetaManager.getFilterPageId();
+ if (componentFilterPageId < 0)
+ return false;
ICachedPage filterPage = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, componentFilterPageId), true);
+
filterPage.acquireReadLatch();
try {
ILSMComponentFilterFrame filterFrame = filterFrameFactory.createFrame();
@@ -135,4 +143,5 @@
}
return true;
}
+
}
diff --git a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/MultitenantVirtualBufferCache.java b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/MultitenantVirtualBufferCache.java
index 7dceb1d..1ef64ee 100644
--- a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/MultitenantVirtualBufferCache.java
+++ b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/MultitenantVirtualBufferCache.java
@@ -18,14 +18,19 @@
*/
package org.apache.hyracks.storage.am.lsm.common.impls;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.api.io.FileReference;
import org.apache.hyracks.api.replication.IIOReplicationManager;
import org.apache.hyracks.storage.am.lsm.common.api.IVirtualBufferCache;
import org.apache.hyracks.storage.common.buffercache.ICachedPage;
+import org.apache.hyracks.storage.common.buffercache.IFIFOPageQueue;
import org.apache.hyracks.storage.common.file.IFileMapManager;
public class MultitenantVirtualBufferCache implements IVirtualBufferCache {
+    private static final Logger LOGGER = Logger.getLogger(MultitenantVirtualBufferCache.class.getName());
private final IVirtualBufferCache vbc;
private int openCount;
@@ -125,30 +130,64 @@
//These 4 methods are not applicable here
@Override
public int createMemFile() throws HyracksDataException {
- // TODO Auto-generated method stub
- return 0;
+ throw new UnsupportedOperationException("Virtual Pages are not a valid concept in this context");
}
@Override
public void deleteMemFile(int fileId) throws HyracksDataException {
- // TODO Auto-generated method stub
-
+ throw new UnsupportedOperationException("Virtual Pages are not a valid concept in this context");
}
@Override
- public ICachedPage pinVirtual(long vpid) throws HyracksDataException {
- // TODO Auto-generated method stub
- return null;
+ public int getNumPagesOfFile(int fileId) throws HyracksDataException {
+ throw new UnsupportedOperationException();
}
@Override
- public ICachedPage unpinVirtual(long vpid, long dpid) throws HyracksDataException {
- // TODO Auto-generated method stub
- return null;
+ public void adviseWontNeed(ICachedPage page) {
+ if (LOGGER.isLoggable(Level.INFO)) {
+ LOGGER.log(Level.INFO, "Calling adviseWontNeed on " + this.getClass().getName()
+ + " makes no sense as this BufferCache cannot evict pages");
+ }
}
@Override
- public int getFileReferenceCount(int fileId) {
+ public ICachedPage confiscatePage(long dpid) throws HyracksDataException {
+ return vbc.confiscatePage(dpid);
+ }
+
+ @Override
+ public void returnPage(ICachedPage page) {
+ vbc.returnPage(page);
+ }
+
+ @Override
+ public IFIFOPageQueue createFIFOQueue() {
+ throw new UnsupportedOperationException("Virtual buffer caches don't have FIFO writers");
+ }
+
+ @Override
+ public void finishQueue() {
+ throw new UnsupportedOperationException("Virtual buffer caches don't have FIFO writers");
+ }
+
+ @Override
+ public void copyPage(ICachedPage src, ICachedPage dst) {
+
+ }
+
+ @Override
+ public void setPageDiskId(ICachedPage page, long dpid) {
+
+ }
+
+ @Override
+ public void returnPage(ICachedPage page, boolean reinsert) {
+
+ }
+
+ @Override
+    public int getFileReferenceCount(int fileId) {
return 0;
}
@@ -161,4 +200,9 @@
public IIOReplicationManager getIOReplicationManager() {
return null;
}
+
+ @Override
+ public void purgeHandle(int fileId) throws HyracksDataException {
+
+ }
}
diff --git a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/TreeIndexFactory.java b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/TreeIndexFactory.java
index 646aa9c..640b9b07 100644
--- a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/TreeIndexFactory.java
+++ b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/TreeIndexFactory.java
@@ -20,7 +20,7 @@
package org.apache.hyracks.storage.am.lsm.common.impls;
import org.apache.hyracks.api.dataflow.value.IBinaryComparatorFactory;
-import org.apache.hyracks.storage.am.common.api.IFreePageManagerFactory;
+import org.apache.hyracks.storage.am.common.api.IMetadataManagerFactory;
import org.apache.hyracks.storage.am.common.api.ITreeIndex;
import org.apache.hyracks.storage.am.common.api.ITreeIndexFrameFactory;
import org.apache.hyracks.storage.common.buffercache.IBufferCache;
@@ -34,7 +34,7 @@
protected final int fieldCount;
public TreeIndexFactory(IBufferCache bufferCache, IFileMapProvider fileMapProvider,
- IFreePageManagerFactory freePageManagerFactory, ITreeIndexFrameFactory interiorFrameFactory,
+ IMetadataManagerFactory freePageManagerFactory, ITreeIndexFrameFactory interiorFrameFactory,
ITreeIndexFrameFactory leafFrameFactory, IBinaryComparatorFactory[] cmpFactories, int fieldCount) {
super(bufferCache, fileMapProvider, freePageManagerFactory);
this.interiorFrameFactory = interiorFrameFactory;
diff --git a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/VirtualBufferCache.java b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/VirtualBufferCache.java
index 54c4d7c..920a1d5 100644
--- a/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/VirtualBufferCache.java
+++ b/hyracks/hyracks-storage-am-lsm-common/src/main/java/org/apache/hyracks/storage/am/lsm/common/impls/VirtualBufferCache.java
@@ -24,6 +24,8 @@
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.logging.Level;
+import java.util.logging.Logger;
import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.api.io.FileReference;
@@ -31,11 +33,15 @@
import org.apache.hyracks.storage.am.lsm.common.api.IVirtualBufferCache;
import org.apache.hyracks.storage.common.buffercache.ICacheMemoryAllocator;
import org.apache.hyracks.storage.common.buffercache.ICachedPage;
+import org.apache.hyracks.storage.common.buffercache.IFIFOPageQueue;
+import org.apache.hyracks.storage.common.buffercache.IQueueInfo;
import org.apache.hyracks.storage.common.file.BufferedFileHandle;
import org.apache.hyracks.storage.common.file.IFileMapManager;
import org.apache.hyracks.storage.common.file.TransientFileMapManager;
public class VirtualBufferCache implements IVirtualBufferCache {
+    private static final Logger LOGGER = Logger.getLogger(VirtualBufferCache.class.getName());
+
private static final int OVERFLOW_PADDING = 8;
private final ICacheMemoryAllocator allocator;
@@ -344,32 +350,78 @@
public void releaseWriteLatch(boolean markDirty) {
latch.writeLock().unlock();
}
+ public boolean confiscated() {
+ return false;
+ }
+
+ @Override
+ public IQueueInfo getQueueInfo() {
+ return null;
+ }
+
+ @Override
+ public void setQueueInfo(IQueueInfo queueInfo) {
+ throw new UnsupportedOperationException();
+ }
}
//These 4 methods aren't applicable here.
@Override
public int createMemFile() throws HyracksDataException {
- // TODO Auto-generated method stub
return 0;
}
@Override
public void deleteMemFile(int fileId) throws HyracksDataException {
- // TODO Auto-generated method stub
+ }
+
+ @Override
+ public int getNumPagesOfFile(int fileId) throws HyracksDataException {
+ return numPages;
+ }
+
+ @Override
+ public void adviseWontNeed(ICachedPage page) {
+ if (LOGGER.isLoggable(Level.INFO)) {
+ LOGGER.log(Level.INFO, "Calling adviseWontNeed on " + this.getClass().getName()
+ + " makes no sense as this BufferCache cannot evict pages");
+ }
+ }
+
+ @Override
+ public void returnPage(ICachedPage page) {
}
@Override
- public ICachedPage pinVirtual(long vpid) throws HyracksDataException {
- // TODO Auto-generated method stub
- return null;
+ public IFIFOPageQueue createFIFOQueue() {
+ throw new UnsupportedOperationException("Virtual buffer caches don't have FIFO writers");
}
@Override
- public ICachedPage unpinVirtual(long vpid, long dpid) throws HyracksDataException {
- // TODO Auto-generated method stub
- return null;
+ public void finishQueue() {
+ throw new UnsupportedOperationException("Virtual buffer caches don't have FIFO writers");
+ }
+
+ @Override
+ public ICachedPage confiscatePage(long dpid) {
+ throw new UnsupportedOperationException("Virtual buffer caches don't have FIFO writers");
+ }
+
+ @Override
+ public void copyPage(ICachedPage src, ICachedPage dst) {
+ throw new UnsupportedOperationException("Virtual buffer caches don't have FIFO writers");
+ }
+
+ @Override
+ public void setPageDiskId(ICachedPage page, long dpid) {
+
+ }
+
+ @Override
+ public void returnPage(ICachedPage page, boolean reinsert) {
+ throw new UnsupportedOperationException("Virtual buffer caches don't have FIFO writers");
}
@Override
@@ -386,4 +438,9 @@
public IIOReplicationManager getIOReplicationManager() {
return null;
}
+
+ @Override
+ public void purgeHandle(int fileId) throws HyracksDataException {
+
+ }
}
diff --git a/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/impls/LSMInvertedIndex.java b/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/impls/LSMInvertedIndex.java
index d7a2611..1f8feca 100644
--- a/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/impls/LSMInvertedIndex.java
+++ b/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/impls/LSMInvertedIndex.java
@@ -38,16 +38,7 @@
import org.apache.hyracks.storage.am.btree.impls.BTree.BTreeAccessor;
import org.apache.hyracks.storage.am.btree.impls.RangePredicate;
import org.apache.hyracks.storage.am.btree.util.BTreeUtils;
-import org.apache.hyracks.storage.am.common.api.ICursorInitialState;
-import org.apache.hyracks.storage.am.common.api.IIndexAccessor;
-import org.apache.hyracks.storage.am.common.api.IIndexBulkLoader;
-import org.apache.hyracks.storage.am.common.api.IIndexCursor;
-import org.apache.hyracks.storage.am.common.api.IIndexOperationContext;
-import org.apache.hyracks.storage.am.common.api.IModificationOperationCallback;
-import org.apache.hyracks.storage.am.common.api.ISearchOperationCallback;
-import org.apache.hyracks.storage.am.common.api.ISearchPredicate;
-import org.apache.hyracks.storage.am.common.api.IVirtualFreePageManager;
-import org.apache.hyracks.storage.am.common.api.IndexException;
+import org.apache.hyracks.storage.am.common.api.*;
import org.apache.hyracks.storage.am.common.exceptions.TreeIndexDuplicateKeyException;
import org.apache.hyracks.storage.am.common.impls.AbstractSearchPredicate;
import org.apache.hyracks.storage.am.common.impls.NoOpOperationCallback;
@@ -69,7 +60,7 @@
import org.apache.hyracks.storage.am.lsm.common.api.ILSMMergePolicy;
import org.apache.hyracks.storage.am.lsm.common.api.ILSMOperationTracker;
import org.apache.hyracks.storage.am.lsm.common.api.IVirtualBufferCache;
-import org.apache.hyracks.storage.am.lsm.common.freepage.VirtualFreePageManager;
+import org.apache.hyracks.storage.am.lsm.common.freepage.VirtualMetaDataPageManager;
import org.apache.hyracks.storage.am.lsm.common.impls.AbstractLSMIndex;
import org.apache.hyracks.storage.am.lsm.common.impls.BTreeFactory;
import org.apache.hyracks.storage.am.lsm.common.impls.BlockingIOOperationCallbackWrapper;
@@ -115,8 +106,7 @@
IBinaryComparatorFactory[] tokenCmpFactories, IBinaryTokenizerFactory tokenizerFactory,
ILSMMergePolicy mergePolicy, ILSMOperationTracker opTracker, ILSMIOOperationScheduler ioScheduler,
ILSMIOOperationCallback ioOpCallback, int[] invertedIndexFields, int[] filterFields,
- int[] filterFieldsForNonBulkLoadOps, int[] invertedIndexFieldsForNonBulkLoadOps, boolean durable)
- throws IndexException {
+ int[] filterFieldsForNonBulkLoadOps, int[] invertedIndexFieldsForNonBulkLoadOps, boolean durable) throws IndexException {
super(virtualBufferCaches, diskInvIndexFactory.getBufferCache(), fileManager, diskFileMapProvider,
bloomFilterFalsePositiveRate, mergePolicy, opTracker, ioScheduler, ioOpCallback, filterFrameFactory,
filterManager, filterFields, durable);
@@ -136,11 +126,11 @@
int i = 0;
for (IVirtualBufferCache virtualBufferCache : virtualBufferCaches) {
InMemoryInvertedIndex memInvIndex = createInMemoryInvertedIndex(virtualBufferCache,
- new VirtualFreePageManager(virtualBufferCache.getNumPages()), i);
- BTree deleteKeysBTree = BTreeUtils.createBTree(virtualBufferCache, new VirtualFreePageManager(
- virtualBufferCache.getNumPages()), virtualBufferCache.getFileMapProvider(), invListTypeTraits,
- invListCmpFactories, BTreeLeafFrameType.REGULAR_NSM,
- new FileReference(new File(fileManager.getBaseDir() + "_virtual_del_" + i)));
+ new VirtualMetaDataPageManager(virtualBufferCache.getNumPages()), i);
+ BTree deleteKeysBTree = BTreeUtils.createBTree(virtualBufferCache, new VirtualMetaDataPageManager(
+                    virtualBufferCache.getNumPages()), virtualBufferCache.getFileMapProvider(),
+ invListTypeTraits, invListCmpFactories, BTreeLeafFrameType.REGULAR_NSM, new FileReference(new File(
+ fileManager.getBaseDir() + "_virtual_del_" + i)));
LSMInvertedIndexMemoryComponent mutableComponent = new LSMInvertedIndexMemoryComponent(memInvIndex,
deleteKeysBTree, virtualBufferCache, i == 0 ? true : false, filterFactory == null ? null
: filterFactory.createLSMComponentFilter());
@@ -480,7 +470,7 @@
memBTreeAccessor.search(scanCursor, nullPred);
// Bulk load the disk inverted index from the in-memory inverted index.
- IIndexBulkLoader invIndexBulkLoader = diskInvertedIndex.createBulkLoader(1.0f, false, 0L, false);
+ IIndexBulkLoader invIndexBulkLoader = diskInvertedIndex.createBulkLoader(1.0f, false, 0L, false, true);
try {
while (scanCursor.hasNext()) {
scanCursor.next();
@@ -489,6 +479,15 @@
} finally {
scanCursor.close();
}
+ if (component.getLSMComponentFilter() != null) {
+ List<ITupleReference> filterTuples = new ArrayList<ITupleReference>();
+ filterTuples.add(flushingComponent.getLSMComponentFilter().getMinTuple());
+ filterTuples.add(flushingComponent.getLSMComponentFilter().getMaxTuple());
+ filterManager.updateFilterInfo(component.getLSMComponentFilter(), filterTuples);
+ filterManager.writeFilterInfo(component.getLSMComponentFilter(),
+ ((OnDiskInvertedIndex) component.getInvIndex()).getBTree()
+ );
+ }
invIndexBulkLoader.end();
IIndexAccessor deletedKeysBTreeAccessor = flushingComponent.getDeletedKeysBTree().createAccessor(
@@ -506,44 +505,34 @@
btreeCountingCursor.close();
}
- if (numBTreeTuples > 0) {
- int maxBucketsPerElement = BloomCalculations.maxBucketsPerElement(numBTreeTuples);
- BloomFilterSpecification bloomFilterSpec = BloomCalculations.computeBloomSpec(maxBucketsPerElement,
- bloomFilterFalsePositiveRate);
+ int maxBucketsPerElement = BloomCalculations.maxBucketsPerElement(numBTreeTuples);
+ BloomFilterSpecification bloomFilterSpec = BloomCalculations.computeBloomSpec(maxBucketsPerElement,
+ bloomFilterFalsePositiveRate);
- // Create an BTree instance for the deleted keys.
- BTree diskDeletedKeysBTree = component.getDeletedKeysBTree();
+ // Create an BTree instance for the deleted keys.
+ BTree diskDeletedKeysBTree = component.getDeletedKeysBTree();
- // Create a scan cursor on the deleted keys BTree underlying the in-memory inverted index.
- IIndexCursor deletedKeysScanCursor = deletedKeysBTreeAccessor.createSearchCursor(false);
- deletedKeysBTreeAccessor.search(deletedKeysScanCursor, nullPred);
+ // Create a scan cursor on the deleted keys BTree underlying the in-memory inverted index.
+ IIndexCursor deletedKeysScanCursor = deletedKeysBTreeAccessor.createSearchCursor(false);
+ deletedKeysBTreeAccessor.search(deletedKeysScanCursor, nullPred);
- // Bulk load the deleted-keys BTree.
- IIndexBulkLoader deletedKeysBTreeBulkLoader = diskDeletedKeysBTree.createBulkLoader(1.0f, false, 0L, false);
- IIndexBulkLoader builder = component.getBloomFilter().createBuilder(numBTreeTuples,
- bloomFilterSpec.getNumHashes(), bloomFilterSpec.getNumBucketsPerElements());
+ // Bulk load the deleted-keys BTree.
+ IIndexBulkLoader deletedKeysBTreeBulkLoader = diskDeletedKeysBTree.createBulkLoader(1.0f, false, 0L, false,
+ true);
+ IIndexBulkLoader builder = component.getBloomFilter().createBuilder(numBTreeTuples,
+ bloomFilterSpec.getNumHashes(), bloomFilterSpec.getNumBucketsPerElements());
- try {
- while (deletedKeysScanCursor.hasNext()) {
- deletedKeysScanCursor.next();
- deletedKeysBTreeBulkLoader.add(deletedKeysScanCursor.getTuple());
- builder.add(deletedKeysScanCursor.getTuple());
- }
- } finally {
- deletedKeysScanCursor.close();
- builder.end();
+ try {
+ while (deletedKeysScanCursor.hasNext()) {
+ deletedKeysScanCursor.next();
+ deletedKeysBTreeBulkLoader.add(deletedKeysScanCursor.getTuple());
+ builder.add(deletedKeysScanCursor.getTuple());
}
- deletedKeysBTreeBulkLoader.end();
+ } finally {
+ deletedKeysScanCursor.close();
+ builder.end();
}
-
- if (component.getLSMComponentFilter() != null) {
- List<ITupleReference> filterTuples = new ArrayList<ITupleReference>();
- filterTuples.add(flushingComponent.getLSMComponentFilter().getMinTuple());
- filterTuples.add(flushingComponent.getLSMComponentFilter().getMaxTuple());
- filterManager.updateFilterInfo(component.getLSMComponentFilter(), filterTuples);
- filterManager.writeFilterInfo(component.getLSMComponentFilter(),
- ((OnDiskInvertedIndex) component.getInvIndex()).getBTree());
- }
+ deletedKeysBTreeBulkLoader.end();
return component;
}
@@ -566,7 +555,7 @@
String lastFileName = lastInvIndex.getBTree().getFileReference().getFile().getName();
LSMComponentFileReferences relMergeFileRefs = fileManager.getRelMergeFileReference(firstFileName, lastFileName);
- ILSMIndexAccessorInternal accessor = new LSMInvertedIndexAccessor(lsmHarness, ictx);
+ ILSMIndexAccessorInternal accessor = new LSMInvertedIndexAccessor(lsmHarness, ctx);
ioScheduler.scheduleOperation(new LSMInvertedIndexMergeOperation(accessor, mergingComponents, cursor,
relMergeFileRefs.getInsertIndexFileReference(), relMergeFileRefs.getDeleteIndexFileReference(),
relMergeFileRefs.getBloomFilterFileReference(), callback, fileManager.getBaseDir()));
@@ -601,7 +590,7 @@
search(opCtx, btreeCursor, mergePred);
BTree btree = component.getDeletedKeysBTree();
- IIndexBulkLoader btreeBulkLoader = btree.createBulkLoader(1.0f, true, 0L, false);
+ IIndexBulkLoader btreeBulkLoader = btree.createBulkLoader(1.0f, true, 0L, false, true);
long numElements = 0L;
for (int i = 0; i < mergeOp.getMergingComponents().size(); ++i) {
@@ -626,9 +615,13 @@
builder.end();
}
btreeBulkLoader.end();
+ } else {
+ BTree btree = component.getDeletedKeysBTree();
+ IIndexBulkLoader btreeBulkLoader = btree.createBulkLoader(1.0f, true, 0L, false, true);
+ btreeBulkLoader.end();
}
- IIndexBulkLoader invIndexBulkLoader = mergedDiskInvertedIndex.createBulkLoader(1.0f, true, 0L, false);
+ IIndexBulkLoader invIndexBulkLoader = mergedDiskInvertedIndex.createBulkLoader(1.0f, true, 0L, false, true);
try {
while (cursor.hasNext()) {
cursor.next();
@@ -638,8 +631,6 @@
} finally {
cursor.close();
}
- invIndexBulkLoader.end();
-
if (component.getLSMComponentFilter() != null) {
List<ITupleReference> filterTuples = new ArrayList<ITupleReference>();
for (int i = 0; i < mergeOp.getMergingComponents().size(); ++i) {
@@ -648,8 +639,10 @@
}
filterManager.updateFilterInfo(component.getLSMComponentFilter(), filterTuples);
filterManager.writeFilterInfo(component.getLSMComponentFilter(),
- ((OnDiskInvertedIndex) component.getInvIndex()).getBTree());
+ ((OnDiskInvertedIndex) component.getInvIndex()).getBTree()
+ );
}
+ invIndexBulkLoader.end();
return component;
}
@@ -664,7 +657,17 @@
public IIndexBulkLoader createBulkLoader(float fillFactor, boolean verifyInput, long numElementsHint,
boolean checkIfEmptyIndex) throws IndexException {
try {
- return new LSMInvertedIndexBulkLoader(fillFactor, verifyInput, numElementsHint, checkIfEmptyIndex);
+ return new LSMInvertedIndexBulkLoader(fillFactor, verifyInput, numElementsHint, checkIfEmptyIndex, true);
+ } catch (HyracksDataException e) {
+ throw new IndexException(e);
+ }
+ }
+
+ @Override
+ public IIndexBulkLoader createBulkLoader(float fillFactor, boolean verifyInput, long numElementsHint,
+ boolean checkIfEmptyIndex, boolean appendOnly) throws IndexException {
+ try {
+ return new LSMInvertedIndexBulkLoader(fillFactor, verifyInput, numElementsHint, checkIfEmptyIndex, appendOnly);
} catch (HyracksDataException e) {
throw new IndexException(e);
}
@@ -673,6 +676,7 @@
public class LSMInvertedIndexBulkLoader implements IIndexBulkLoader {
private final ILSMComponent component;
private final IIndexBulkLoader invIndexBulkLoader;
+ private final IIndexBulkLoader deletedKeysBTreeBulkLoader;
private boolean cleanedUpArtifacts = false;
private boolean isEmptyComponent = true;
public final PermutingTupleReference indexTuple;
@@ -680,7 +684,7 @@
public final MultiComparator filterCmp;
public LSMInvertedIndexBulkLoader(float fillFactor, boolean verifyInput, long numElementsHint,
- boolean checkIfEmptyIndex) throws IndexException, HyracksDataException {
+ boolean checkIfEmptyIndex, boolean appendOnly) throws IndexException, HyracksDataException {
if (checkIfEmptyIndex && !isEmptyIndex()) {
throw new IndexException("Cannot load an index that is not empty");
}
@@ -692,7 +696,11 @@
throw new IndexException(e);
}
invIndexBulkLoader = ((LSMInvertedIndexDiskComponent) component).getInvIndex().createBulkLoader(fillFactor,
- verifyInput, numElementsHint, false);
+ verifyInput, numElementsHint, false, true);
+
+ //validity of the component depends on the deleted keys file being there even if it's empty.
+ deletedKeysBTreeBulkLoader = ((LSMInvertedIndexDiskComponent) component).getDeletedKeysBTree().createBulkLoader(fillFactor,
+ verifyInput, numElementsHint, false, true);
if (filterFields != null) {
indexTuple = new PermutingTupleReference(invertedIndexFields);
@@ -747,13 +755,13 @@
@Override
public void end() throws IndexException, HyracksDataException {
if (!cleanedUpArtifacts) {
- invIndexBulkLoader.end();
-
if (component.getLSMComponentFilter() != null) {
filterManager.writeFilterInfo(component.getLSMComponentFilter(),
(((OnDiskInvertedIndex) ((LSMInvertedIndexDiskComponent) component).getInvIndex())
.getBTree()));
}
+ invIndexBulkLoader.end();
+ deletedKeysBTreeBulkLoader.end();
if (isEmptyComponent) {
cleanupArtifacts();
@@ -762,10 +770,21 @@
}
}
}
+
+ @Override
+ public void abort() throws HyracksDataException {
+ if( invIndexBulkLoader != null){
+ invIndexBulkLoader.abort();
+ }
+
+ if(deletedKeysBTreeBulkLoader != null){
+ deletedKeysBTreeBulkLoader.abort();
+ }
+ }
}
protected InMemoryInvertedIndex createInMemoryInvertedIndex(IVirtualBufferCache virtualBufferCache,
- IVirtualFreePageManager virtualFreePageManager, int id) throws IndexException {
+ IVirtualMetaDataPageManager virtualFreePageManager, int id) throws IndexException {
return InvertedIndexUtils.createInMemoryBTreeInvertedindex(virtualBufferCache, virtualFreePageManager,
invListTypeTraits, invListCmpFactories, tokenTypeTraits, tokenCmpFactories, tokenizerFactory,
new FileReference(new File(fileManager.getBaseDir() + "_virtual_vocab_" + id)));
@@ -778,15 +797,15 @@
.createLSMComponentInstance(new LSMComponentFileReferences(dictBTreeFileRef, btreeFileRef,
bloomFilterFileRef));
if (create) {
- component.getInvIndex().create();
- component.getDeletedKeysBTree().create();
component.getBloomFilter().create();
+ component.getBloomFilter().activate();
+ } else {
+ component.getInvIndex().activate();
+ component.getDeletedKeysBTree().activate();
+ component.getBloomFilter().activate();
}
// Will be closed during cleanup of merge().
- component.getInvIndex().activate();
- component.getDeletedKeysBTree().activate();
- component.getBloomFilter().activate();
- if (component.getLSMComponentFilter() != null) {
+ if (component.getLSMComponentFilter() != null && !create) {
filterManager.readFilterInfo(component.getLSMComponentFilter(),
((OnDiskInvertedIndex) component.getInvIndex()).getBTree());
}
@@ -851,14 +870,6 @@
return tokenizerFactory;
}
- protected void forceFlushInvListsFileDirtyPages(OnDiskInvertedIndex invIndex) throws HyracksDataException {
- int fileId = invIndex.getInvListsFileId();
- IBufferCache bufferCache = invIndex.getBufferCache();
- int startPageId = 0;
- int maxPageId = invIndex.getInvListsMaxPageId();
- forceFlushDirtyPages(bufferCache, fileId, startPageId, maxPageId);
- }
-
@Override
public void markAsValid(ILSMComponent lsmComponent) throws HyracksDataException {
LSMInvertedIndexDiskComponent invIndexComponent = (LSMInvertedIndexDiskComponent) lsmComponent;
@@ -868,15 +879,14 @@
IBufferCache bufferCache = invIndex.getBufferCache();
int startPage = 0;
int maxPage = invIndexComponent.getBloomFilter().getNumPages();
- forceFlushDirtyPages(bufferCache, fileId, startPage, maxPage);
+
+ markAsValidInternal(invIndex.getBufferCache(),invIndexComponent.getBloomFilter());
// Flush inverted index second.
- forceFlushDirtyPages(invIndex.getBTree());
- forceFlushInvListsFileDirtyPages(invIndex);
+ bufferCache.force(invIndex.getInvListsFileId(),true);
markAsValidInternal(invIndex.getBTree());
// Flush deleted keys BTree.
- forceFlushDirtyPages(invIndexComponent.getDeletedKeysBTree());
markAsValidInternal(invIndexComponent.getDeletedKeysBTree());
}
diff --git a/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/impls/PartitionedLSMInvertedIndex.java b/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/impls/PartitionedLSMInvertedIndex.java
index 3d1808e..7aecbd1 100644
--- a/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/impls/PartitionedLSMInvertedIndex.java
+++ b/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/impls/PartitionedLSMInvertedIndex.java
@@ -26,7 +26,7 @@
import org.apache.hyracks.api.dataflow.value.ITypeTraits;
import org.apache.hyracks.api.io.FileReference;
import org.apache.hyracks.storage.am.bloomfilter.impls.BloomFilterFactory;
-import org.apache.hyracks.storage.am.common.api.IVirtualFreePageManager;
+import org.apache.hyracks.storage.am.common.api.IVirtualMetaDataPageManager;
import org.apache.hyracks.storage.am.common.api.IndexException;
import org.apache.hyracks.storage.am.lsm.common.api.ILSMComponentFilterFactory;
import org.apache.hyracks.storage.am.lsm.common.api.ILSMComponentFilterFrameFactory;
@@ -67,7 +67,7 @@
@Override
protected InMemoryInvertedIndex createInMemoryInvertedIndex(IVirtualBufferCache virtualBufferCache,
- IVirtualFreePageManager virtualFreePageManager, int id) throws IndexException {
+ IVirtualMetaDataPageManager virtualFreePageManager, int id) throws IndexException {
return InvertedIndexUtils.createPartitionedInMemoryBTreeInvertedindex(virtualBufferCache,
virtualFreePageManager, invListTypeTraits, invListCmpFactories, tokenTypeTraits, tokenCmpFactories,
tokenizerFactory, new FileReference(new File(fileManager.getBaseDir() + "_virtual_vocab_" + id)));
diff --git a/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/inmemory/InMemoryInvertedIndex.java b/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/inmemory/InMemoryInvertedIndex.java
index 20d1172..0e26874 100644
--- a/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/inmemory/InMemoryInvertedIndex.java
+++ b/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/inmemory/InMemoryInvertedIndex.java
@@ -28,13 +28,8 @@
import org.apache.hyracks.storage.am.btree.impls.BTree;
import org.apache.hyracks.storage.am.btree.impls.BTree.BTreeAccessor;
import org.apache.hyracks.storage.am.btree.util.BTreeUtils;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
-import org.apache.hyracks.storage.am.common.api.IIndexAccessor;
-import org.apache.hyracks.storage.am.common.api.IIndexBulkLoader;
-import org.apache.hyracks.storage.am.common.api.IIndexOperationContext;
-import org.apache.hyracks.storage.am.common.api.IModificationOperationCallback;
-import org.apache.hyracks.storage.am.common.api.ISearchOperationCallback;
-import org.apache.hyracks.storage.am.common.api.IndexException;
+import org.apache.hyracks.storage.am.common.api.*;
+import org.apache.hyracks.storage.am.common.api.IMetaDataPageManager;
import org.apache.hyracks.storage.am.common.exceptions.TreeIndexDuplicateKeyException;
import org.apache.hyracks.storage.am.common.exceptions.TreeIndexNonExistentKeyException;
import org.apache.hyracks.storage.am.common.ophelpers.IndexOperation;
@@ -56,7 +51,7 @@
protected final ITypeTraits[] btreeTypeTraits;
protected final IBinaryComparatorFactory[] btreeCmpFactories;
- public InMemoryInvertedIndex(IBufferCache virtualBufferCache, IFreePageManager virtualFreePageManager,
+ public InMemoryInvertedIndex(IBufferCache virtualBufferCache, IMetaDataPageManager virtualFreePageManager,
ITypeTraits[] invListTypeTraits, IBinaryComparatorFactory[] invListCmpFactories,
ITypeTraits[] tokenTypeTraits, IBinaryComparatorFactory[] tokenCmpFactories,
IBinaryTokenizerFactory tokenizerFactory, FileReference btreeFileRef) throws BTreeException {
@@ -211,4 +206,10 @@
public boolean hasMemoryComponents() {
return true;
}
+
+ @Override
+ public IIndexBulkLoader createBulkLoader(float fillFactor, boolean verifyInput, long numElementsHint,
+ boolean checkIfEmptyIndex, boolean appendOnly) throws IndexException {
+ throw new UnsupportedOperationException("Bulk load not supported by in-memory inverted index.");
+ }
}
diff --git a/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/inmemory/PartitionedInMemoryInvertedIndex.java b/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/inmemory/PartitionedInMemoryInvertedIndex.java
index 1bdb4a8..273aa16 100644
--- a/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/inmemory/PartitionedInMemoryInvertedIndex.java
+++ b/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/inmemory/PartitionedInMemoryInvertedIndex.java
@@ -28,12 +28,8 @@
import org.apache.hyracks.dataflow.common.data.accessors.ITupleReference;
import org.apache.hyracks.storage.am.btree.exceptions.BTreeException;
import org.apache.hyracks.storage.am.btree.impls.BTree.BTreeAccessor;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
-import org.apache.hyracks.storage.am.common.api.IIndexAccessor;
-import org.apache.hyracks.storage.am.common.api.IIndexOperationContext;
-import org.apache.hyracks.storage.am.common.api.IModificationOperationCallback;
-import org.apache.hyracks.storage.am.common.api.ISearchOperationCallback;
-import org.apache.hyracks.storage.am.common.api.IndexException;
+import org.apache.hyracks.storage.am.common.api.*;
+import org.apache.hyracks.storage.am.common.api.IMetaDataPageManager;
import org.apache.hyracks.storage.am.common.ophelpers.IndexOperation;
import org.apache.hyracks.storage.am.lsm.invertedindex.api.IInvertedIndexSearcher;
import org.apache.hyracks.storage.am.lsm.invertedindex.api.IInvertedListCursor;
@@ -50,7 +46,7 @@
protected short minPartitionIndex = Short.MAX_VALUE;
protected short maxPartitionIndex = Short.MIN_VALUE;
- public PartitionedInMemoryInvertedIndex(IBufferCache memBufferCache, IFreePageManager memFreePageManager,
+ public PartitionedInMemoryInvertedIndex(IBufferCache memBufferCache, IMetaDataPageManager memFreePageManager,
ITypeTraits[] invListTypeTraits, IBinaryComparatorFactory[] invListCmpFactories,
ITypeTraits[] tokenTypeTraits, IBinaryComparatorFactory[] tokenCmpFactories,
IBinaryTokenizerFactory tokenizerFactory, FileReference btreeFileRef) throws BTreeException {
diff --git a/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/ondisk/OnDiskInvertedIndex.java b/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/ondisk/OnDiskInvertedIndex.java
index 40ce89a..5be9d90 100644
--- a/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/ondisk/OnDiskInvertedIndex.java
+++ b/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/ondisk/OnDiskInvertedIndex.java
@@ -33,7 +33,6 @@
import org.apache.hyracks.dataflow.common.comm.io.ArrayTupleBuilder;
import org.apache.hyracks.dataflow.common.comm.io.ArrayTupleReference;
import org.apache.hyracks.dataflow.common.data.accessors.ITupleReference;
-import org.apache.hyracks.dataflow.common.data.marshalling.IntegerSerializerDeserializer;
import org.apache.hyracks.dataflow.common.util.TupleUtils;
import org.apache.hyracks.storage.am.btree.frames.BTreeLeafFrameType;
import org.apache.hyracks.storage.am.btree.impls.BTree;
@@ -62,6 +61,7 @@
import org.apache.hyracks.storage.am.lsm.invertedindex.search.TOccurrenceSearcher;
import org.apache.hyracks.storage.common.buffercache.IBufferCache;
import org.apache.hyracks.storage.common.buffercache.ICachedPage;
+import org.apache.hyracks.storage.common.buffercache.IFIFOPageQueue;
import org.apache.hyracks.storage.common.file.BufferedFileHandle;
import org.apache.hyracks.storage.common.file.IFileMapProvider;
@@ -110,6 +110,7 @@
// Last page id of inverted-lists file (inclusive). Set during bulk load.
protected int invListsMaxPageId = -1;
protected boolean isOpen = false;
+ protected boolean wasOpen = false;
public OnDiskInvertedIndex(IBufferCache bufferCache, IFileMapProvider fileMapProvider,
IInvertedListBuilder invListBuilder, ITypeTraits[] invListTypeTraits,
@@ -136,11 +137,16 @@
@Override
public synchronized void create() throws HyracksDataException {
+ create(false);
+ }
+
+ public synchronized void create(boolean appendOnly) throws HyracksDataException {
if (isOpen) {
throw new HyracksDataException("Failed to create since index is already open.");
}
- btree.create();
-
+ if (!appendOnly) {
+ btree.create();
+ }
boolean fileIsMapped = false;
synchronized (fileMapProvider) {
fileIsMapped = fileMapProvider.isMapped(invListsFile);
@@ -164,11 +170,16 @@
@Override
public synchronized void activate() throws HyracksDataException {
+ activate(false);
+ }
+
+ public synchronized void activate(boolean appendOnly) throws HyracksDataException {
if (isOpen) {
throw new HyracksDataException("Failed to activate the index since it is already activated.");
}
-
- btree.activate();
+ if (!appendOnly) {
+ btree.activate();
+ }
boolean fileIsMapped = false;
synchronized (fileMapProvider) {
fileIsMapped = fileMapProvider.isMapped(invListsFile);
@@ -189,11 +200,12 @@
}
isOpen = true;
+ wasOpen = true;
}
@Override
public synchronized void deactivate() throws HyracksDataException {
- if (!isOpen) {
+ if (!isOpen && wasOpen) {
throw new HyracksDataException("Failed to deactivate the index since it is already deactivated.");
}
@@ -302,8 +314,11 @@
private final boolean verifyInput;
private final MultiComparator allCmp;
+ private IFIFOPageQueue queue;
+
public OnDiskInvertedIndexBulkLoader(float btreeFillFactor, boolean verifyInput, long numElementsHint,
- boolean checkIfEmptyIndex, int startPageId, int fileId) throws IndexException, HyracksDataException {
+ boolean checkIfEmptyIndex, int startPageId, boolean appendOnly) throws IndexException,
+ HyracksDataException {
this.verifyInput = verifyInput;
this.tokenCmp = MultiComparator.create(btree.getComparatorFactories());
this.invListCmp = MultiComparator.create(invListCmpFactories);
@@ -316,20 +331,22 @@
this.btreeTupleReference = new ArrayTupleReference();
this.lastTupleBuilder = new ArrayTupleBuilder(numTokenFields + numInvListKeys);
this.lastTuple = new ArrayTupleReference();
+ if (appendOnly) {
+ create(appendOnly);
+ activate(appendOnly);
+ }
this.btreeBulkloader = btree.createBulkLoader(btreeFillFactor, verifyInput, numElementsHint,
- checkIfEmptyIndex);
+ checkIfEmptyIndex, appendOnly);
currentPageId = startPageId;
- currentPage = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, currentPageId), true);
- currentPage.acquireWriteLatch();
+ currentPage = bufferCache.confiscatePage(BufferedFileHandle.getDiskPageId(fileId, currentPageId));
invListBuilder.setTargetBuffer(currentPage.getBuffer().array(), 0);
+ queue = bufferCache.createFIFOQueue();
}
public void pinNextPage() throws HyracksDataException {
- currentPage.releaseWriteLatch(true);
- bufferCache.unpin(currentPage);
+ queue.put(currentPage);
currentPageId++;
- currentPage = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, currentPageId), true);
- currentPage.acquireWriteLatch();
+ currentPage = bufferCache.confiscatePage(BufferedFileHandle.getDiskPageId(fileId, currentPageId));
}
private void createAndInsertBTreeTuple() throws IndexException, HyracksDataException {
@@ -432,10 +449,17 @@
btreeBulkloader.end();
if (currentPage != null) {
- currentPage.releaseWriteLatch(true);
- bufferCache.unpin(currentPage);
+ queue.put(currentPage);
}
invListsMaxPageId = currentPageId;
+ bufferCache.finishQueue();
+ }
+
+ @Override
+ public void abort() throws HyracksDataException {
+ if(btreeBulkloader != null){
+ btreeBulkloader.abort();
+ }
}
}
@@ -587,13 +611,26 @@
boolean checkIfEmptyIndex) throws IndexException {
try {
return new OnDiskInvertedIndexBulkLoader(fillFactor, verifyInput, numElementsHint, checkIfEmptyIndex,
- rootPageId, fileId);
+ rootPageId, false);
} catch (HyracksDataException e) {
throw new InvertedIndexException(e);
}
}
@Override
+ public IIndexBulkLoader createBulkLoader(float fillFactor, boolean verifyInput, long numElementsHint,
+ boolean checkIfEmptyIndex, boolean appendOnly) throws IndexException {
+ try {
+ return new OnDiskInvertedIndexBulkLoader(fillFactor, verifyInput, numElementsHint, checkIfEmptyIndex,
+ rootPageId, appendOnly);
+
+ } catch (HyracksDataException e) {
+ throw new InvertedIndexException(e);
+ }
+
+ }
+
+ @Override
public void validate() throws HyracksDataException {
btree.validate();
// Scan the btree and validate the order of elements in each inverted-list.
@@ -686,4 +723,5 @@
public boolean hasMemoryComponents() {
return true;
}
+
}
diff --git a/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/util/InvertedIndexUtils.java b/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/util/InvertedIndexUtils.java
index 47cf239..87811aa 100644
--- a/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/util/InvertedIndexUtils.java
+++ b/hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/util/InvertedIndexUtils.java
@@ -30,12 +30,12 @@
import org.apache.hyracks.storage.am.btree.frames.BTreeLeafFrameType;
import org.apache.hyracks.storage.am.btree.frames.BTreeNSMInteriorFrameFactory;
import org.apache.hyracks.storage.am.btree.util.BTreeUtils;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
+import org.apache.hyracks.storage.am.common.api.IMetaDataPageManager;
import org.apache.hyracks.storage.am.common.api.ITreeIndexFrameFactory;
import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrameFactory;
import org.apache.hyracks.storage.am.common.api.IndexException;
import org.apache.hyracks.storage.am.common.frames.LIFOMetaDataFrameFactory;
-import org.apache.hyracks.storage.am.common.freepage.LinkedListFreePageManagerFactory;
+import org.apache.hyracks.storage.am.common.freepage.LinkedListMetadataManagerFactory;
import org.apache.hyracks.storage.am.common.tuples.TypeAwareTupleWriterFactory;
import org.apache.hyracks.storage.am.lsm.common.api.ILSMIOOperationCallback;
import org.apache.hyracks.storage.am.lsm.common.api.ILSMIOOperationScheduler;
@@ -66,7 +66,7 @@
public class InvertedIndexUtils {
public static InMemoryInvertedIndex createInMemoryBTreeInvertedindex(IBufferCache memBufferCache,
- IFreePageManager virtualFreePageManager, ITypeTraits[] invListTypeTraits,
+ IMetaDataPageManager virtualFreePageManager, ITypeTraits[] invListTypeTraits,
IBinaryComparatorFactory[] invListCmpFactories, ITypeTraits[] tokenTypeTraits,
IBinaryComparatorFactory[] tokenCmpFactories, IBinaryTokenizerFactory tokenizerFactory,
FileReference btreeFileRef) throws BTreeException {
@@ -75,7 +75,7 @@
}
public static InMemoryInvertedIndex createPartitionedInMemoryBTreeInvertedindex(IBufferCache memBufferCache,
- IFreePageManager virtualFreePageManager, ITypeTraits[] invListTypeTraits,
+ IMetaDataPageManager virtualFreePageManager, ITypeTraits[] invListTypeTraits,
IBinaryComparatorFactory[] invListCmpFactories, ITypeTraits[] tokenTypeTraits,
IBinaryComparatorFactory[] tokenCmpFactories, IBinaryTokenizerFactory tokenizerFactory,
FileReference btreeFileRef) throws BTreeException {
@@ -115,7 +115,7 @@
BTreeLeafFrameType.REGULAR_NSM);
ITreeIndexFrameFactory interiorFrameFactory = new BTreeNSMInteriorFrameFactory(tupleWriterFactory);
ITreeIndexMetaDataFrameFactory metaFrameFactory = new LIFOMetaDataFrameFactory();
- LinkedListFreePageManagerFactory freePageManagerFactory = new LinkedListFreePageManagerFactory(diskBufferCache,
+ LinkedListMetadataManagerFactory freePageManagerFactory = new LinkedListMetadataManagerFactory(diskBufferCache,
metaFrameFactory);
BTreeFactory deletedKeysBTreeFactory = new BTreeFactory(diskBufferCache, diskFileMapProvider,
freePageManagerFactory, interiorFrameFactory, leafFrameFactory, invListCmpFactories,
diff --git a/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/impls/AbstractLSMRTree.java b/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/impls/AbstractLSMRTree.java
index 7ec6a6e..f71862b 100644
--- a/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/impls/AbstractLSMRTree.java
+++ b/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/impls/AbstractLSMRTree.java
@@ -28,14 +28,7 @@
import org.apache.hyracks.api.io.FileReference;
import org.apache.hyracks.dataflow.common.data.accessors.ITupleReference;
import org.apache.hyracks.storage.am.btree.impls.BTree;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
-import org.apache.hyracks.storage.am.common.api.IIndexCursor;
-import org.apache.hyracks.storage.am.common.api.IIndexOperationContext;
-import org.apache.hyracks.storage.am.common.api.IModificationOperationCallback;
-import org.apache.hyracks.storage.am.common.api.ISearchPredicate;
-import org.apache.hyracks.storage.am.common.api.ITreeIndex;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexFrameFactory;
-import org.apache.hyracks.storage.am.common.api.IndexException;
+import org.apache.hyracks.storage.am.common.api.*;
import org.apache.hyracks.storage.am.common.exceptions.TreeIndexDuplicateKeyException;
import org.apache.hyracks.storage.am.common.impls.AbstractSearchPredicate;
import org.apache.hyracks.storage.am.common.impls.NoOpOperationCallback;
@@ -52,7 +45,7 @@
import org.apache.hyracks.storage.am.lsm.common.api.ILSMMergePolicy;
import org.apache.hyracks.storage.am.lsm.common.api.ILSMOperationTracker;
import org.apache.hyracks.storage.am.lsm.common.api.IVirtualBufferCache;
-import org.apache.hyracks.storage.am.lsm.common.freepage.VirtualFreePageManager;
+import org.apache.hyracks.storage.am.lsm.common.freepage.VirtualMetaDataPageManager;
import org.apache.hyracks.storage.am.lsm.common.impls.AbstractLSMIndex;
import org.apache.hyracks.storage.am.lsm.common.impls.BlockingIOOperationCallbackWrapper;
import org.apache.hyracks.storage.am.lsm.common.impls.LSMComponentFileReferences;
@@ -100,11 +93,11 @@
int i = 0;
for (IVirtualBufferCache virtualBufferCache : virtualBufferCaches) {
RTree memRTree = new RTree(virtualBufferCache, virtualBufferCache.getFileMapProvider(),
- new VirtualFreePageManager(virtualBufferCache.getNumPages()), rtreeInteriorFrameFactory,
+ new VirtualMetaDataPageManager(virtualBufferCache.getNumPages()), rtreeInteriorFrameFactory,
rtreeLeafFrameFactory, rtreeCmpFactories, fieldCount, new FileReference(new File(
fileManager.getBaseDir() + "_virtual_r_" + i)));
BTree memBTree = new BTree(virtualBufferCache, virtualBufferCache.getFileMapProvider(),
- new VirtualFreePageManager(virtualBufferCache.getNumPages()), btreeInteriorFrameFactory,
+ new VirtualMetaDataPageManager(virtualBufferCache.getNumPages()), btreeInteriorFrameFactory,
btreeLeafFrameFactory, btreeCmpFactories, btreeCmpFactories.length, new FileReference(new File(
fileManager.getBaseDir() + "_virtual_b_" + i)));
LSMRTreeMemoryComponent mutableComponent = new LSMRTreeMemoryComponent(memRTree, memBTree,
@@ -281,20 +274,17 @@
LSMRTreeDiskComponent component = (LSMRTreeDiskComponent) factory
.createLSMComponentInstance(new LSMComponentFileReferences(insertFileRef, deleteFileRef,
bloomFilterFileRef));
- if (createComponent) {
- component.getRTree().create();
- if (component.getBTree() != null) {
- component.getBTree().create();
- component.getBloomFilter().create();
- }
- }
// Tree will be closed during cleanup of merge().
- component.getRTree().activate();
+ if (!createComponent) {
+ component.getRTree().activate();
+ }
if (component.getBTree() != null) {
- component.getBTree().activate();
+ if(!createComponent){
+ component.getBTree().activate();
+ }
component.getBloomFilter().activate();
}
- if (component.getLSMComponentFilter() != null) {
+ if (component.getLSMComponentFilter() != null && !createComponent) {
filterManager.readFilterInfo(component.getLSMComponentFilter(), component.getRTree());
}
return component;
@@ -315,10 +305,10 @@
}
@Override
- public IFreePageManager getFreePageManager() {
+ public IMetaDataPageManager getMetaManager() {
LSMRTreeMemoryComponent mutableComponent = (LSMRTreeMemoryComponent) memoryComponents
.get(currentMutableComponentId.get());
- return mutableComponent.getRTree().getFreePageManager();
+ return mutableComponent.getRTree().getMetaManager();
}
@Override
diff --git a/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/impls/ExternalRTree.java b/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/impls/ExternalRTree.java
index 7e86ed8..11c42a7 100644
--- a/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/impls/ExternalRTree.java
+++ b/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/impls/ExternalRTree.java
@@ -303,7 +303,7 @@
search(opCtx, btreeCursor, rtreeSearchPred);
BTree btree = mergedComponent.getBTree();
- IIndexBulkLoader btreeBulkLoader = btree.createBulkLoader(1.0f, true, 0L, false);
+ IIndexBulkLoader btreeBulkLoader = btree.createBulkLoader(1.0f, true, 0L, false, true);
long numElements = 0L;
for (int i = 0; i < mergeOp.getMergingComponents().size(); ++i) {
@@ -331,7 +331,7 @@
btreeBulkLoader.end();
}
- IIndexBulkLoader bulkLoader = mergedComponent.getRTree().createBulkLoader(1.0f, false, 0L, false);
+ IIndexBulkLoader bulkLoader = mergedComponent.getRTree().createBulkLoader(1.0f, false, 0L, false, true);
try {
while (cursor.hasNext()) {
cursor.next();
@@ -558,9 +558,9 @@
// Create the three loaders
rtreeBulkLoader = ((LSMRTreeDiskComponent) component).getRTree().createBulkLoader(fillFactor, verifyInput,
- numElementsHint, false);
+ numElementsHint, false, true);
btreeBulkLoader = (BTreeBulkLoader) ((LSMRTreeDiskComponent) component).getBTree().createBulkLoader(
- fillFactor, verifyInput, numElementsHint, false);
+ fillFactor, verifyInput, numElementsHint, false, true);
int maxBucketsPerElement = BloomCalculations.maxBucketsPerElement(numElementsHint);
BloomFilterSpecification bloomFilterSpec = BloomCalculations.computeBloomSpec(maxBucketsPerElement,
bloomFilterFalsePositiveRate);
diff --git a/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/impls/LSMRTree.java b/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/impls/LSMRTree.java
index f5be6c8..40906fb 100644
--- a/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/impls/LSMRTree.java
+++ b/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/impls/LSMRTree.java
@@ -29,6 +29,7 @@
import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.data.std.primitive.IntegerPointable;
import org.apache.hyracks.dataflow.common.data.accessors.ITupleReference;
+import org.apache.hyracks.dataflow.common.data.marshalling.IntegerSerializerDeserializer;
import org.apache.hyracks.storage.am.bloomfilter.impls.BloomCalculations;
import org.apache.hyracks.storage.am.bloomfilter.impls.BloomFilter;
import org.apache.hyracks.storage.am.bloomfilter.impls.BloomFilterFactory;
@@ -124,8 +125,6 @@
* Opens LSMRTree, cleaning up invalid files from base dir, and registering
* all valid files as on-disk RTrees and BTrees.
*
- * @param fileReference
- * Dummy file id.
* @throws HyracksDataException
*/
@Override
@@ -163,8 +162,8 @@
RTree rtree = component.getRTree();
BTree btree = component.getBTree();
BloomFilter bloomFilter = component.getBloomFilter();
- rtree.deactivate();
- btree.deactivate();
+ rtree.deactivateCloseHandle();
+ btree.deactivateCloseHandle();
bloomFilter.deactivate();
}
isActivated = false;
@@ -256,12 +255,12 @@
} finally {
rtreeScanCursor.close();
}
+ rTreeTupleSorter.sort();
+
+ rTreeBulkloader = diskRTree.createBulkLoader(1.0f, false, 0L, false, true);
+ cursor = rTreeTupleSorter;
+
if (!isEmpty) {
- rTreeTupleSorter.sort();
-
- rTreeBulkloader = diskRTree.createBulkLoader(1.0f, false, 0L, false);
- cursor = rTreeTupleSorter;
-
try {
while (cursor.hasNext()) {
cursor.next();
@@ -271,9 +270,10 @@
} finally {
cursor.close();
}
- rTreeBulkloader.end();
}
+ rTreeBulkloader.end();
+
ITreeIndexAccessor memBTreeAccessor = flushingComponent.getBTree().createAccessor(
NoOpOperationCallback.INSTANCE, NoOpOperationCallback.INSTANCE);
RangePredicate btreeNullPredicate = new RangePredicate(null, null, true, true, null, null);
@@ -290,32 +290,29 @@
btreeCountingCursor.close();
}
- if (numBTreeTuples > 0) {
- int maxBucketsPerElement = BloomCalculations.maxBucketsPerElement(numBTreeTuples);
- BloomFilterSpecification bloomFilterSpec = BloomCalculations.computeBloomSpec(maxBucketsPerElement,
- bloomFilterFalsePositiveRate);
+ int maxBucketsPerElement = BloomCalculations.maxBucketsPerElement(numBTreeTuples);
+ BloomFilterSpecification bloomFilterSpec = BloomCalculations.computeBloomSpec(maxBucketsPerElement,
+ bloomFilterFalsePositiveRate);
- IIndexCursor btreeScanCursor = memBTreeAccessor.createSearchCursor(false);
- memBTreeAccessor.search(btreeScanCursor, btreeNullPredicate);
- BTree diskBTree = component.getBTree();
+ IIndexCursor btreeScanCursor = memBTreeAccessor.createSearchCursor(false);
+ memBTreeAccessor.search(btreeScanCursor, btreeNullPredicate);
+ BTree diskBTree = component.getBTree();
- // BulkLoad the tuples from the in-memory tree into the new disk BTree.
- IIndexBulkLoader bTreeBulkloader = diskBTree.createBulkLoader(1.0f, false, numBTreeTuples, false);
- IIndexBulkLoader builder = component.getBloomFilter().createBuilder(numBTreeTuples,
- bloomFilterSpec.getNumHashes(), bloomFilterSpec.getNumBucketsPerElements());
- // scan the memory BTree
- try {
- while (btreeScanCursor.hasNext()) {
- btreeScanCursor.next();
- ITupleReference frameTuple = btreeScanCursor.getTuple();
- bTreeBulkloader.add(frameTuple);
- builder.add(frameTuple);
- }
- } finally {
- btreeScanCursor.close();
- builder.end();
+ // BulkLoad the tuples from the in-memory tree into the new disk BTree.
+ IIndexBulkLoader bTreeBulkloader = diskBTree.createBulkLoader(1.0f, false, numBTreeTuples, false, true);
+ IIndexBulkLoader builder = component.getBloomFilter().createBuilder(numBTreeTuples,
+ bloomFilterSpec.getNumHashes(), bloomFilterSpec.getNumBucketsPerElements());
+ // scan the memory BTree
+ try {
+ while (btreeScanCursor.hasNext()) {
+ btreeScanCursor.next();
+ ITupleReference frameTuple = btreeScanCursor.getTuple();
+ bTreeBulkloader.add(frameTuple);
+ builder.add(frameTuple);
}
- bTreeBulkloader.end();
+ } finally {
+ btreeScanCursor.close();
+ builder.end();
}
if (component.getLSMComponentFilter() != null) {
@@ -323,9 +320,12 @@
filterTuples.add(flushingComponent.getLSMComponentFilter().getMinTuple());
filterTuples.add(flushingComponent.getLSMComponentFilter().getMaxTuple());
filterManager.updateFilterInfo(component.getLSMComponentFilter(), filterTuples);
- filterManager.writeFilterInfo(component.getLSMComponentFilter(), component.getRTree());
+ filterManager.writeFilterInfo(component.getLSMComponentFilter(), component.getRTree()
+ );
}
+ bTreeBulkloader.end();
+
return component;
}
@@ -338,9 +338,10 @@
ITreeIndexCursor cursor = new LSMRTreeSortedCursor(rctx, linearizer, buddyBTreeFields);
LSMComponentFileReferences relMergeFileRefs = getMergeTargetFileName(mergingComponents);
ILSMIndexAccessorInternal accessor = new LSMRTreeAccessor(lsmHarness, rctx);
- ioScheduler.scheduleOperation(new LSMRTreeMergeOperation(accessor, mergingComponents, cursor, relMergeFileRefs
- .getInsertIndexFileReference(), relMergeFileRefs.getDeleteIndexFileReference(), relMergeFileRefs
- .getBloomFilterFileReference(), callback, fileManager.getBaseDir()));
+ ioScheduler.scheduleOperation(new LSMRTreeMergeOperation((ILSMIndexAccessorInternal) accessor,
+ mergingComponents, cursor, relMergeFileRefs.getInsertIndexFileReference(), relMergeFileRefs
+ .getDeleteIndexFileReference(), relMergeFileRefs.getBloomFilterFileReference(), callback,
+ fileManager.getBaseDir()));
}
@Override
@@ -357,6 +358,8 @@
// In case we must keep the deleted-keys BTrees, then they must be merged *before* merging the r-trees so that
// lsmHarness.endSearch() is called once when the r-trees have been merged.
+ BTree btree = mergedComponent.getBTree();
+ IIndexBulkLoader btreeBulkLoader = btree.createBulkLoader(1.0f, true, 0L, false, true);
if (mergeOp.getMergingComponents().get(mergeOp.getMergingComponents().size() - 1) != diskComponents
.get(diskComponents.size() - 1)) {
// Keep the deleted tuples since the oldest disk component is not included in the merge operation
@@ -364,9 +367,6 @@
LSMRTreeDeletedKeysBTreeMergeCursor btreeCursor = new LSMRTreeDeletedKeysBTreeMergeCursor(opCtx);
search(opCtx, btreeCursor, rtreeSearchPred);
- BTree btree = mergedComponent.getBTree();
- IIndexBulkLoader btreeBulkLoader = btree.createBulkLoader(1.0f, true, 0L, false);
-
long numElements = 0L;
for (int i = 0; i < mergeOp.getMergingComponents().size(); ++i) {
numElements += ((LSMRTreeDiskComponent) mergeOp.getMergingComponents().get(i)).getBloomFilter()
@@ -390,10 +390,20 @@
btreeCursor.close();
builder.end();
}
- btreeBulkLoader.end();
}
- IIndexBulkLoader bulkLoader = mergedComponent.getRTree().createBulkLoader(1.0f, false, 0L, false);
+ if (mergedComponent.getLSMComponentFilter() != null) {
+ List<ITupleReference> filterTuples = new ArrayList<ITupleReference>();
+ for (int i = 0; i < mergeOp.getMergingComponents().size(); ++i) {
+ filterTuples.add(mergeOp.getMergingComponents().get(i).getLSMComponentFilter().getMinTuple());
+ filterTuples.add(mergeOp.getMergingComponents().get(i).getLSMComponentFilter().getMaxTuple());
+ }
+ filterManager.updateFilterInfo(mergedComponent.getLSMComponentFilter(), filterTuples);
+ filterManager.writeFilterInfo(mergedComponent.getLSMComponentFilter(), mergedComponent.getRTree());
+ }
+ btreeBulkLoader.end();
+
+ IIndexBulkLoader bulkLoader = mergedComponent.getRTree().createBulkLoader(1.0f, false, 0L, false, true);
try {
while (cursor.hasNext()) {
cursor.next();
@@ -405,15 +415,6 @@
}
bulkLoader.end();
- if (mergedComponent.getLSMComponentFilter() != null) {
- List<ITupleReference> filterTuples = new ArrayList<ITupleReference>();
- for (int i = 0; i < mergeOp.getMergingComponents().size(); ++i) {
- filterTuples.add(mergeOp.getMergingComponents().get(i).getLSMComponentFilter().getMinTuple());
- filterTuples.add(mergeOp.getMergingComponents().get(i).getLSMComponentFilter().getMaxTuple());
- }
- filterManager.updateFilterInfo(mergedComponent.getLSMComponentFilter(), filterTuples);
- filterManager.writeFilterInfo(mergedComponent.getLSMComponentFilter(), mergedComponent.getRTree());
- }
return mergedComponent;
}
@@ -520,6 +521,7 @@
public class LSMRTreeBulkLoader implements IIndexBulkLoader {
private final ILSMComponent component;
private final IIndexBulkLoader bulkLoader;
+ private final IIndexBulkLoader buddyBTreeBulkloader;
private boolean cleanedUpArtifacts = false;
private boolean isEmptyComponent = true;
public final PermutingTupleReference indexTuple;
@@ -539,8 +541,9 @@
throw new TreeIndexException(e);
}
bulkLoader = ((LSMRTreeDiskComponent) component).getRTree().createBulkLoader(fillFactor, verifyInput,
- numElementsHint, false);
-
+ numElementsHint, false, true);
+ buddyBTreeBulkloader = ((LSMRTreeDiskComponent)component).getBTree().createBulkLoader(fillFactor,
+ verifyInput,numElementsHint,false,true);
if (filterFields != null) {
indexTuple = new PermutingTupleReference(rtreeFields);
filterCmp = MultiComparator.create(component.getLSMComponentFilter().getFilterCmpFactories());
@@ -581,13 +584,15 @@
@Override
public void end() throws HyracksDataException, IndexException {
if (!cleanedUpArtifacts) {
- bulkLoader.end();
if (component.getLSMComponentFilter() != null) {
filterManager.writeFilterInfo(component.getLSMComponentFilter(),
((LSMRTreeDiskComponent) component).getRTree());
}
+ bulkLoader.end();
+ buddyBTreeBulkloader.end();
+
if (isEmptyComponent) {
cleanupArtifacts();
} else {
@@ -596,6 +601,16 @@
}
}
+ @Override
+ public void abort() throws HyracksDataException {
+ if(bulkLoader != null){
+ bulkLoader.abort();
+ }
+ if(buddyBTreeBulkloader != null){
+ buddyBTreeBulkloader.abort();
+ }
+ }
+
protected void cleanupArtifacts() throws HyracksDataException {
if (!cleanedUpArtifacts) {
cleanedUpArtifacts = true;
@@ -612,16 +627,16 @@
@Override
public void markAsValid(ILSMComponent lsmComponent) throws HyracksDataException {
LSMRTreeDiskComponent component = (LSMRTreeDiskComponent) lsmComponent;
- // Flush the bloom filter first.
- int fileId = component.getBloomFilter().getFileId();
- IBufferCache bufferCache = component.getBTree().getBufferCache();
- int startPage = 0;
- int maxPage = component.getBloomFilter().getNumPages();
- forceFlushDirtyPages(bufferCache, fileId, startPage, maxPage);
- forceFlushDirtyPages(component.getRTree());
- markAsValidInternal(component.getRTree());
- forceFlushDirtyPages(component.getBTree());
- markAsValidInternal(component.getBTree());
+ markAsValidInternal(component.getBTree().getBufferCache(),component.getBloomFilter());
+ markAsValidInternal((component).getBTree());
+ markAsValidInternal((component).getRTree());
+ }
+
+ @Override
+ public IIndexBulkLoader createBulkLoader(float fillFactor, boolean verifyInput, long numElementsHint,
+ boolean checkIfEmptyIndex, boolean appendOnly) throws IndexException {
+ if(!appendOnly) throw new UnsupportedOperationException("LSM indexes don't support in-place modification");
+ return createBulkLoader(fillFactor,verifyInput,numElementsHint,checkIfEmptyIndex);
}
@Override
diff --git a/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/impls/LSMRTreeWithAntiMatterTuples.java b/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/impls/LSMRTreeWithAntiMatterTuples.java
index 069a3b5..2388cff 100644
--- a/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/impls/LSMRTreeWithAntiMatterTuples.java
+++ b/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/impls/LSMRTreeWithAntiMatterTuples.java
@@ -124,7 +124,7 @@
List<ILSMComponent> immutableComponents = diskComponents;
for (ILSMComponent c : immutableComponents) {
RTree rtree = ((LSMRTreeDiskComponent) c).getRTree();
- rtree.deactivate();
+ rtree.deactivateCloseHandle();
}
isActivated = false;
}
@@ -233,7 +233,7 @@
bTreeTupleSorter.sort();
}
- IIndexBulkLoader rTreeBulkloader = diskRTree.createBulkLoader(1.0f, false, 0L, false);
+ IIndexBulkLoader rTreeBulkloader = diskRTree.createBulkLoader(1.0f, false, 0L, false, true);
LSMRTreeWithAntiMatterTuplesFlushCursor cursor = new LSMRTreeWithAntiMatterTuplesFlushCursor(rTreeTupleSorter,
bTreeTupleSorter, comparatorFields, linearizerArray);
cursor.open(null, null);
@@ -249,15 +249,15 @@
cursor.close();
}
- rTreeBulkloader.end();
-
if (component.getLSMComponentFilter() != null) {
List<ITupleReference> filterTuples = new ArrayList<ITupleReference>();
filterTuples.add(flushingComponent.getLSMComponentFilter().getMinTuple());
filterTuples.add(flushingComponent.getLSMComponentFilter().getMaxTuple());
filterManager.updateFilterInfo(component.getLSMComponentFilter(), filterTuples);
- filterManager.writeFilterInfo(component.getLSMComponentFilter(), component.getRTree());
+ filterManager.writeFilterInfo(component.getLSMComponentFilter(), component.getRTree()
+ );
}
+ rTreeBulkloader.end();
return component;
}
@@ -293,7 +293,7 @@
LSMRTreeDiskComponent component = createDiskComponent(componentFactory, mergeOp.getRTreeMergeTarget(), null,
null, true);
RTree mergedRTree = component.getRTree();
- IIndexBulkLoader bulkloader = mergedRTree.createBulkLoader(1.0f, false, 0L, false);
+ IIndexBulkLoader bulkloader = mergedRTree.createBulkLoader(1.0f, false, 0L, false, true);
try {
while (cursor.hasNext()) {
cursor.next();
@@ -303,8 +303,6 @@
} finally {
cursor.close();
}
- bulkloader.end();
-
if (component.getLSMComponentFilter() != null) {
List<ITupleReference> filterTuples = new ArrayList<ITupleReference>();
for (int i = 0; i < mergeOp.getMergingComponents().size(); ++i) {
@@ -312,8 +310,10 @@
filterTuples.add(mergeOp.getMergingComponents().get(i).getLSMComponentFilter().getMaxTuple());
}
filterManager.updateFilterInfo(component.getLSMComponentFilter(), filterTuples);
- filterManager.writeFilterInfo(component.getLSMComponentFilter(), component.getBTree());
+ filterManager.writeFilterInfo(component.getLSMComponentFilter(), component.getBTree()
+ );
}
+ bulkloader.end();
return component;
}
@@ -379,7 +379,7 @@
throw new TreeIndexException(e);
}
bulkLoader = ((LSMRTreeDiskComponent) component).getRTree().createBulkLoader(fillFactor, verifyInput,
- numElementsHint, false);
+ numElementsHint, false, true);
if (filterFields != null) {
indexTuple = new PermutingTupleReference(rtreeFields);
@@ -422,12 +422,12 @@
@Override
public void end() throws HyracksDataException, IndexException {
if (!cleanedUpArtifacts) {
- bulkLoader.end();
if (component.getLSMComponentFilter() != null) {
filterManager.writeFilterInfo(component.getLSMComponentFilter(),
((LSMRTreeDiskComponent) component).getRTree());
}
+ bulkLoader.end();
if (isEmptyComponent) {
cleanupArtifacts();
@@ -437,6 +437,13 @@
}
}
+ @Override
+ public void abort() throws HyracksDataException {
+ if(bulkLoader != null){
+ bulkLoader.abort();
+ }
+ }
+
protected void cleanupArtifacts() throws HyracksDataException {
if (!cleanedUpArtifacts) {
cleanedUpArtifacts = true;
@@ -450,11 +457,18 @@
@Override
public void markAsValid(ILSMComponent lsmComponent) throws HyracksDataException {
RTree rtree = ((LSMRTreeDiskComponent) lsmComponent).getRTree();
- forceFlushDirtyPages(rtree);
markAsValidInternal(rtree);
}
@Override
+ public IIndexBulkLoader createBulkLoader(float fillFactor, boolean verifyInput, long numElementsHint,
+ boolean checkIfEmptyIndex, boolean appendOnly) throws IndexException {
+ if (!appendOnly)
+ throw new UnsupportedOperationException("LSM indexes don't support in-place modification");
+ return createBulkLoader(fillFactor, verifyInput, numElementsHint, checkIfEmptyIndex);
+ }
+
+ @Override
public Set<String> getLSMComponentPhysicalFiles(ILSMComponent lsmComponent) {
Set<String> files = new HashSet<String>();
@@ -463,5 +477,4 @@
return files;
}
-
}
diff --git a/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/impls/RTreeFactory.java b/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/impls/RTreeFactory.java
index c35ccb3..041d8df 100644
--- a/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/impls/RTreeFactory.java
+++ b/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/impls/RTreeFactory.java
@@ -20,8 +20,9 @@
package org.apache.hyracks.storage.am.lsm.rtree.impls;
import org.apache.hyracks.api.dataflow.value.IBinaryComparatorFactory;
+import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.api.io.FileReference;
-import org.apache.hyracks.storage.am.common.api.IFreePageManagerFactory;
+import org.apache.hyracks.storage.am.common.api.IMetadataManagerFactory;
import org.apache.hyracks.storage.am.common.api.ITreeIndexFrameFactory;
import org.apache.hyracks.storage.am.common.api.IndexException;
import org.apache.hyracks.storage.am.lsm.common.impls.TreeIndexFactory;
@@ -32,7 +33,7 @@
public class RTreeFactory extends TreeIndexFactory<RTree> {
public RTreeFactory(IBufferCache bufferCache, IFileMapProvider fileMapProvider,
- IFreePageManagerFactory freePageManagerFactory, ITreeIndexFrameFactory interiorFrameFactory,
+ IMetadataManagerFactory freePageManagerFactory, ITreeIndexFrameFactory interiorFrameFactory,
ITreeIndexFrameFactory leafFrameFactory, IBinaryComparatorFactory[] cmpFactories, int fieldCount) {
super(bufferCache, fileMapProvider, freePageManagerFactory, interiorFrameFactory, leafFrameFactory,
cmpFactories, fieldCount);
@@ -40,8 +41,12 @@
@Override
public RTree createIndexInstance(FileReference file) throws IndexException {
- return new RTree(bufferCache, fileMapProvider, freePageManagerFactory.createFreePageManager(),
- interiorFrameFactory, leafFrameFactory, cmpFactories, fieldCount, file);
+ try {
+ return new RTree(bufferCache, fileMapProvider, freePageManagerFactory.createFreePageManager(),
+ interiorFrameFactory, leafFrameFactory, cmpFactories, fieldCount, file);
+ } catch (HyracksDataException e) {
+ throw new IndexException(e);
+ }
}
}
diff --git a/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/utils/LSMRTreeUtils.java b/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/utils/LSMRTreeUtils.java
index 574dfcd..d34f4c2 100644
--- a/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/utils/LSMRTreeUtils.java
+++ b/hyracks/hyracks-storage-am-lsm-rtree/src/main/java/org/apache/hyracks/storage/am/lsm/rtree/utils/LSMRTreeUtils.java
@@ -36,7 +36,7 @@
import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrameFactory;
import org.apache.hyracks.storage.am.common.api.TreeIndexException;
import org.apache.hyracks.storage.am.common.frames.LIFOMetaDataFrameFactory;
-import org.apache.hyracks.storage.am.common.freepage.LinkedListFreePageManagerFactory;
+import org.apache.hyracks.storage.am.common.freepage.LinkedListMetadataManagerFactory;
import org.apache.hyracks.storage.am.common.tuples.TypeAwareTupleWriterFactory;
import org.apache.hyracks.storage.am.lsm.common.api.ILSMIOOperationCallback;
import org.apache.hyracks.storage.am.lsm.common.api.ILSMIOOperationScheduler;
@@ -97,7 +97,7 @@
ITreeIndexFrameFactory btreeLeafFrameFactory = new BTreeNSMLeafFrameFactory(btreeTupleWriterFactory);
ITreeIndexMetaDataFrameFactory metaFrameFactory = new LIFOMetaDataFrameFactory();
- LinkedListFreePageManagerFactory freePageManagerFactory = new LinkedListFreePageManagerFactory(diskBufferCache,
+ LinkedListMetadataManagerFactory freePageManagerFactory = new LinkedListMetadataManagerFactory(diskBufferCache,
metaFrameFactory);
TreeIndexFactory<RTree> diskRTreeFactory = new RTreeFactory(diskBufferCache, diskFileMapProvider,
@@ -164,7 +164,7 @@
valueProviderFactories, rtreePolicyType);
ITreeIndexMetaDataFrameFactory metaFrameFactory = new LIFOMetaDataFrameFactory();
- LinkedListFreePageManagerFactory freePageManagerFactory = new LinkedListFreePageManagerFactory(diskBufferCache,
+ LinkedListMetadataManagerFactory freePageManagerFactory = new LinkedListMetadataManagerFactory(diskBufferCache,
metaFrameFactory);
TreeIndexFactory<RTree> diskRTreeFactory = new RTreeFactory(diskBufferCache, diskFileMapProvider,
@@ -240,7 +240,7 @@
ITreeIndexFrameFactory btreeLeafFrameFactory = new BTreeNSMLeafFrameFactory(btreeTupleWriterFactory);
ITreeIndexMetaDataFrameFactory metaFrameFactory = new LIFOMetaDataFrameFactory();
- LinkedListFreePageManagerFactory freePageManagerFactory = new LinkedListFreePageManagerFactory(diskBufferCache,
+ LinkedListMetadataManagerFactory freePageManagerFactory = new LinkedListMetadataManagerFactory(diskBufferCache,
metaFrameFactory);
TreeIndexFactory<RTree> diskRTreeFactory = new RTreeFactory(diskBufferCache, diskFileMapProvider,
diff --git a/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/frames/RStarTreePolicy.java b/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/frames/RStarTreePolicy.java
index 7b357f0..c5159e1 100644
--- a/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/frames/RStarTreePolicy.java
+++ b/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/frames/RStarTreePolicy.java
@@ -225,12 +225,12 @@
splitKey.initData(splitKeySize);
leftRTreeFrame.adjustMBR();
- rTreeTupleWriterleftRTreeFrame.writeTupleFields(leftRTreeFrame.getTuples(), 0,
+ rTreeTupleWriterleftRTreeFrame.writeTupleFields(leftRTreeFrame.getMBRTuples(), 0,
rTreeSplitKey.getLeftPageBuffer(), 0);
rTreeSplitKey.getLeftTuple().resetByTupleOffset(rTreeSplitKey.getLeftPageBuffer(), 0);
((IRTreeFrame) rightFrame).adjustMBR();
- rTreeTupleWriterRightFrame.writeTupleFields(((RTreeNSMFrame) rightFrame).getTuples(), 0,
+ rTreeTupleWriterRightFrame.writeTupleFields(((RTreeNSMFrame) rightFrame).getMBRTuples(), 0,
rTreeSplitKey.getRightPageBuffer(), 0);
rTreeSplitKey.getRightTuple().resetByTupleOffset(rTreeSplitKey.getRightPageBuffer(), 0);
diff --git a/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/frames/RTreeNSMFrame.java b/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/frames/RTreeNSMFrame.java
index 37ba2a5..2331f3b 100644
--- a/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/frames/RTreeNSMFrame.java
+++ b/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/frames/RTreeNSMFrame.java
@@ -36,7 +36,7 @@
protected static final int pageNsnOff = smFlagOff + 1;
protected static final int rightPageOff = pageNsnOff + 8;
- protected ITreeIndexTupleReference[] tuples;
+ protected ITreeIndexTupleReference[] mbrTuples;
protected ITreeIndexTupleReference cmpFrameTuple;
private static final double doubleEpsilon = computeDoubleEpsilon();
@@ -47,9 +47,9 @@
public RTreeNSMFrame(ITreeIndexTupleWriter tupleWriter, IPrimitiveValueProvider[] keyValueProviders,
RTreePolicyType rtreePolicyType) {
super(tupleWriter, new UnorderedSlotManager());
- this.tuples = new ITreeIndexTupleReference[keyValueProviders.length];
+ this.mbrTuples = new ITreeIndexTupleReference[keyValueProviders.length];
for (int i = 0; i < keyValueProviders.length; i++) {
- this.tuples[i] = tupleWriter.createTupleReference();
+ this.mbrTuples[i] = tupleWriter.createTupleReference();
}
cmpFrameTuple = tupleWriter.createTupleReference();
this.keyValueProviders = keyValueProviders;
@@ -111,8 +111,8 @@
buf.putInt(rightPageOff, rightPage);
}
- public ITreeIndexTupleReference[] getTuples() {
- return tuples;
+ public ITreeIndexTupleReference[] getMBRTuples() {
+ return mbrTuples;
}
@Override
@@ -123,7 +123,7 @@
abstract public int getTupleSize(ITupleReference tuple);
- public void adjustMBRImpl(ITreeIndexTupleReference[] tuples) {
+ protected void calculateMBRImpl(ITreeIndexTupleReference[] tuples) {
int maxFieldPos = keyValueProviders.length / 2;
for (int i = 1; i < getTupleCount(); i++) {
frameTuple.resetByTupleIndex(this, i);
@@ -145,12 +145,12 @@
@Override
public void adjustMBR() {
- for (int i = 0; i < tuples.length; i++) {
- tuples[i].setFieldCount(getFieldCount());
- tuples[i].resetByTupleIndex(this, 0);
+ for (int i = 0; i < mbrTuples.length; i++) {
+ mbrTuples[i].setFieldCount(getFieldCount());
+ mbrTuples[i].resetByTupleIndex(this, 0);
}
- adjustMBRImpl(tuples);
+ calculateMBRImpl(mbrTuples);
}
public abstract int getFieldCount();
diff --git a/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/frames/RTreeNSMInteriorFrame.java b/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/frames/RTreeNSMInteriorFrame.java
index 3b71be8..5c2e95e 100644
--- a/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/frames/RTreeNSMInteriorFrame.java
+++ b/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/frames/RTreeNSMInteriorFrame.java
@@ -32,6 +32,7 @@
import org.apache.hyracks.storage.am.common.api.ITreeIndexTupleReference;
import org.apache.hyracks.storage.am.common.api.ITreeIndexTupleWriter;
import org.apache.hyracks.storage.am.common.api.TreeIndexException;
+import org.apache.hyracks.storage.am.common.frames.AbstractSlotManager;
import org.apache.hyracks.storage.am.common.frames.FrameOpSpaceStatus;
import org.apache.hyracks.storage.am.common.ophelpers.MultiComparator;
import org.apache.hyracks.storage.am.common.ophelpers.SlotOffTupleOff;
@@ -40,7 +41,7 @@
public class RTreeNSMInteriorFrame extends RTreeNSMFrame implements IRTreeInteriorFrame {
- private static final int childPtrSize = 4;
+ public static final int childPtrSize = 4;
private IBinaryComparator childPtrCmp = PointableBinaryComparatorFactory.of(IntegerPointable.FACTORY)
.createBinaryComparator();
private final int keyFieldCount;
@@ -53,7 +54,7 @@
}
@Override
- public int getBytesRequriedToWriteTuple(ITupleReference tuple) {
+ public int getBytesRequiredToWriteTuple(ITupleReference tuple) {
return tupleWriter.bytesRequired(tuple) + childPtrSize + slotManager.getSlotSize();
}
@@ -184,6 +185,16 @@
return FrameOpSpaceStatus.INSUFFICIENT_SPACE;
}
+ public FrameOpSpaceStatus hasSpaceInsert(int bytesRequired) {
+ if (bytesRequired + slotManager.getSlotSize() <= buf.capacity() - buf.getInt(freeSpaceOff)
+ - (buf.getInt(tupleCountOff) * slotManager.getSlotSize()))
+ return FrameOpSpaceStatus.SUFFICIENT_CONTIGUOUS_SPACE;
+ else if (bytesRequired + slotManager.getSlotSize() <= buf.getInt(totalFreeSpaceOff))
+ return FrameOpSpaceStatus.SUFFICIENT_SPACE;
+ else
+ return FrameOpSpaceStatus.INSUFFICIENT_SPACE;
+ }
+
@Override
public void adjustKey(ITupleReference tuple, int tupleIndex, MultiComparator cmp) throws TreeIndexException {
frameTuple.setFieldCount(cmp.getKeyFieldCount());
@@ -221,7 +232,7 @@
@Override
public void insert(ITupleReference tuple, int tupleIndex) {
frameTuple.setFieldCount(tuple.getFieldCount());
- slotManager.insertSlot(-1, buf.getInt(freeSpaceOff));
+ slotManager.insertSlot(AbstractSlotManager.GREATEST_KEY_INDICATOR, buf.getInt(freeSpaceOff));
int freeSpace = buf.getInt(freeSpaceOff);
int bytesWritten = tupleWriter.writeTupleFields(tuple, 0, tuple.getFieldCount(), buf.array(), freeSpace);
System.arraycopy(tuple.getFieldData(tuple.getFieldCount() - 1), getChildPointerOff(tuple), buf.array(),
diff --git a/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/frames/RTreeNSMLeafFrame.java b/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/frames/RTreeNSMLeafFrame.java
index 8faf5a2..591ce67 100644
--- a/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/frames/RTreeNSMLeafFrame.java
+++ b/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/frames/RTreeNSMLeafFrame.java
@@ -35,7 +35,7 @@
}
@Override
- public int getBytesRequriedToWriteTuple(ITupleReference tuple) {
+ public int getBytesRequiredToWriteTuple(ITupleReference tuple) {
return tupleWriter.bytesRequired(tuple) + slotManager.getSlotSize();
}
diff --git a/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/frames/RTreePolicy.java b/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/frames/RTreePolicy.java
index 2bcaa77..1c950c6 100644
--- a/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/frames/RTreePolicy.java
+++ b/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/frames/RTreePolicy.java
@@ -196,11 +196,11 @@
splitKey.initData(splitKeySize);
leftRTreeFrame.adjustMBR();
- rTreeTupleWriterLeftFrame.writeTupleFields(leftRTreeFrame.getTuples(), 0, rTreeSplitKey.getLeftPageBuffer(), 0);
+ rTreeTupleWriterLeftFrame.writeTupleFields(leftRTreeFrame.getMBRTuples(), 0, rTreeSplitKey.getLeftPageBuffer(), 0);
rTreeSplitKey.getLeftTuple().resetByTupleOffset(rTreeSplitKey.getLeftPageBuffer(), 0);
((IRTreeFrame) rightFrame).adjustMBR();
- rTreeTupleWriterRightFrame.writeTupleFields(((RTreeNSMFrame) rightFrame).getTuples(), 0,
+ rTreeTupleWriterRightFrame.writeTupleFields(((RTreeNSMFrame) rightFrame).getMBRTuples(), 0,
rTreeSplitKey.getRightPageBuffer(), 0);
rTreeSplitKey.getRightTuple().resetByTupleOffset(rTreeSplitKey.getRightPageBuffer(), 0);
}
diff --git a/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/impls/RTree.java b/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/impls/RTree.java
index 2cb72cc..ada54d2 100644
--- a/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/impls/RTree.java
+++ b/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/impls/RTree.java
@@ -21,6 +21,7 @@
import java.nio.ByteBuffer;
import java.util.ArrayList;
+import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hyracks.api.dataflow.value.IBinaryComparatorFactory;
@@ -28,20 +29,9 @@
import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.api.io.FileReference;
import org.apache.hyracks.dataflow.common.data.accessors.ITupleReference;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
-import org.apache.hyracks.storage.am.common.api.IIndexBulkLoader;
-import org.apache.hyracks.storage.am.common.api.IIndexCursor;
-import org.apache.hyracks.storage.am.common.api.IIndexOperationContext;
-import org.apache.hyracks.storage.am.common.api.IModificationOperationCallback;
-import org.apache.hyracks.storage.am.common.api.ISearchOperationCallback;
-import org.apache.hyracks.storage.am.common.api.ISearchPredicate;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexAccessor;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexCursor;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexFrame;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexFrameFactory;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexTupleReference;
-import org.apache.hyracks.storage.am.common.api.IndexException;
-import org.apache.hyracks.storage.am.common.api.TreeIndexException;
+import org.apache.hyracks.storage.am.common.api.*;
+import org.apache.hyracks.storage.am.common.api.IMetaDataPageManager;
+import org.apache.hyracks.storage.am.common.frames.AbstractSlotManager;
import org.apache.hyracks.storage.am.common.frames.FrameOpSpaceStatus;
import org.apache.hyracks.storage.am.common.impls.AbstractTreeIndex;
import org.apache.hyracks.storage.am.common.impls.NodeFrontier;
@@ -55,6 +45,7 @@
import org.apache.hyracks.storage.am.rtree.frames.RTreeNSMFrame;
import org.apache.hyracks.storage.am.rtree.frames.RTreeNSMInteriorFrame;
import org.apache.hyracks.storage.am.rtree.tuples.RTreeTypeAwareTupleWriter;
+import org.apache.hyracks.storage.common.buffercache.BufferCache;
import org.apache.hyracks.storage.common.buffercache.IBufferCache;
import org.apache.hyracks.storage.common.buffercache.ICachedPage;
import org.apache.hyracks.storage.common.file.BufferedFileHandle;
@@ -67,7 +58,7 @@
private final int maxTupleSize;
- public RTree(IBufferCache bufferCache, IFileMapProvider fileMapProvider, IFreePageManager freePageManager,
+ public RTree(IBufferCache bufferCache, IFileMapProvider fileMapProvider, IMetaDataPageManager freePageManager,
ITreeIndexFrameFactory interiorFrameFactory, ITreeIndexFrameFactory leafFrameFactory,
IBinaryComparatorFactory[] cmpFactories, int fieldCount, FileReference file) {
super(bufferCache, fileMapProvider, freePageManager, interiorFrameFactory, leafFrameFactory, cmpFactories,
@@ -157,8 +148,8 @@
private void insert(ITupleReference tuple, IIndexOperationContext ictx) throws HyracksDataException,
TreeIndexException {
RTreeOpContext ctx = (RTreeOpContext) ictx;
- int tupleSize = Math.max(ctx.leafFrame.getBytesRequriedToWriteTuple(tuple),
- ctx.interiorFrame.getBytesRequriedToWriteTuple(tuple));
+ int tupleSize = Math.max(ctx.leafFrame.getBytesRequiredToWriteTuple(tuple),
+ ctx.interiorFrame.getBytesRequiredToWriteTuple(tuple));
if (tupleSize > maxTupleSize) {
throw new TreeIndexException("Record size (" + tupleSize + ") larger than maximum acceptable record size ("
+ maxTupleSize + ")");
@@ -774,7 +765,7 @@
MultiComparator cmp = MultiComparator.create(cmpFactories);
SearchPredicate searchPred = new SearchPredicate(null, cmp);
- int currentPageId = rootPage;
+ int currentPageId = bulkloadLeafStart;
int maxPageId = freePageManager.getMaxPage(ctx.metaFrame);
ICachedPage page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, currentPageId), false);
@@ -867,8 +858,14 @@
public IIndexBulkLoader createBulkLoader(float fillFactor, boolean verifyInput, long numElementsHint,
boolean checkIfEmptyIndex) throws TreeIndexException {
// TODO: verifyInput currently does nothing.
+ return createBulkLoader(fillFactor, verifyInput, numElementsHint, checkIfEmptyIndex, false);
+ }
+
+ public IIndexBulkLoader createBulkLoader(float fillFactor, boolean verifyInput, long numElementsHint,
+ boolean checkIfEmptyIndex, boolean appendOnly) throws TreeIndexException {
+ // TODO: verifyInput currently does nothing.
try {
- return new RTreeBulkLoader(fillFactor);
+ return new RTreeBulkLoader(fillFactor, appendOnly);
} catch (HyracksDataException e) {
throw new TreeIndexException(e);
}
@@ -879,17 +876,19 @@
RTreeTypeAwareTupleWriter tupleWriter = ((RTreeTypeAwareTupleWriter) interiorFrame.getTupleWriter());
ITreeIndexTupleReference mbrTuple = interiorFrame.createTupleReference();
ByteBuffer mbr;
+ List<Integer> prevNodeFrontierPages = new ArrayList<Integer>();
+ List<ICachedPage> pagesToWrite = new ArrayList<ICachedPage>();
- public RTreeBulkLoader(float fillFactor) throws TreeIndexException, HyracksDataException {
- super(fillFactor);
+ public RTreeBulkLoader(float fillFactor, boolean appendOnly) throws TreeIndexException, HyracksDataException {
+ super(fillFactor, appendOnly);
prevInteriorFrame = interiorFrameFactory.createFrame();
}
@Override
public void add(ITupleReference tuple) throws IndexException, HyracksDataException {
try {
- int tupleSize = Math.max(leafFrame.getBytesRequriedToWriteTuple(tuple),
- interiorFrame.getBytesRequriedToWriteTuple(tuple));
+ int tupleSize = Math.max(leafFrame.getBytesRequiredToWriteTuple(tuple),
+ interiorFrame.getBytesRequiredToWriteTuple(tuple));
if (tupleSize > maxTupleSize) {
throw new TreeIndexException("Space required for record (" + tupleSize
+ ") larger than maximum acceptable size (" + maxTupleSize + ")");
@@ -907,22 +906,29 @@
}
if (spaceUsed + spaceNeeded > leafMaxBytes) {
- propagateBulk(1, false);
+
+ if (prevNodeFrontierPages.size() == 0) {
+ prevNodeFrontierPages.add(leafFrontier.pageId);
+ } else {
+ prevNodeFrontierPages.set(0, leafFrontier.pageId);
+ }
+ pagesToWrite.clear();
+ propagateBulk(1, false, pagesToWrite);
leafFrontier.pageId = freePageManager.getFreePage(metaFrame);
-
- leafFrontier.page.releaseWriteLatch(true);
- bufferCache.unpin(leafFrontier.page);
-
- leafFrontier.page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, leafFrontier.pageId),
- true);
- leafFrontier.page.acquireWriteLatch();
+ queue.put(leafFrontier.page);
+ for (ICachedPage c : pagesToWrite) {
+ queue.put(c);
+ }
+ leafFrontier.page = bufferCache.confiscatePage(BufferedFileHandle.getDiskPageId(fileId,
+ leafFrontier.pageId));
leafFrame.setPage(leafFrontier.page);
leafFrame.initBuffer((byte) 0);
+
}
leafFrame.setPage(leafFrontier.page);
- leafFrame.insert(tuple, -1);
+ leafFrame.insert(tuple, AbstractSlotManager.GREATEST_KEY_INDICATOR);
} catch (HyracksDataException e) {
handleException();
throw e;
@@ -934,12 +940,61 @@
}
public void end() throws HyracksDataException {
- propagateBulk(1, true);
+ pagesToWrite.clear();
+ //if writing a trivial 1-page tree, don't try to propagate up
+ if (nodeFrontiers.size() > 1) {
+ propagateBulk(1, true, pagesToWrite);
+ }
+ for (ICachedPage c : pagesToWrite) {
+ queue.put(c);
+ }
+ finish();
super.end();
}
- protected void propagateBulk(int level, boolean toRoot) throws HyracksDataException {
+ @Override
+ public void abort() throws HyracksDataException {
+ super.handleException();
+ }
+
+ protected void finish() throws HyracksDataException {
+ int prevPageId = -1;
+ //here we assign physical identifiers to everything we can
+ for (NodeFrontier n : nodeFrontiers) {
+ //not a leaf
+ if (nodeFrontiers.indexOf(n) != 0) {
+ interiorFrame.setPage(n.page);
+ mbrTuple.resetByTupleOffset(mbr, 0);
+ interiorFrame.insert(mbrTuple, -1);
+ interiorFrame.getBuffer().putInt(
+ interiorFrame.getTupleOffset(interiorFrame.getTupleCount() - 1) + mbrTuple.getTupleSize(),
+ prevPageId);
+
+ int finalPageId = freePageManager.getFreePage(metaFrame);
+ n.pageId = finalPageId;
+ bufferCache.setPageDiskId(n.page, BufferedFileHandle.getDiskPageId(fileId, finalPageId));
+ //else we are looking at a leaf
+ }
+ //set next guide MBR
+ //if propagateBulk didn't have to do anything, this may be unnecessary
+ if (nodeFrontiers.size() > 1 && nodeFrontiers.indexOf(n) < nodeFrontiers.size()-1) {
+ lowerFrame.setPage(n.page);
+ ((RTreeNSMFrame) lowerFrame).adjustMBR();
+ tupleWriter.writeTupleFields(((RTreeNSMFrame) lowerFrame).getMBRTuples(), 0, mbr, 0);
+ }
+ queue.put(n.page);
+ n.page = null;
+ prevPageId = n.pageId;
+ }
+ if (appendOnly) {
+ rootPage = nodeFrontiers.get(nodeFrontiers.size() - 1).pageId;
+ }
+ releasedLatches = true;
+ }
+
+ protected void propagateBulk(int level, boolean toRoot, List<ICachedPage> pagesToWrite)
+ throws HyracksDataException {
boolean propagated = false;
if (level == 1)
@@ -951,47 +1006,66 @@
if (level >= nodeFrontiers.size())
addLevel();
+ //adjust the tuple pointers of the lower frame to allow us to calculate our MBR
+ //if this is a leaf, then there is only one tuple, so this is trivial
((RTreeNSMFrame) lowerFrame).adjustMBR();
if (mbr == null) {
- int bytesRequired = tupleWriter.bytesRequired(((RTreeNSMFrame) lowerFrame).getTuples()[0], 0,
+ int bytesRequired = tupleWriter.bytesRequired(((RTreeNSMFrame) lowerFrame).getMBRTuples()[0], 0,
cmp.getKeyFieldCount())
+ ((RTreeNSMInteriorFrame) interiorFrame).getChildPointerSize();
mbr = ByteBuffer.allocate(bytesRequired);
}
- tupleWriter.writeTupleFields(((RTreeNSMFrame) lowerFrame).getTuples(), 0, mbr, 0);
+ tupleWriter.writeTupleFields(((RTreeNSMFrame) lowerFrame).getMBRTuples(), 0, mbr, 0);
mbrTuple.resetByTupleOffset(mbr, 0);
NodeFrontier frontier = nodeFrontiers.get(level);
interiorFrame.setPage(frontier.page);
+ //see if we have space for two tuples. this works around a tricky boundary condition with sequential bulk load where
+ //finalization can possibly lead to a split
+ //TODO: accomplish this without wasting 1 tuple
+ int sizeOfTwoTuples = 2 * (mbrTuple.getTupleSize() + RTreeNSMInteriorFrame.childPtrSize);
+ FrameOpSpaceStatus spaceForTwoTuples = (((RTreeNSMInteriorFrame) interiorFrame)
+ .hasSpaceInsert(sizeOfTwoTuples));
+ if (spaceForTwoTuples != FrameOpSpaceStatus.SUFFICIENT_CONTIGUOUS_SPACE && !toRoot) {
- interiorFrame.insert(mbrTuple, -1);
+ int finalPageId = freePageManager.getFreePage(metaFrame);
+ if (prevNodeFrontierPages.size() <= level) {
+ prevNodeFrontierPages.add(finalPageId);
+ } else {
+ prevNodeFrontierPages.set(level, finalPageId);
+ }
+ bufferCache.setPageDiskId(frontier.page, BufferedFileHandle.getDiskPageId(fileId, finalPageId));
+ pagesToWrite.add(frontier.page);
- interiorFrame.getBuffer().putInt(
- interiorFrame.getTupleOffset(interiorFrame.getTupleCount() - 1) + mbrTuple.getTupleSize(),
- nodeFrontiers.get(level - 1).pageId);
-
- if (interiorFrame.hasSpaceInsert(mbrTuple) != FrameOpSpaceStatus.SUFFICIENT_CONTIGUOUS_SPACE && !toRoot) {
lowerFrame = prevInteriorFrame;
lowerFrame.setPage(frontier.page);
- propagateBulk(level + 1, toRoot);
- propagated = true;
-
- frontier.page.releaseWriteLatch(true);
- bufferCache.unpin(frontier.page);
- frontier.pageId = freePageManager.getFreePage(metaFrame);
-
- frontier.page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, frontier.pageId), true);
- frontier.page.acquireWriteLatch();
+ frontier.page = bufferCache.confiscatePage(BufferCache.INVALID_DPID);
interiorFrame.setPage(frontier.page);
interiorFrame.initBuffer((byte) level);
+
+ interiorFrame.insert(mbrTuple, AbstractSlotManager.GREATEST_KEY_INDICATOR);
+
+ interiorFrame.getBuffer().putInt(
+ interiorFrame.getTupleOffset(interiorFrame.getTupleCount() - 1) + mbrTuple.getTupleSize(),
+ prevNodeFrontierPages.get(level - 1));
+
+ propagateBulk(level + 1, toRoot, pagesToWrite);
+ } else if (interiorFrame.hasSpaceInsert(mbrTuple) == FrameOpSpaceStatus.SUFFICIENT_CONTIGUOUS_SPACE
+ && !toRoot) {
+
+ interiorFrame.insert(mbrTuple, -1);
+
+ interiorFrame.getBuffer().putInt(
+ interiorFrame.getTupleOffset(interiorFrame.getTupleCount() - 1) + mbrTuple.getTupleSize(),
+ prevNodeFrontierPages.get(level - 1));
}
- if (toRoot && !propagated && level < nodeFrontiers.size() - 1) {
+ if (toRoot && level < nodeFrontiers.size() - 1) {
lowerFrame = prevInteriorFrame;
lowerFrame.setPage(frontier.page);
- propagateBulk(level + 1, true);
+ propagateBulk(level + 1, true, pagesToWrite);
}
leafFrame.setPage(nodeFrontiers.get(0).page);
diff --git a/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/impls/RTreeSearchCursor.java b/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/impls/RTreeSearchCursor.java
index 729e7e0..5d8ce2e 100644
--- a/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/impls/RTreeSearchCursor.java
+++ b/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/impls/RTreeSearchCursor.java
@@ -104,6 +104,8 @@
int pageId = pathList.getLastPageId();
long parentLsn = pathList.getLastPageLsn();
pathList.moveLast();
+ if(pageId <0) throw new IllegalStateException();
+ if(fileId<0) throw new IllegalStateException();
ICachedPage node = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, pageId), false);
node.acquireReadLatch();
readLatched = true;
diff --git a/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/util/RTreeUtils.java b/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/util/RTreeUtils.java
index 0e3eb80..234aa26 100644
--- a/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/util/RTreeUtils.java
+++ b/hyracks/hyracks-storage-am-rtree/src/main/java/org/apache/hyracks/storage/am/rtree/util/RTreeUtils.java
@@ -25,13 +25,13 @@
import org.apache.hyracks.api.io.FileReference;
import org.apache.hyracks.data.std.api.IPointableFactory;
import org.apache.hyracks.dataflow.common.data.accessors.ITupleReference;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
import org.apache.hyracks.storage.am.common.api.IPrimitiveValueProviderFactory;
import org.apache.hyracks.storage.am.common.api.ITreeIndexFrameFactory;
import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrameFactory;
+import org.apache.hyracks.storage.am.common.api.IMetaDataPageManager;
import org.apache.hyracks.storage.am.common.data.PointablePrimitiveValueProviderFactory;
import org.apache.hyracks.storage.am.common.frames.LIFOMetaDataFrameFactory;
-import org.apache.hyracks.storage.am.common.freepage.LinkedListFreePageManager;
+import org.apache.hyracks.storage.am.common.freepage.LinkedMetaDataPageManager;
import org.apache.hyracks.storage.am.common.ophelpers.MultiComparator;
import org.apache.hyracks.storage.am.rtree.frames.RTreeNSMInteriorFrameFactory;
import org.apache.hyracks.storage.am.rtree.frames.RTreeNSMLeafFrameFactory;
@@ -54,7 +54,7 @@
valueProviderFactories, rtreePolicyType);
ITreeIndexMetaDataFrameFactory metaFrameFactory = new LIFOMetaDataFrameFactory();
- IFreePageManager freePageManager = new LinkedListFreePageManager(bufferCache, 0, metaFrameFactory);
+ IMetaDataPageManager freePageManager = new LinkedMetaDataPageManager(bufferCache, metaFrameFactory);
RTree rtree = new RTree(bufferCache, fileMapProvider, freePageManager, interiorFrameFactory, leafFrameFactory,
cmpFactories, typeTraits.length, file);
return rtree;
diff --git a/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/AsyncFIFOPageQueueManager.java b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/AsyncFIFOPageQueueManager.java
new file mode 100644
index 0000000..08f467e
--- /dev/null
+++ b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/AsyncFIFOPageQueueManager.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hyracks.storage.common.buffercache;
+
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.hyracks.api.exceptions.HyracksDataException;
+import org.apache.hyracks.storage.common.file.BufferedFileHandle;
+
+public class AsyncFIFOPageQueueManager implements Runnable {
+ private final static boolean DEBUG = false;
+
+ protected LinkedBlockingQueue<ICachedPage> queue = new LinkedBlockingQueue<ICachedPage>();
+ volatile Thread writerThread;
+ protected AtomicBoolean poisoned = new AtomicBoolean(false);
+ protected BufferCache bufferCache;
+ volatile protected PageQueue pageQueue;
+
+ public AsyncFIFOPageQueueManager(BufferCache bufferCache){
+ this.bufferCache = bufferCache;
+ }
+
+ protected class PageQueue implements IFIFOPageQueue {
+ final IBufferCache bufferCache;
+ public final IFIFOPageWriter writer;
+
+ protected PageQueue(IBufferCache bufferCache, IFIFOPageWriter writer) {
+ if(DEBUG) System.out.println("[FIFO] New Queue");
+ this.bufferCache = bufferCache;
+ this.writer = writer;
+ }
+
+ protected IBufferCache getBufferCache() {
+ return bufferCache;
+ }
+
+ protected IFIFOPageWriter getWriter() {
+ return writer;
+ }
+
+ @Override
+ public void put(ICachedPage page) throws HyracksDataException {
+ try {
+ if(!poisoned.get()) {
+ queue.put(page);
+ }
+ else{
+ throw new HyracksDataException("Queue is closing");
+ }
+ } catch (InterruptedException e) {
+ // TODO: restore the interrupt status (Thread.currentThread().interrupt()) instead of swallowing it
+ e.printStackTrace();
+ }
+ }
+ }
+
+
+ public PageQueue createQueue(IFIFOPageWriter writer) {
+ if (pageQueue == null) {
+ synchronized(this){
+ if (pageQueue == null) {
+ writerThread = new Thread(this);
+ writerThread.setName("FIFO Writer Thread");
+ writerThread.start();
+ pageQueue = new PageQueue(bufferCache,writer);
+ }
+ }
+ }
+ return pageQueue;
+ }
+
+ public void destroyQueue(){
+ poisoned.set(true);
+ //Dummy cached page to act as poison pill
+ CachedPage poisonPill = new CachedPage();
+ poisonPill.setQueueInfo(new QueueInfo(true,true));
+ if(writerThread == null){
+ synchronized (this){
+ if(writerThread == null) {
+ return;
+ }
+ }
+ }
+
+ try{
+ synchronized(poisonPill){
+ queue.put(poisonPill);
+ while(queue.contains(poisonPill)){
+ poisonPill.wait();
+ }
+ }
+ } catch (InterruptedException e){
+ e.printStackTrace();
+ }
+ }
+
+ public void finishQueue() {
+ if(DEBUG) System.out.println("[FIFO] Finishing Queue");
+ try {
+ //Dummy cached page to act as low water mark
+ CachedPage lowWater = new CachedPage();
+ lowWater.setQueueInfo(new QueueInfo(true,false));
+ synchronized(lowWater){
+ queue.put(lowWater);
+ while(queue.contains(lowWater)){
+ lowWater.wait();
+ }
+ }
+ } catch (InterruptedException e) {
+ // TODO: decide how to handle interruption while waiting for the queue to drain
+ e.printStackTrace();
+ }
+ if(DEBUG) System.out.println("[FIFO] Queue finished");
+ }
+
+ @Override
+ public void run() {
+ if(DEBUG) System.out.println("[FIFO] Writer started");
+ boolean die = false;
+ while (!die) {
+ try {
+ ICachedPage entry = queue.take();
+ if(entry.getQueueInfo() != null && entry.getQueueInfo().hasWaiters()){
+ synchronized(entry) {
+ if(entry.getQueueInfo().isPoison()) { die = true; }
+ entry.notifyAll();
+ continue;
+ }
+ }
+
+ if(DEBUG) System.out.println("[FIFO] Write " + BufferedFileHandle.getFileId(((CachedPage)entry).dpid)+","
+ + BufferedFileHandle.getPageId(((CachedPage)entry).dpid));
+
+ try {
+ pageQueue.getWriter().write(entry, bufferCache);
+ } catch (HyracksDataException e) {
+ //TODO: decide how to recover from a failed page write (currently only logged)
+ e.printStackTrace();
+ }
+ } catch(InterruptedException e) {
+ continue;
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/BufferCache.java b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/BufferCache.java
index 3892e0a..3a23c8d 100644
--- a/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/BufferCache.java
+++ b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/BufferCache.java
@@ -26,6 +26,7 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
@@ -49,10 +50,11 @@
private static final int MIN_CLEANED_COUNT_DIFF = 3;
private static final int PIN_MAX_WAIT_TIME = 50;
+ public static final boolean DEBUG = false;
private final int pageSize;
private final int maxOpenFiles;
- private final IIOManager ioManager;
+ final IIOManager ioManager;
private final CacheBucket[] pageMap;
private final IPageReplacementStrategy pageReplacementStrategy;
private final IPageCleanerPolicy pageCleanerPolicy;
@@ -60,8 +62,16 @@
private final CleanerThread cleanerThread;
private final Map<Integer, BufferedFileHandle> fileInfoMap;
private final Set<Integer> virtualFiles;
+ private final AsyncFIFOPageQueueManager fifoWriter;
+ //DEBUG
+ private ArrayList<CachedPage> confiscatedPages;
+ private Lock confiscateLock;
+ private HashMap<CachedPage, StackTraceElement[]> confiscatedPagesOwner;
+ private ConcurrentHashMap<CachedPage, StackTraceElement[]> pinnedPageOwner;
+ //!DEBUG
private IIOReplicationManager ioReplicationManager;
- private List<ICachedPageInternal> cachedPages = new ArrayList<ICachedPageInternal>();
+ public List<ICachedPageInternal> cachedPages = new ArrayList<ICachedPageInternal>();
+
private boolean closed;
public BufferCache(IIOManager ioManager, IPageReplacementStrategy pageReplacementStrategy,
@@ -85,6 +95,14 @@
cleanerThread = new CleanerThread();
executor.execute(cleanerThread);
closed = false;
+
+ fifoWriter = new AsyncFIFOPageQueueManager(this);
+ if( DEBUG ) {
+ confiscatedPages = new ArrayList<CachedPage>();
+ confiscatedPagesOwner = new HashMap<CachedPage, StackTraceElement[]>();
+ confiscateLock = new ReentrantLock();
+ pinnedPageOwner = new ConcurrentHashMap<>();
+ }
}
//this constructor is used when replication is enabled to pass the IIOReplicationManager
@@ -126,8 +144,11 @@
@Override
public ICachedPage tryPin(long dpid) throws HyracksDataException {
- // Calling the pinSanityCheck should be used only for debugging, since the synchronized block over the fileInfoMap is a hot spot.
- //pinSanityCheck(dpid);
+ // Calling the pinSanityCheck should be used only for debugging, since
+ // the synchronized block over the fileInfoMap is a hot spot.
+ if (DEBUG) {
+ pinSanityCheck(dpid);
+ }
CachedPage cPage = null;
int hash = hash(dpid);
CacheBucket bucket = pageMap[hash];
@@ -150,10 +171,27 @@
@Override
public ICachedPage pin(long dpid, boolean newPage) throws HyracksDataException {
- // Calling the pinSanityCheck should be used only for debugging, since the synchronized block over the fileInfoMap is a hot spot.
- //pinSanityCheck(dpid);
+ // Calling the pinSanityCheck should be used only for debugging, since
+ // the synchronized block over the fileInfoMap is a hot spot.
+ if (DEBUG) {
+ pinSanityCheck(dpid);
+ }
CachedPage cPage = findPage(dpid, false);
if (!newPage) {
+ if (DEBUG) {
+ confiscateLock.lock();
+ try {
+ for (CachedPage c : confiscatedPages) {
+ if (c.dpid == dpid && c.confiscated.get()) {
+ while(confiscatedPages.contains(c)){
+ throw new IllegalStateException();
+ }
+ }
+ }
+ }finally{
+ confiscateLock.unlock();
+ }
+ }
// Resolve race of multiple threads trying to read the page from
// disk.
synchronized (cPage) {
@@ -166,40 +204,17 @@
cPage.valid = true;
}
pageReplacementStrategy.notifyCachePageAccess(cPage);
- return cPage;
- }
-
- @Override
- /**
- * Allocate and pin a virtual page. This is just like a normal page, except that it will never be flushed.
- */
- public ICachedPage pinVirtual(long vpid) throws HyracksDataException {
- //pinSanityCheck(vpid);
- CachedPage cPage = findPage(vpid, true);
- cPage.virtual = true;
- return cPage;
- }
-
- @Override
- /**
- * Takes a virtual page, and copies it to a new page at the physical identifier.
- */
- //TODO: I should not have to copy the page. I should just append it to the end of the hash bucket, but this is
- //safer/easier for now.
- public ICachedPage unpinVirtual(long vpid, long dpid) throws HyracksDataException {
- CachedPage virtPage = findPage(vpid, true); //should definitely succeed.
- //pinSanityCheck(dpid); //debug
- ICachedPage realPage = pin(dpid, false);
- virtPage.acquireReadLatch();
- realPage.acquireWriteLatch();
- try {
- System.arraycopy(virtPage.buffer.array(), 0, realPage.getBuffer().array(), 0, virtPage.buffer.capacity());
- } finally {
- realPage.releaseWriteLatch(true);
- virtPage.releaseReadLatch();
+ if(DEBUG){
+ pinnedPageOwner.put((CachedPage) cPage, Thread.currentThread().getStackTrace());
}
- virtPage.reset(-1); //now cause the virtual page to die
- return realPage;
+ return cPage;
+ }
+
+
+
+ private boolean isVirtual(long vpid) throws HyracksDataException {
+ CachedPage virtPage = findPage(vpid, true);
+ return virtPage.confiscated.get();
}
private CachedPage findPage(long dpid, boolean virtual) throws HyracksDataException {
@@ -217,7 +232,13 @@
try {
cPage = bucket.cachedPage;
while (cPage != null) {
+ if(DEBUG) {
+ assert bucket.cachedPage != bucket.cachedPage.next;
+ }
if (cPage.dpid == dpid) {
+ if(DEBUG) {
+ assert !cPage.confiscated.get();
+ }
cPage.pinCount.incrementAndGet();
return cPage;
}
@@ -237,28 +258,29 @@
* on the CachedPage may or may not be valid. 2. We have a pin
* on the CachedPage. We have to deal with three cases here.
* Case 1: The dpid on the CachedPage is invalid (-1). This
- * indicates that this buffer has never been used or is a virtual page. So we are the
- * only ones holding it. Get a lock on the required dpid's hash
- * bucket, check if someone inserted the page we want into the
- * table. If so, decrement the pincount on the victim and return
- * the winner page in the table. If such a winner does not
- * exist, insert the victim and return it. Case 2: The dpid on
- * the CachedPage is valid. Case 2a: The current dpid and
- * required dpid hash to the same bucket. Get the bucket lock,
- * check that the victim is still at pinCount == 1 If so check
- * if there is a winning CachedPage with the required dpid. If
- * so, decrement the pinCount on the victim and return the
- * winner. If not, update the contents of the CachedPage to hold
- * the required dpid and return it. If the picCount on the
- * victim was != 1 or CachedPage was dirty someone used the
- * victim for its old contents -- Decrement the pinCount and
- * retry. Case 2b: The current dpid and required dpid hash to
- * different buckets. Get the two bucket locks in the order of
- * the bucket indexes (Ordering prevents deadlocks). Check for
- * the existence of a winner in the new bucket and for potential
- * use of the victim (pinCount != 1). If everything looks good,
- * remove the CachedPage from the old bucket, and add it to the
- * new bucket and update its header with the new dpid.
+ * indicates that this buffer has never been used or is a
+ * confiscated page. So we are the only ones holding it. Get a lock
+ * on the required dpid's hash bucket, check if someone inserted
+ * the page we want into the table. If so, decrement the
+ * pincount on the victim and return the winner page in the
+ * table. If such a winner does not exist, insert the victim and
+ * return it. Case 2: The dpid on the CachedPage is valid. Case
+ * 2a: The current dpid and required dpid hash to the same
+ * bucket. Get the bucket lock, check that the victim is still
+ * at pinCount == 1 If so check if there is a winning CachedPage
+ * with the required dpid. If so, decrement the pinCount on the
+ * victim and return the winner. If not, update the contents of
+ * the CachedPage to hold the required dpid and return it. If
+ * the pinCount on the victim was != 1 or CachedPage was dirty
+ * someone used the victim for its old contents -- Decrement the
+ * pinCount and retry. Case 2b: The current dpid and required
+ * dpid hash to different buckets. Get the two bucket locks in
+ * the order of the bucket indexes (Ordering prevents
+ * deadlocks). Check for the existence of a winner in the new
+ * bucket and for potential use of the victim (pinCount != 1).
+ * If everything looks good, remove the CachedPage from the old
+ * bucket, and add it to the new bucket and update its header
+ * with the new dpid.
*/
if (victim.dpid < 0) {
/*
@@ -266,11 +288,24 @@
*/
bucket.bucketLock.lock();
try {
+ if (DEBUG) {
+ confiscateLock.lock();
+ try{
+ if (confiscatedPages.contains(victim)) {
+ throw new IllegalStateException();
+ }
+ } finally{
+ confiscateLock.unlock();
+ }
+ }
cPage = bucket.cachedPage;
while (cPage != null) {
if (cPage.dpid == dpid) {
cPage.pinCount.incrementAndGet();
victim.pinCount.decrementAndGet();
+ if(DEBUG) {
+ assert !cPage.confiscated.get();
+ }
return cPage;
}
cPage = cPage.next;
@@ -281,6 +316,10 @@
} finally {
bucket.bucketLock.unlock();
}
+
+ if(DEBUG) {
+ assert !victim.confiscated.get();
+ }
return victim;
}
int victimHash = hash(victim.dpid);
@@ -294,11 +333,24 @@
victim.pinCount.decrementAndGet();
continue;
}
+ if (DEBUG) {
+ confiscateLock.lock();
+ try{
+ if (confiscatedPages.contains(victim)) {
+ throw new IllegalStateException();
+ }
+ }finally{
+ confiscateLock.unlock();
+ }
+ }
cPage = bucket.cachedPage;
while (cPage != null) {
if (cPage.dpid == dpid) {
cPage.pinCount.incrementAndGet();
victim.pinCount.decrementAndGet();
+ if(DEBUG) {
+ assert !victim.confiscated.get();
+ }
return cPage;
}
cPage = cPage.next;
@@ -307,6 +359,9 @@
} finally {
bucket.bucketLock.unlock();
}
+ if(DEBUG) {
+ assert !victim.confiscated.get();
+ }
return victim;
} else {
/*
@@ -325,11 +380,19 @@
victim.pinCount.decrementAndGet();
continue;
}
+ if (DEBUG) {
+ if (confiscatedPages.contains(victim)) {
+ throw new IllegalStateException();
+ }
+ }
cPage = bucket.cachedPage;
while (cPage != null) {
if (cPage.dpid == dpid) {
cPage.pinCount.incrementAndGet();
victim.pinCount.decrementAndGet();
+ if(DEBUG) {
+ assert !cPage.confiscated.get();
+ }
return cPage;
}
cPage = cPage.next;
@@ -341,7 +404,9 @@
while (victimPrev != null && victimPrev.next != victim) {
victimPrev = victimPrev.next;
}
- assert victimPrev != null;
+ if(DEBUG) {
+ assert victimPrev != null;
+ }
victimPrev.next = victim.next;
}
victim.reset(dpid);
@@ -351,6 +416,9 @@
victimBucket.bucketLock.unlock();
bucket.bucketLock.unlock();
}
+ if(DEBUG) {
+ assert !victim.confiscated.get();
+ }
return victim;
}
}
@@ -382,6 +450,7 @@
.append('\n');
buffer.append("Hash table size: ").append(pageMap.length).append('\n');
buffer.append("Page Map:\n");
+ buffer.append("cpid -> [fileId:pageId, pinCount, valid/invalid, confiscated/physical, dirty/clean]");
int nCachedPages = 0;
for (int i = 0; i < pageMap.length; ++i) {
CacheBucket cb = pageMap[i];
@@ -395,6 +464,7 @@
.append(BufferedFileHandle.getFileId(cp.dpid)).append(':')
.append(BufferedFileHandle.getPageId(cp.dpid)).append(", ").append(cp.pinCount.get())
.append(", ").append(cp.valid ? "valid" : "invalid").append(", ")
+ .append(cp.confiscated.get() ? "confiscated" : "physical").append(", ")
.append(cp.dirty.get() ? "dirty" : "clean").append("]\n");
cp = cp.next;
++nCachedPages;
@@ -405,6 +475,15 @@
}
}
buffer.append("Number of cached pages: ").append(nCachedPages).append('\n');
+ if(DEBUG){
+ confiscateLock.lock();
+ try{
+ buffer.append("Number of confiscated pages: ").append(confiscatedPages.size()).append('\n');
+ }
+ finally{
+ confiscateLock.unlock();
+ }
+ }
return buffer.toString();
}
@@ -415,9 +494,13 @@
cPage.buffer);
}
- private BufferedFileHandle getFileInfo(CachedPage cPage) throws HyracksDataException {
+ BufferedFileHandle getFileInfo(CachedPage cPage) throws HyracksDataException {
+ return getFileInfo(BufferedFileHandle.getFileId(cPage.dpid));
+ }
+
+ BufferedFileHandle getFileInfo(int fileId) throws HyracksDataException {
synchronized (fileInfoMap) {
- BufferedFileHandle fInfo = fileInfoMap.get(BufferedFileHandle.getFileId(cPage.dpid));
+ BufferedFileHandle fInfo = fileInfoMap.get(fileId);
if (fInfo == null) {
throw new HyracksDataException("No such file mapped");
}
@@ -443,6 +526,9 @@
if (closed) {
throw new HyracksDataException("unpin called on a closed cache");
}
+ if(DEBUG){
+ pinnedPageOwner.remove(page);
+ }
((CachedPage) page).pinCount.decrementAndGet();
}
@@ -462,7 +548,9 @@
@Override
public ICachedPageInternal getPage(int cpid) {
- return cachedPages.get(cpid);
+ synchronized (cachedPages) {
+ return cachedPages.get(cpid);
+ }
}
private class CleanerThread extends Thread {
@@ -482,7 +570,7 @@
}
public void cleanPage(CachedPage cPage, boolean force) {
- if (cPage.dirty.get() && !cPage.virtual) {
+ if (cPage.dirty.get() && !cPage.confiscated.get()) {
boolean proceed = false;
if (force) {
cPage.latch.writeLock().lock();
@@ -529,10 +617,15 @@
try {
while (true) {
pageCleanerPolicy.notifyCleanCycleStart(this);
- int numPages = pageReplacementStrategy.getNumPages();
- for (int i = 0; i < numPages; ++i) {
- CachedPage cPage = (CachedPage) cachedPages.get(i);
- cleanPage(cPage, false);
+ int curPage = 0;
+ while (true) {
+ synchronized (cachedPages) {
+ if (curPage >= pageReplacementStrategy.getNumPages()) {
+ break;
+ }
+ cleanPage((CachedPage) cachedPages.get(curPage), false);
+ }
+ curPage++;
}
if (shutdownStart) {
break;
@@ -551,6 +644,7 @@
@Override
public void close() {
closed = true;
+ fifoWriter.destroyQueue();
synchronized (cleanerThread) {
cleanerThread.shutdownStart = true;
cleanerThread.notifyAll();
@@ -698,7 +792,10 @@
pinCount = cPage.pinCount.get();
}
if (pinCount > 0) {
- throw new IllegalStateException("Page is pinned and file is being closed. Pincount is: " + pinCount);
+ throw new IllegalStateException("Page " + BufferedFileHandle.getFileId(cPage.dpid) + ":"
+ + BufferedFileHandle.getPageId(cPage.dpid)
+ + " is pinned and file is being closed. Pincount is: " + pinCount + " Page is confiscated: "
+ + cPage.confiscated);
}
cPage.invalidate();
return true;
@@ -749,12 +846,8 @@
if (LOGGER.isLoggable(Level.INFO)) {
LOGGER.info("Deleting file: " + fileId + " in cache: " + this);
}
- if (flushDirtyPages) {
- synchronized (fileInfoMap) {
- sweepAndFlush(fileId, flushDirtyPages);
- }
- }
synchronized (fileInfoMap) {
+ sweepAndFlush(fileId, flushDirtyPages);
BufferedFileHandle fInfo = null;
try {
fInfo = fileInfoMap.get(fileId);
@@ -819,7 +912,9 @@
@Override
public void addPage(ICachedPageInternal page) {
- cachedPages.add(page);
+ synchronized (cachedPages) {
+ cachedPages.add(page);
+ }
}
@Override
@@ -828,6 +923,214 @@
}
@Override
+ public int getNumPagesOfFile(int fileId) throws HyracksDataException {
+ synchronized (fileInfoMap) {
+ BufferedFileHandle fInfo = fileInfoMap.get(fileId);
+ if (fInfo == null) {
+ throw new HyracksDataException("No such file mapped for fileId:" + fileId);
+ }
+ if(DEBUG) {
+ assert ioManager.getSize(fInfo.getFileHandle()) % getPageSize() == 0;
+ }
+ return (int) (ioManager.getSize(fInfo.getFileHandle()) / getPageSize());
+ }
+ }
+
+ @Override
+ public void adviseWontNeed(ICachedPage page) {
+ pageReplacementStrategy.adviseWontNeed((ICachedPageInternal) page);
+ }
+
+ @Override
+ public ICachedPage confiscatePage(long dpid) throws HyracksDataException {
+ while (true) {
+ int startCleanedCount = cleanerThread.cleanedCount;
+ ICachedPage returnPage = null;
+ CachedPage victim = (CachedPage) pageReplacementStrategy.findVictim();
+ if (victim != null) {
+ if(DEBUG) {
+ assert !victim.confiscated.get();
+ }
+ // find a page that would possibly be evicted anyway
+ // Case 1 from findPage()
+ if (victim.dpid < 0) { // new page
+ if (victim.pinCount.get() != 1) {
+ victim.pinCount.decrementAndGet();
+ continue;
+ }
+ returnPage = victim;
+ ((CachedPage) returnPage).dpid = dpid;
+ } else {
+ // Case 2a/b
+ int pageHash = hash(victim.getDiskPageId());
+ CacheBucket bucket = pageMap[pageHash];
+ bucket.bucketLock.lock();
+ try {
+ // readjust the next pointers to remove this page from
+ // the pagemap
+ CachedPage curr = bucket.cachedPage;
+ CachedPage prev = null;
+ boolean found = false;
+ //traverse the bucket's linked list to find the victim.
+ while (curr != null) {
+ if (curr == victim) { // we found where the victim
+ // resides in the hash table
+ if (victim.pinCount.get() != 1) {
+ victim.pinCount.decrementAndGet();
+ break;
+ }
+ // if this is the first page in the bucket
+ if (prev == null) {
+ if(DEBUG) {
+ assert curr != curr.next;
+ }
+ bucket.cachedPage = bucket.cachedPage.next;
+ found = true;
+ break;
+ // if it isn't we need to make the previous
+ // node point to where it should
+ } else {
+ if(DEBUG) {
+ assert curr.next != curr;
+ }
+ prev.next = curr.next;
+ curr.next = null;
+ if(DEBUG) {
+ assert prev.next != prev;
+ }
+ found = true;
+ break;
+ }
+ }
+ // go to the next entry
+ prev = curr;
+ curr = curr.next;
+ }
+ if (found) {
+ returnPage = victim;
+ ((CachedPage) returnPage).dpid = dpid;
+ } //otherwise, someone took the same victim before we acquired the lock. try again!
+ } finally {
+ bucket.bucketLock.unlock();
+ }
+ }
+ }
+ // if we found a page after all that, go ahead and finish
+ if (returnPage != null) {
+ ((CachedPage) returnPage).confiscated.set(true);
+ if (DEBUG) {
+ confiscateLock.lock();
+ try{
+ confiscatedPages.add((CachedPage) returnPage);
+ confiscatedPagesOwner.put((CachedPage) returnPage, Thread.currentThread().getStackTrace());
+ }
+ finally{
+ confiscateLock.unlock();
+ }
+ }
+ return returnPage;
+ }
+ // no page available to confiscate. try kicking the cleaner thread.
+ synchronized (cleanerThread) {
+ pageCleanerPolicy.notifyVictimNotFound(cleanerThread);
+ }
+ // Heuristic optimization. Check whether the cleaner thread has
+ // cleaned pages since we did our last pin attempt.
+ if (cleanerThread.cleanedCount - startCleanedCount > MIN_CLEANED_COUNT_DIFF) {
+ // Don't go to sleep and wait for notification from the cleaner,
+ // just try to pin again immediately.
+ continue;
+ }
+ synchronized (cleanerThread.cleanNotification) {
+ try {
+ cleanerThread.cleanNotification.wait(PIN_MAX_WAIT_TIME);
+ } catch (InterruptedException e) {
+ // Do nothing
+ }
+ }
+ }
+ }
+
+ @Override
+ public void returnPage(ICachedPage page) {
+ returnPage(page, true);
+ }
+
+ @Override
+ public void returnPage(ICachedPage page, boolean reinsert) {
+ CachedPage cPage = (CachedPage) page;
+ CacheBucket bucket = null;
+ if(!page.confiscated()){
+ return;
+ }
+ if (reinsert) {
+ int hash = hash(cPage.dpid);
+ bucket = pageMap[hash];
+ bucket.bucketLock.lock();
+ if(DEBUG) {
+ confiscateLock.lock();
+ }
+ try {
+ cPage.reset(cPage.dpid);
+ cPage.valid = true;
+ cPage.next = bucket.cachedPage;
+ bucket.cachedPage = cPage;
+ cPage.pinCount.decrementAndGet();
+ if(DEBUG){
+ assert cPage.pinCount.get() == 0 ;
+ assert cPage.latch.getReadLockCount() == 0;
+ assert cPage.latch.getWriteHoldCount() == 0;
+ confiscatedPages.remove(cPage);
+ confiscatedPagesOwner.remove(cPage);
+ }
+ } finally {
+ bucket.bucketLock.unlock();
+ if(DEBUG) {
+ confiscateLock.unlock();
+ }
+ }
+ } else {
+ cPage.invalidate();
+ cPage.pinCount.decrementAndGet();
+ if(DEBUG){
+ assert cPage.pinCount.get() == 0;
+ assert cPage.latch.getReadLockCount() == 0;
+ assert cPage.latch.getWriteHoldCount() == 0;
+ confiscateLock.lock();
+ try{
+ confiscatedPages.remove(cPage);
+ confiscatedPagesOwner.remove(cPage);
+ } finally{
+ confiscateLock.unlock();
+ }
+ }
+ }
+ pageReplacementStrategy.adviseWontNeed(cPage);
+ }
+
+ @Override
+ public void setPageDiskId(ICachedPage page, long dpid) {
+ ((CachedPage) page).dpid = dpid;
+ }
+
+ @Override
+ public IFIFOPageQueue createFIFOQueue() {
+ return fifoWriter.createQueue(FIFOLocalWriter.instance());
+ }
+
+ @Override
+ public void finishQueue() {
+ fifoWriter.finishQueue();
+ }
+
+ @Override
+ public void copyPage(ICachedPage src, ICachedPage dst) {
+ CachedPage srcCast = (CachedPage) src;
+ CachedPage dstCast = (CachedPage) dst;
+ System.arraycopy(srcCast.buffer.array(), 0, dstCast.getBuffer().array(), 0, srcCast.buffer.capacity());
+ }
+
+ @Override
public boolean isReplicationEnabled() {
if (ioReplicationManager != null) {
return ioReplicationManager.isReplicationEnabled();
@@ -839,4 +1142,21 @@
public IIOReplicationManager getIOReplicationManager() {
return ioReplicationManager;
}
+
+ @Override
+ /**
+ * _ONLY_ call this if you absolutely, positively know this file has no dirty pages in the cache!
+ * Bypasses the normal lifecycle of a file handle and evicts all references to it immediately.
+ */
+ public void purgeHandle(int fileId) throws HyracksDataException{
+ synchronized(fileInfoMap){
+ BufferedFileHandle fh = fileInfoMap.get(fileId);
+ if(fh != null){
+ ioManager.close(fh.getFileHandle());
+ fileInfoMap.remove(fileId);
+ fileMapManager.unregisterFile(fileId);
+ }
+ }
+ }
+
}
diff --git a/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/CachedPage.java b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/CachedPage.java
index 0c0a618..305b577 100644
--- a/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/CachedPage.java
+++ b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/CachedPage.java
@@ -21,24 +21,37 @@
import java.nio.ByteBuffer;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
* @author yingyib
*/
-class CachedPage implements ICachedPageInternal {
+public class CachedPage implements ICachedPageInternal {
final int cpid;
final ByteBuffer buffer;
- final AtomicInteger pinCount;
+ public final AtomicInteger pinCount;
final AtomicBoolean dirty;
- final ReadWriteLock latch;
+ final ReentrantReadWriteLock latch;
private final Object replacementStrategyObject;
private final IPageReplacementStrategy pageReplacementStrategy;
- volatile long dpid;
+ volatile long dpid; // disk page id (composed of file id and page id)
CachedPage next;
volatile boolean valid;
- volatile boolean virtual;
+ final AtomicBoolean confiscated;
+ private IQueueInfo queueInfo;
+
+ //Constructor for making a dummy entry (poison pill / low-water mark) for the FIFO queue
+ public CachedPage(){
+ this.cpid = -1;
+ this.buffer = null;
+ this.pageReplacementStrategy = null;
+ this.dirty = new AtomicBoolean(false);
+ this.confiscated = new AtomicBoolean(true);
+ pinCount = null;
+ queueInfo = null;
+ replacementStrategyObject = null;
+ latch =null;
+ }
public CachedPage(int cpid, ByteBuffer buffer, IPageReplacementStrategy pageReplacementStrategy) {
this.cpid = cpid;
@@ -50,14 +63,17 @@
replacementStrategyObject = pageReplacementStrategy.createPerPageStrategyObject(cpid);
dpid = -1;
valid = false;
- virtual = false;
+ confiscated = new AtomicBoolean(false);
+ queueInfo = null;
}
public void reset(long dpid) {
this.dpid = dpid;
dirty.set(false);
valid = false;
+ confiscated.set(false);
pageReplacementStrategy.notifyCachePageReset(this);
+ queueInfo = null;
}
public void invalidate() {
@@ -76,10 +92,11 @@
@Override
public boolean pinIfGoodVictim() {
- if (virtual)
+ if (confiscated.get())
return false; //i am not a good victim because i cant flush!
- else
+ else {
return pinCount.compareAndSet(0, 1);
+ }
}
@Override
@@ -104,11 +121,43 @@
@Override
public void releaseWriteLatch(boolean markDirty) {
- if (markDirty) {
- if (dirty.compareAndSet(false, true)) {
- pinCount.incrementAndGet();
+ try {
+ if (markDirty) {
+ if (dirty.compareAndSet(false, true)) {
+ pinCount.incrementAndGet();
+ }
}
+ } finally {
+ latch.writeLock().unlock();
}
- latch.writeLock().unlock();
}
+
+ @Override
+ public boolean confiscated() {
+ return confiscated.get();
+ }
+
+ @Override
+ public IQueueInfo getQueueInfo() {
+ return queueInfo;
+ }
+
+ @Override
+ public void setQueueInfo(IQueueInfo queueInfo) {
+ this.queueInfo = queueInfo;
+ }
+
+ @Override
+ public long getDiskPageId() {
+ return dpid;
+ }
+
+ CachedPage getNext() {
+ return next;
+ }
+
+ void setNext(CachedPage next) {
+ this.next = next;
+ }
+
}
diff --git a/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/ClockPageReplacementStrategy.java b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/ClockPageReplacementStrategy.java
index 6050b22..6a82b97 100644
--- a/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/ClockPageReplacementStrategy.java
+++ b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/ClockPageReplacementStrategy.java
@@ -25,9 +25,10 @@
private static final int MAX_UNSUCCESSFUL_CYCLE_COUNT = 3;
private IBufferCacheInternal bufferCache;
- private int clockPtr;
+ private AtomicInteger clockPtr;
private ICacheMemoryAllocator allocator;
- private AtomicInteger numPages = new AtomicInteger(0);
+ private AtomicInteger numPages;
+ private AtomicInteger cpIdCounter;
private final int pageSize;
private final int maxAllowedNumPages;
@@ -35,7 +36,9 @@
this.allocator = allocator;
this.pageSize = pageSize;
this.maxAllowedNumPages = maxAllowedNumPages;
- clockPtr = 0;
+ this.clockPtr = new AtomicInteger(0);
+ this.numPages = new AtomicInteger(0);
+ this.cpIdCounter = new AtomicInteger(0);
}
@Override
@@ -61,9 +64,7 @@
@Override
public ICachedPageInternal findVictim() {
ICachedPageInternal cachedPage = null;
- int pageCount = getNumPages();
- // pageCount is a lower-bound of numPages.
- if (pageCount >= maxAllowedNumPages) {
+ if (numPages.get() >= maxAllowedNumPages) {
cachedPage = findVictimByEviction();
} else {
cachedPage = allocatePage();
@@ -72,10 +73,12 @@
}
private ICachedPageInternal findVictimByEviction() {
- int startClockPtr = clockPtr;
+ // Sanity check: ensure we are not starved of evictable pages due to confiscation.
+ assert (maxAllowedNumPages > 0);
+ int startClockPtr = clockPtr.get();
int cycleCount = 0;
do {
- ICachedPageInternal cPage = bufferCache.getPage(clockPtr);
+ ICachedPageInternal cPage = bufferCache.getPage(clockPtr.get());
/*
* We do two things here:
@@ -88,36 +91,26 @@
AtomicBoolean accessedFlag = getPerPageObject(cPage);
if (!accessedFlag.compareAndSet(true, false)) {
if (cPage.pinIfGoodVictim()) {
- return cPage;
+ return cPage;
}
}
- /**
- * The clockPtr may miss the last added pages in this round.
- */
- clockPtr = (clockPtr + 1) % getNumPages();
- if (clockPtr == startClockPtr) {
+ advanceClock();
+ if (clockPtr.get() == startClockPtr) {
++cycleCount;
}
} while (cycleCount < MAX_UNSUCCESSFUL_CYCLE_COUNT);
return null;
}
- /**
- * The number returned here could only be smaller or equal to the actual number
- * of pages, because numPages is monotonically incremented.
- */
@Override
public int getNumPages() {
return numPages.get();
}
private ICachedPageInternal allocatePage() {
- CachedPage cPage = null;
- synchronized (this) {
- cPage = new CachedPage(numPages.get(), allocator.allocate(pageSize, 1)[0], this);
- bufferCache.addPage(cPage);
- numPages.incrementAndGet();
- }
+ CachedPage cPage = new CachedPage(cpIdCounter.getAndIncrement(), allocator.allocate(pageSize, 1)[0], this);
+ bufferCache.addPage(cPage);
+ numPages.incrementAndGet();
AtomicBoolean accessedFlag = getPerPageObject(cPage);
if (!accessedFlag.compareAndSet(true, false)) {
if (cPage.pinIfGoodVictim()) {
@@ -127,6 +120,21 @@
return null;
}
+ // Derived from RoundRobinAllocationPolicy in Apache DirectMemory.
+ private int advanceClock(){
+ boolean clockInDial = false;
+ int newClockPtr = 0;
+ do
+ {
+ int currClockPtr = clockPtr.get();
+ newClockPtr = ( currClockPtr + 1 ) % numPages.get();
+ clockInDial = clockPtr.compareAndSet( currClockPtr, newClockPtr );
+ }
+ while ( !clockInDial );
+ return newClockPtr;
+
+ }
+
private AtomicBoolean getPerPageObject(ICachedPageInternal cPage) {
return (AtomicBoolean) cPage.getReplacementStrategyObject();
}
@@ -140,4 +148,10 @@
public int getMaxAllowedNumPages() {
return maxAllowedNumPages;
}
-}
\ No newline at end of file
+
+ @Override
+ public void adviseWontNeed(ICachedPageInternal cPage) {
+ //make the page appear as if it wasn't accessed even if it was
+ getPerPageObject(cPage).set(false);
+ }
+}
diff --git a/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/DebugBufferCache.java b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/DebugBufferCache.java
index 10ce65a..9f7960f 100644
--- a/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/DebugBufferCache.java
+++ b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/DebugBufferCache.java
@@ -182,15 +182,51 @@
}
@Override
- public ICachedPage pinVirtual(long vpid) throws HyracksDataException {
- pinCount.addAndGet(1);
- return bufferCache.pinVirtual(vpid);
+ public int getNumPagesOfFile(int fileId) throws HyracksDataException {
+ return bufferCache.getNumPagesOfFile(fileId);
}
@Override
- public ICachedPage unpinVirtual(long vpid, long dpid) throws HyracksDataException {
- unpinCount.addAndGet(1);
- return bufferCache.unpinVirtual(vpid, dpid);
+ public void adviseWontNeed(ICachedPage page) {
+ bufferCache.adviseWontNeed(page);
+ }
+
+ @Override
+ public ICachedPage confiscatePage(long dpid) throws HyracksDataException {
+ return bufferCache.confiscatePage(dpid);
+ }
+
+ @Override
+ public void returnPage(ICachedPage page) {
+ bufferCache.returnPage(page);
+ }
+
+ @Override
+ public IFIFOPageQueue createFIFOQueue() {
+ return bufferCache.createFIFOQueue();
+ }
+
+ @Override
+ public void finishQueue() {
+ bufferCache.finishQueue();
+ }
+
+ @Override
+ public void copyPage(ICachedPage src, ICachedPage dst) {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setPageDiskId(ICachedPage page, long dpid) {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void returnPage(ICachedPage page, boolean reinsert) {
+ // TODO Auto-generated method stub
+
}
@Override
@@ -207,4 +243,9 @@
public IIOReplicationManager getIOReplicationManager() {
return null;
}
+
+ @Override
+ public void purgeHandle(int fileId) throws HyracksDataException {
+ bufferCache.purgeHandle(fileId);
+ }
}
diff --git a/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/FIFOLocalWriter.java b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/FIFOLocalWriter.java
new file mode 100644
index 0000000..2f66b15
--- /dev/null
+++ b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/FIFOLocalWriter.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2009-2013 by The Regents of the University of California
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * you may obtain a copy of the License from
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hyracks.storage.common.buffercache;
+
+import org.apache.hyracks.api.exceptions.HyracksDataException;
+import org.apache.hyracks.storage.common.file.BufferedFileHandle;
+
+public class FIFOLocalWriter implements IFIFOPageWriter {
+ private static FIFOLocalWriter instance;
+ private static boolean DEBUG = false;
+
+ public static FIFOLocalWriter instance() {
+ if(instance == null) {
+ instance = new FIFOLocalWriter();
+ }
+ return instance;
+ }
+
+ @Override
+ public void write(ICachedPage page, IBufferCache ibufferCache) throws HyracksDataException {
+ BufferCache bufferCache = (BufferCache)ibufferCache;
+ CachedPage cPage = (CachedPage)page;
+ BufferedFileHandle fInfo = bufferCache.getFileInfo(cPage);
+ if (fInfo.fileHasBeenDeleted()) {
+ return;
+ }
+ cPage.buffer.position(0);
+ cPage.buffer.limit(bufferCache.getPageSize());
+ bufferCache.ioManager.syncWrite(fInfo.getFileHandle(), (long) BufferedFileHandle.getPageId(cPage.dpid) * bufferCache.getPageSize(),
+ cPage.buffer);
+ bufferCache.returnPage(cPage);
+ if(DEBUG) System.out.println("[FIFO] Return page: " + cPage.cpid + "," + cPage.dpid);
+ }
+
+ @Override
+ public void sync(int fileId, IBufferCache ibufferCache) throws HyracksDataException {
+ BufferCache bufferCache = (BufferCache)ibufferCache;
+ BufferedFileHandle fInfo = bufferCache.getFileInfo(fileId);
+ bufferCache.ioManager.sync(fInfo.getFileHandle(), true);
+ }
+}
diff --git a/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/IBufferCache.java b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/IBufferCache.java
index dc4ee3b..20e268d 100644
--- a/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/IBufferCache.java
+++ b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/IBufferCache.java
@@ -19,12 +19,17 @@
package org.apache.hyracks.storage.common.buffercache;
import org.apache.hyracks.api.exceptions.HyracksDataException;
+import org.apache.hyracks.api.io.IFileHandle;
import org.apache.hyracks.api.io.FileReference;
import org.apache.hyracks.api.replication.IIOReplicationManager;
public interface IBufferCache {
- public void createFile(FileReference fileRef) throws HyracksDataException;
+ public static final long INVALID_DPID = -1l;
+ public static final int INVALID_PAGEID = -1;
+
+ public void createFile(FileReference fileRef) throws HyracksDataException;
+
public int createMemFile() throws HyracksDataException;
public void openFile(int fileId) throws HyracksDataException;
@@ -39,26 +44,42 @@
public ICachedPage pin(long dpid, boolean newPage) throws HyracksDataException;
- public ICachedPage pinVirtual(long vpid) throws HyracksDataException;
-
- public ICachedPage unpinVirtual(long vpid, long dpid) throws HyracksDataException;
-
public void unpin(ICachedPage page) throws HyracksDataException;
public void flushDirtyPage(ICachedPage page) throws HyracksDataException;
+ public void adviseWontNeed(ICachedPage page);
+
+ public ICachedPage confiscatePage(long dpid) throws HyracksDataException;
+
+ public void returnPage(ICachedPage page);
+
+ public void returnPage(ICachedPage page, boolean reinsert);
+
public void force(int fileId, boolean metadata) throws HyracksDataException;
public int getPageSize();
public int getNumPages();
+ public int getNumPagesOfFile(int fileId) throws HyracksDataException;
+
public int getFileReferenceCount(int fileId);
public void close() throws HyracksDataException;
+ public IFIFOPageQueue createFIFOQueue();
+
+ public void finishQueue();
+
+ void copyPage(ICachedPage src, ICachedPage dst);
+
+ void setPageDiskId(ICachedPage page, long dpid);
+
public boolean isReplicationEnabled();
public IIOReplicationManager getIOReplicationManager();
+ void purgeHandle(int fileId) throws HyracksDataException;
+
}
diff --git a/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/ICachedPage.java b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/ICachedPage.java
index c8c3ab1..0f77730 100644
--- a/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/ICachedPage.java
+++ b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/ICachedPage.java
@@ -30,4 +30,10 @@
public void acquireWriteLatch();
public void releaseWriteLatch(boolean markDirty);
+
+ public boolean confiscated();
+
+ public IQueueInfo getQueueInfo();
+
+ public void setQueueInfo(IQueueInfo queueInfo);
}
\ No newline at end of file
diff --git a/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/ICachedPageInternal.java b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/ICachedPageInternal.java
index 908af18..0193ed8 100644
--- a/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/ICachedPageInternal.java
+++ b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/ICachedPageInternal.java
@@ -21,7 +21,10 @@
public interface ICachedPageInternal extends ICachedPage {
public int getCachedPageId();
+ public long getDiskPageId();
+
public Object getReplacementStrategyObject();
public boolean pinIfGoodVictim();
+
}
\ No newline at end of file
diff --git a/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/IFIFOPageQueue.java b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/IFIFOPageQueue.java
new file mode 100644
index 0000000..0fe5767
--- /dev/null
+++ b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/IFIFOPageQueue.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2009-2013 by The Regents of the University of California
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * you may obtain a copy of the License from
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hyracks.storage.common.buffercache;
+
+import org.apache.hyracks.api.exceptions.HyracksDataException;
+
+public interface IFIFOPageQueue {
+ public void put(ICachedPage page) throws HyracksDataException;
+}
diff --git a/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/IFIFOPageWriter.java b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/IFIFOPageWriter.java
new file mode 100644
index 0000000..2281a44
--- /dev/null
+++ b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/IFIFOPageWriter.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2009-2013 by The Regents of the University of California
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * you may obtain a copy of the License from
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hyracks.storage.common.buffercache;
+
+import org.apache.hyracks.api.exceptions.HyracksDataException;
+
+
+public interface IFIFOPageWriter {
+ public void write(ICachedPage page, IBufferCache bufferCache) throws HyracksDataException;
+
+ void sync(int fileId, IBufferCache ibufferCache) throws HyracksDataException;
+}
diff --git a/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/IPageReplacementStrategy.java b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/IPageReplacementStrategy.java
index 2ecf6b3..c39d3ed 100644
--- a/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/IPageReplacementStrategy.java
+++ b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/IPageReplacementStrategy.java
@@ -27,6 +27,8 @@
public void notifyCachePageAccess(ICachedPageInternal cPage);
+ public void adviseWontNeed(ICachedPageInternal cPage);
+
public ICachedPageInternal findVictim();
public int getNumPages();
diff --git a/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/IQueueInfo.java b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/IQueueInfo.java
new file mode 100644
index 0000000..f214639
--- /dev/null
+++ b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/IQueueInfo.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2009-2013 by The Regents of the University of California
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * you may obtain a copy of the License from
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hyracks.storage.common.buffercache;
+
+public interface IQueueInfo {
+
+ /**
+ * Returns whether this queued page should notify all waiters (i.e., it is a low-water mark).
+ * @return {@code true} if all waiters should be notified when this page is processed
+ */
+
+ boolean hasWaiters();
+
+ /**
+ * Returns whether this cached page is a poison page.
+ * @return {@code true} if this page is a poison marker
+ */
+
+ boolean isPoison();
+}
diff --git a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IFreePageManagerFactory.java b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/QueueInfo.java
similarity index 63%
copy from hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IFreePageManagerFactory.java
copy to hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/QueueInfo.java
index 3dbe0ab..bc69bc8 100644
--- a/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/api/IFreePageManagerFactory.java
+++ b/hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/buffercache/QueueInfo.java
@@ -16,8 +16,26 @@
* specific language governing permissions and limitations
* under the License.
*/
-package org.apache.hyracks.storage.am.common.api;
+package org.apache.hyracks.storage.common.buffercache;
-public interface IFreePageManagerFactory {
- public IFreePageManager createFreePageManager();
+public class QueueInfo implements IQueueInfo{
+
+ private final boolean poison;
+ private final boolean waiters;
+
+ public QueueInfo(boolean waiters, boolean poison){
+ this.waiters = waiters;
+ this.poison = poison;
+ }
+
+ @Override
+ public boolean hasWaiters(){
+ return waiters;
+
+ }
+
+ @Override
+ public boolean isPoison(){
+ return poison;
+ }
}
diff --git a/hyracks/hyracks-test-support/src/main/java/org/apache/hyracks/storage/am/btree/OrderedIndexExamplesTest.java b/hyracks/hyracks-test-support/src/main/java/org/apache/hyracks/storage/am/btree/OrderedIndexExamplesTest.java
index b8f2166..8c01105 100644
--- a/hyracks/hyracks-test-support/src/main/java/org/apache/hyracks/storage/am/btree/OrderedIndexExamplesTest.java
+++ b/hyracks/hyracks-test-support/src/main/java/org/apache/hyracks/storage/am/btree/OrderedIndexExamplesTest.java
@@ -692,6 +692,7 @@
int ins = 1000;
for (int i = 1; i < ins; i++) {
+
ITreeIndex treeIndex = createTreeIndex(typeTraits, cmpFactories, bloomFilterKeyFields, null, null, null,
null);
treeIndex.create();
diff --git a/hyracks/hyracks-test-support/src/main/java/org/apache/hyracks/storage/am/btree/OrderedIndexTestUtils.java b/hyracks/hyracks-test-support/src/main/java/org/apache/hyracks/storage/am/btree/OrderedIndexTestUtils.java
index e1b71d2..40b9f05 100644
--- a/hyracks/hyracks-test-support/src/main/java/org/apache/hyracks/storage/am/btree/OrderedIndexTestUtils.java
+++ b/hyracks/hyracks-test-support/src/main/java/org/apache/hyracks/storage/am/btree/OrderedIndexTestUtils.java
@@ -63,8 +63,8 @@
DataInput dataIn = new DataInputStream(inStream);
Object actualObj = fieldSerdes[i].deserialize(dataIn);
if (!actualObj.equals(expected.getField(i))) {
- fail("Actual and expected fields do not match on field " + i + ".\nExpected: " + expected.getField(i)
- + "\nActual : " + actualObj);
+ fail("Actual and expected fields do not match on field " + i + ".\nExpected: " + expected.getField(i)
+ + "\nActual : " + actualObj);
}
}
}
diff --git a/hyracks/hyracks-test-support/src/main/java/org/apache/hyracks/storage/am/common/TreeIndexTestUtils.java b/hyracks/hyracks-test-support/src/main/java/org/apache/hyracks/storage/am/common/TreeIndexTestUtils.java
index bf03aea..464291a 100644
--- a/hyracks/hyracks-test-support/src/main/java/org/apache/hyracks/storage/am/common/TreeIndexTestUtils.java
+++ b/hyracks/hyracks-test-support/src/main/java/org/apache/hyracks/storage/am/common/TreeIndexTestUtils.java
@@ -138,7 +138,12 @@
+ ctx.getCheckTuples().size() + "\nActual : " + actualCount);
}
} finally {
- diskOrderCursor.close();
+ try {
+ diskOrderCursor.close();
+ }
+ catch(Exception ex){
+ LOGGER.log(Level.WARNING,"Error during scan cursor close",ex);
+ }
}
} catch (UnsupportedOperationException e) {
// Ignore exception because some indexes, e.g. the LSMTrees, don't
@@ -251,9 +256,9 @@
int c = 1;
for (CheckTuple checkTuple : checkTuples) {
if (LOGGER.isLoggable(Level.INFO)) {
- if (c % (numTuples / 10) == 0) {
+ //if (c % (numTuples / 10) == 0) {
LOGGER.info("Bulk Loading Tuple " + c + "/" + numTuples);
- }
+ //}
}
createTupleFromCheckTuple(checkTuple, tupleBuilder, tuple, ctx.getFieldSerdes());
bulkLoader.add(tuple);
diff --git a/hyracks/hyracks-tests/hyracks-storage-am-btree-test/src/test/java/org/apache/hyracks/storage/am/btree/BTreeSearchCursorTest.java b/hyracks/hyracks-tests/hyracks-storage-am-btree-test/src/test/java/org/apache/hyracks/storage/am/btree/BTreeSearchCursorTest.java
index 6e009e8..15d63f6 100644
--- a/hyracks/hyracks-tests/hyracks-storage-am-btree-test/src/test/java/org/apache/hyracks/storage/am/btree/BTreeSearchCursorTest.java
+++ b/hyracks/hyracks-tests/hyracks-storage-am-btree-test/src/test/java/org/apache/hyracks/storage/am/btree/BTreeSearchCursorTest.java
@@ -28,6 +28,8 @@
import java.util.TreeSet;
import java.util.logging.Level;
+import org.apache.hyracks.storage.am.common.api.*;
+import org.apache.hyracks.storage.am.common.freepage.LinkedMetaDataPageManager;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -53,13 +55,8 @@
import org.apache.hyracks.storage.am.btree.impls.RangePredicate;
import org.apache.hyracks.storage.am.btree.util.AbstractBTreeTest;
import org.apache.hyracks.storage.am.common.TestOperationCallback;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexAccessor;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexCursor;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexFrameFactory;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrameFactory;
+import org.apache.hyracks.storage.am.common.api.IMetaDataPageManager;
import org.apache.hyracks.storage.am.common.frames.LIFOMetaDataFrameFactory;
-import org.apache.hyracks.storage.am.common.freepage.LinkedListFreePageManager;
import org.apache.hyracks.storage.am.common.ophelpers.MultiComparator;
import org.apache.hyracks.storage.am.common.tuples.TypeAwareTupleWriterFactory;
import org.apache.hyracks.storage.common.buffercache.IBufferCache;
@@ -97,7 +94,7 @@
IBTreeLeafFrame leafFrame = (IBTreeLeafFrame) leafFrameFactory.createFrame();
IBTreeInteriorFrame interiorFrame = (IBTreeInteriorFrame) interiorFrameFactory.createFrame();
- IFreePageManager freePageManager = new LinkedListFreePageManager(bufferCache, 0, metaFrameFactory);
+ IMetaDataPageManager freePageManager = new LinkedMetaDataPageManager(bufferCache, metaFrameFactory);
BTree btree = new BTree(bufferCache, harness.getFileMapProvider(), freePageManager, interiorFrameFactory,
leafFrameFactory, cmpFactories, fieldCount, harness.getFileReference());
@@ -174,7 +171,7 @@
IBTreeLeafFrame leafFrame = (IBTreeLeafFrame) leafFrameFactory.createFrame();
IBTreeInteriorFrame interiorFrame = (IBTreeInteriorFrame) interiorFrameFactory.createFrame();
- IFreePageManager freePageManager = new LinkedListFreePageManager(bufferCache, 0, metaFrameFactory);
+ IMetaDataPageManager freePageManager = new LinkedMetaDataPageManager(bufferCache, metaFrameFactory);
BTree btree = new BTree(bufferCache, harness.getFileMapProvider(), freePageManager, interiorFrameFactory,
leafFrameFactory, cmpFactories, fieldCount, harness.getFileReference());
@@ -248,7 +245,7 @@
IBTreeLeafFrame leafFrame = (IBTreeLeafFrame) leafFrameFactory.createFrame();
IBTreeInteriorFrame interiorFrame = (IBTreeInteriorFrame) interiorFrameFactory.createFrame();
- IFreePageManager freePageManager = new LinkedListFreePageManager(bufferCache, 0, metaFrameFactory);
+ IMetaDataPageManager freePageManager = new LinkedMetaDataPageManager(bufferCache, metaFrameFactory);
BTree btree = new BTree(bufferCache, harness.getFileMapProvider(), freePageManager, interiorFrameFactory,
leafFrameFactory, cmpFactories, fieldCount, harness.getFileReference());
diff --git a/hyracks/hyracks-tests/hyracks-storage-am-btree-test/src/test/java/org/apache/hyracks/storage/am/btree/BTreeStatsTest.java b/hyracks/hyracks-tests/hyracks-storage-am-btree-test/src/test/java/org/apache/hyracks/storage/am/btree/BTreeStatsTest.java
index 54505dd..4049185 100644
--- a/hyracks/hyracks-tests/hyracks-storage-am-btree-test/src/test/java/org/apache/hyracks/storage/am/btree/BTreeStatsTest.java
+++ b/hyracks/hyracks-tests/hyracks-storage-am-btree-test/src/test/java/org/apache/hyracks/storage/am/btree/BTreeStatsTest.java
@@ -22,6 +22,7 @@
import java.util.Random;
import java.util.logging.Level;
+import org.apache.hyracks.storage.am.common.api.*;
import org.junit.Test;
import org.apache.hyracks.api.comm.IFrame;
@@ -46,14 +47,9 @@
import org.apache.hyracks.storage.am.btree.impls.BTree;
import org.apache.hyracks.storage.am.btree.util.AbstractBTreeTest;
import org.apache.hyracks.storage.am.common.TestOperationCallback;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexAccessor;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexFrameFactory;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrame;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrameFactory;
-import org.apache.hyracks.storage.am.common.api.TreeIndexException;
+import org.apache.hyracks.storage.am.common.api.IMetaDataPageManager;
import org.apache.hyracks.storage.am.common.frames.LIFOMetaDataFrameFactory;
-import org.apache.hyracks.storage.am.common.freepage.LinkedListFreePageManager;
+import org.apache.hyracks.storage.am.common.freepage.LinkedMetaDataPageManager;
import org.apache.hyracks.storage.am.common.tuples.TypeAwareTupleWriterFactory;
import org.apache.hyracks.storage.am.common.util.TreeIndexBufferCacheWarmup;
import org.apache.hyracks.storage.am.common.util.TreeIndexStats;
@@ -99,7 +95,7 @@
IBTreeInteriorFrame interiorFrame = (IBTreeInteriorFrame) interiorFrameFactory.createFrame();
ITreeIndexMetaDataFrame metaFrame = metaFrameFactory.createFrame();
- IFreePageManager freePageManager = new LinkedListFreePageManager(bufferCache, 0, metaFrameFactory);
+ IMetaDataPageManager freePageManager = new LinkedMetaDataPageManager(bufferCache, metaFrameFactory);
BTree btree = new BTree(bufferCache, fmp, freePageManager, interiorFrameFactory, leafFrameFactory,
cmpFactories, fieldCount, harness.getFileReference());
diff --git a/hyracks/hyracks-tests/hyracks-storage-am-btree-test/src/test/java/org/apache/hyracks/storage/am/btree/BTreeUpdateSearchTest.java b/hyracks/hyracks-tests/hyracks-storage-am-btree-test/src/test/java/org/apache/hyracks/storage/am/btree/BTreeUpdateSearchTest.java
index 4915751..40fc0ef 100644
--- a/hyracks/hyracks-tests/hyracks-storage-am-btree-test/src/test/java/org/apache/hyracks/storage/am/btree/BTreeUpdateSearchTest.java
+++ b/hyracks/hyracks-tests/hyracks-storage-am-btree-test/src/test/java/org/apache/hyracks/storage/am/btree/BTreeUpdateSearchTest.java
@@ -21,6 +21,8 @@
import java.util.Random;
import java.util.logging.Level;
+import org.apache.hyracks.storage.am.common.api.*;
+import org.apache.hyracks.storage.am.common.freepage.LinkedMetaDataPageManager;
import org.junit.Test;
import org.apache.hyracks.api.dataflow.value.IBinaryComparatorFactory;
@@ -41,14 +43,8 @@
import org.apache.hyracks.storage.am.btree.impls.RangePredicate;
import org.apache.hyracks.storage.am.btree.util.AbstractBTreeTest;
import org.apache.hyracks.storage.am.common.TestOperationCallback;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexAccessor;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexCursor;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexFrameFactory;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrameFactory;
-import org.apache.hyracks.storage.am.common.api.TreeIndexException;
+import org.apache.hyracks.storage.am.common.api.IMetaDataPageManager;
import org.apache.hyracks.storage.am.common.frames.LIFOMetaDataFrameFactory;
-import org.apache.hyracks.storage.am.common.freepage.LinkedListFreePageManager;
import org.apache.hyracks.storage.am.common.tuples.TypeAwareTupleWriterFactory;
import org.apache.hyracks.storage.common.buffercache.IBufferCache;
@@ -81,7 +77,7 @@
IBTreeLeafFrame leafFrame = (IBTreeLeafFrame) leafFrameFactory.createFrame();
- IFreePageManager freePageManager = new LinkedListFreePageManager(bufferCache, 0, metaFrameFactory);
+ IMetaDataPageManager freePageManager = new LinkedMetaDataPageManager(bufferCache, metaFrameFactory);
BTree btree = new BTree(bufferCache, harness.getFileMapProvider(), freePageManager, interiorFrameFactory,
leafFrameFactory, cmpFactories, fieldCount, harness.getFileReference());
btree.create();
diff --git a/hyracks/hyracks-tests/hyracks-storage-am-lsm-btree-test/src/test/java/org/apache/hyracks/storage/am/lsm/btree/LSMBTreeSearchOperationCallbackTest.java b/hyracks/hyracks-tests/hyracks-storage-am-lsm-btree-test/src/test/java/org/apache/hyracks/storage/am/lsm/btree/LSMBTreeSearchOperationCallbackTest.java
index 00d637a..6712d1c 100644
--- a/hyracks/hyracks-tests/hyracks-storage-am-lsm-btree-test/src/test/java/org/apache/hyracks/storage/am/lsm/btree/LSMBTreeSearchOperationCallbackTest.java
+++ b/hyracks/hyracks-tests/hyracks-storage-am-lsm-btree-test/src/test/java/org/apache/hyracks/storage/am/lsm/btree/LSMBTreeSearchOperationCallbackTest.java
@@ -279,7 +279,7 @@
throw new IllegalArgumentException("Invalid range: [" + begin + ", " + end + "]");
}
- IIndexBulkLoader bulkloader = index.createBulkLoader(1.0f, false, end - begin, true);
+ IIndexBulkLoader bulkloader = index.createBulkLoader(1.0f, false, end - begin, true, true);
for (int i = begin; i <= end; i++) {
TupleUtils.createIntegerTuple(builder, tuple, i);
bulkloader.add(tuple);
diff --git a/hyracks/hyracks-tests/hyracks-storage-am-lsm-btree-test/src/test/java/org/apache/hyracks/storage/am/lsm/btree/perf/InMemoryBTreeRunner.java b/hyracks/hyracks-tests/hyracks-storage-am-lsm-btree-test/src/test/java/org/apache/hyracks/storage/am/lsm/btree/perf/InMemoryBTreeRunner.java
index 62bc7d0..bcb11d3 100644
--- a/hyracks/hyracks-tests/hyracks-storage-am-lsm-btree-test/src/test/java/org/apache/hyracks/storage/am/lsm/btree/perf/InMemoryBTreeRunner.java
+++ b/hyracks/hyracks-tests/hyracks-storage-am-lsm-btree-test/src/test/java/org/apache/hyracks/storage/am/lsm/btree/perf/InMemoryBTreeRunner.java
@@ -31,7 +31,7 @@
import org.apache.hyracks.storage.am.btree.frames.BTreeNSMInteriorFrameFactory;
import org.apache.hyracks.storage.am.btree.frames.BTreeNSMLeafFrameFactory;
import org.apache.hyracks.storage.am.btree.impls.BTree;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
+import org.apache.hyracks.storage.am.common.api.IMetaDataPageManager;
import org.apache.hyracks.storage.am.common.api.ITreeIndexAccessor;
import org.apache.hyracks.storage.am.common.api.ITreeIndexFrameFactory;
import org.apache.hyracks.storage.am.common.api.TreeIndexException;
@@ -39,7 +39,7 @@
import org.apache.hyracks.storage.am.common.datagen.TupleBatch;
import org.apache.hyracks.storage.am.common.impls.NoOpOperationCallback;
import org.apache.hyracks.storage.am.common.tuples.TypeAwareTupleWriterFactory;
-import org.apache.hyracks.storage.am.lsm.common.freepage.VirtualFreePageManager;
+import org.apache.hyracks.storage.am.lsm.common.freepage.VirtualMetaDataPageManager;
import org.apache.hyracks.storage.am.lsm.common.impls.VirtualBufferCache;
import org.apache.hyracks.storage.common.buffercache.HeapBufferAllocator;
import org.apache.hyracks.storage.common.buffercache.IBufferCache;
@@ -71,7 +71,7 @@
TypeAwareTupleWriterFactory tupleWriterFactory = new TypeAwareTupleWriterFactory(typeTraits);
ITreeIndexFrameFactory leafFrameFactory = new BTreeNSMLeafFrameFactory(tupleWriterFactory);
ITreeIndexFrameFactory interiorFrameFactory = new BTreeNSMInteriorFrameFactory(tupleWriterFactory);
- IFreePageManager freePageManager = new VirtualFreePageManager(bufferCache.getNumPages());
+ IMetaDataPageManager freePageManager = new VirtualMetaDataPageManager(bufferCache.getNumPages());
btree = new BTree(bufferCache, new TransientFileMapManager(), freePageManager, interiorFrameFactory,
leafFrameFactory, cmpFactories, typeTraits.length, file);
}
diff --git a/hyracks/hyracks-tests/hyracks-storage-am-lsm-common-test/src/test/java/org/apache/hyracks/storage/am/lsm/common/VirtualFreePageManagerTest.java b/hyracks/hyracks-tests/hyracks-storage-am-lsm-common-test/src/test/java/org/apache/hyracks/storage/am/lsm/common/VirtualFreePageManagerTest.java
index 634589d..86f75d4 100644
--- a/hyracks/hyracks-tests/hyracks-storage-am-lsm-common-test/src/test/java/org/apache/hyracks/storage/am/lsm/common/VirtualFreePageManagerTest.java
+++ b/hyracks/hyracks-tests/hyracks-storage-am-lsm-common-test/src/test/java/org/apache/hyracks/storage/am/lsm/common/VirtualFreePageManagerTest.java
@@ -21,16 +21,16 @@
import static org.junit.Assert.assertEquals;
+import org.apache.hyracks.storage.am.lsm.common.freepage.VirtualMetaDataPageManager;
import org.junit.Test;
import org.apache.hyracks.api.exceptions.HyracksDataException;
-import org.apache.hyracks.storage.am.lsm.common.freepage.VirtualFreePageManager;
public class VirtualFreePageManagerTest {
private final int NUM_PAGES = 100;
- private void testInMemoryFreePageManager(VirtualFreePageManager virtualFreePageManager) throws HyracksDataException {
+ private void testInMemoryFreePageManager(VirtualMetaDataPageManager virtualFreePageManager) throws HyracksDataException {
// The first two pages are reserved for the BTree's metadata page and
// root page.
// The "actual" capacity is therefore numPages - 2.
@@ -53,7 +53,7 @@
@Test
public void test01() throws HyracksDataException {
- VirtualFreePageManager virtualFreePageManager = new VirtualFreePageManager(NUM_PAGES);
+ VirtualMetaDataPageManager virtualFreePageManager = new VirtualMetaDataPageManager(NUM_PAGES);
testInMemoryFreePageManager(virtualFreePageManager);
// We expect exactly the same behavior after a reset().
virtualFreePageManager.reset();
diff --git a/hyracks/hyracks-tests/hyracks-storage-am-lsm-invertedindex-test/src/test/java/org/apache/hyracks/storage/am/lsm/invertedindex/common/AbstractInvertedIndexDeleteTest.java b/hyracks/hyracks-tests/hyracks-storage-am-lsm-invertedindex-test/src/test/java/org/apache/hyracks/storage/am/lsm/invertedindex/common/AbstractInvertedIndexDeleteTest.java
index 4dbfe6a..e59f85c 100644
--- a/hyracks/hyracks-tests/hyracks-storage-am-lsm-invertedindex-test/src/test/java/org/apache/hyracks/storage/am/lsm/invertedindex/common/AbstractInvertedIndexDeleteTest.java
+++ b/hyracks/hyracks-tests/hyracks-storage-am-lsm-invertedindex-test/src/test/java/org/apache/hyracks/storage/am/lsm/invertedindex/common/AbstractInvertedIndexDeleteTest.java
@@ -45,15 +45,22 @@
protected void runTest(LSMInvertedIndexTestContext testCtx, TupleGenerator tupleGen) throws IOException,
IndexException {
IIndex invIndex = testCtx.getIndex();
- invIndex.create();
- invIndex.activate();
+
+ if ((invIndexType != InvertedIndexType.LSM) && (invIndexType != InvertedIndexType.PARTITIONED_LSM) || !bulkLoad) {
+ invIndex.create();
+ invIndex.activate();
+ }
for (int i = 0; i < numInsertRounds; i++) {
// Start generating documents ids from 0 again.
tupleGen.reset();
-
if (bulkLoad) {
- LSMInvertedIndexTestUtils.bulkLoadInvIndex(testCtx, tupleGen, NUM_DOCS_TO_INSERT);
+ if ((invIndexType != InvertedIndexType.LSM) && (invIndexType != InvertedIndexType.PARTITIONED_LSM)) {
+ LSMInvertedIndexTestUtils.bulkLoadInvIndex(testCtx, tupleGen, NUM_DOCS_TO_INSERT, false);
+ } else {
+
+ LSMInvertedIndexTestUtils.bulkLoadInvIndex(testCtx, tupleGen, NUM_DOCS_TO_INSERT, true);
+ }
} else {
LSMInvertedIndexTestUtils.insertIntoInvIndex(testCtx, tupleGen, NUM_DOCS_TO_INSERT);
}
@@ -74,7 +81,8 @@
@Test
public void wordTokensInvIndexTest() throws IOException, IndexException {
- LSMInvertedIndexTestContext testCtx = LSMInvertedIndexTestUtils.createWordInvIndexTestContext(harness, invIndexType);
+ LSMInvertedIndexTestContext testCtx = LSMInvertedIndexTestUtils.createWordInvIndexTestContext(harness,
+ invIndexType);
TupleGenerator tupleGen = LSMInvertedIndexTestUtils.createStringDocumentTupleGen(harness.getRandom());
runTest(testCtx, tupleGen);
}
@@ -89,7 +97,8 @@
@Test
public void ngramTokensInvIndexTest() throws IOException, IndexException {
- LSMInvertedIndexTestContext testCtx = LSMInvertedIndexTestUtils.createNGramInvIndexTestContext(harness, invIndexType);
+ LSMInvertedIndexTestContext testCtx = LSMInvertedIndexTestUtils.createNGramInvIndexTestContext(harness,
+ invIndexType);
TupleGenerator tupleGen = LSMInvertedIndexTestUtils.createPersonNamesTupleGen(harness.getRandom());
runTest(testCtx, tupleGen);
}
diff --git a/hyracks/hyracks-tests/hyracks-storage-am-lsm-invertedindex-test/src/test/java/org/apache/hyracks/storage/am/lsm/invertedindex/common/AbstractInvertedIndexLoadTest.java b/hyracks/hyracks-tests/hyracks-storage-am-lsm-invertedindex-test/src/test/java/org/apache/hyracks/storage/am/lsm/invertedindex/common/AbstractInvertedIndexLoadTest.java
index 4467394..78b6d0e 100644
--- a/hyracks/hyracks-tests/hyracks-storage-am-lsm-invertedindex-test/src/test/java/org/apache/hyracks/storage/am/lsm/invertedindex/common/AbstractInvertedIndexLoadTest.java
+++ b/hyracks/hyracks-tests/hyracks-storage-am-lsm-invertedindex-test/src/test/java/org/apache/hyracks/storage/am/lsm/invertedindex/common/AbstractInvertedIndexLoadTest.java
@@ -42,11 +42,12 @@
protected void runTest(LSMInvertedIndexTestContext testCtx, TupleGenerator tupleGen) throws IOException,
IndexException {
IIndex invIndex = testCtx.getIndex();
- invIndex.create();
- invIndex.activate();
-
+ if(invIndexType != InvertedIndexType.PARTITIONED_ONDISK && invIndexType != InvertedIndexType.ONDISK) {
+ invIndex.create();
+ invIndex.activate();
+ }
if (bulkLoad) {
- LSMInvertedIndexTestUtils.bulkLoadInvIndex(testCtx, tupleGen, NUM_DOCS_TO_INSERT);
+ LSMInvertedIndexTestUtils.bulkLoadInvIndex(testCtx, tupleGen, NUM_DOCS_TO_INSERT, true);
} else {
LSMInvertedIndexTestUtils.insertIntoInvIndex(testCtx, tupleGen, NUM_DOCS_TO_INSERT);
}
diff --git a/hyracks/hyracks-tests/hyracks-storage-am-lsm-invertedindex-test/src/test/java/org/apache/hyracks/storage/am/lsm/invertedindex/common/AbstractInvertedIndexSearchTest.java b/hyracks/hyracks-tests/hyracks-storage-am-lsm-invertedindex-test/src/test/java/org/apache/hyracks/storage/am/lsm/invertedindex/common/AbstractInvertedIndexSearchTest.java
index 2c239c5..3a188fe 100644
--- a/hyracks/hyracks-tests/hyracks-storage-am-lsm-invertedindex-test/src/test/java/org/apache/hyracks/storage/am/lsm/invertedindex/common/AbstractInvertedIndexSearchTest.java
+++ b/hyracks/hyracks-tests/hyracks-storage-am-lsm-invertedindex-test/src/test/java/org/apache/hyracks/storage/am/lsm/invertedindex/common/AbstractInvertedIndexSearchTest.java
@@ -54,11 +54,16 @@
protected void runTest(LSMInvertedIndexTestContext testCtx, TupleGenerator tupleGen,
List<IInvertedIndexSearchModifier> searchModifiers) throws IOException, IndexException {
IIndex invIndex = testCtx.getIndex();
- invIndex.create();
- invIndex.activate();
-
+ if ((invIndexType != InvertedIndexType.LSM) && (invIndexType != InvertedIndexType.PARTITIONED_LSM) || !bulkLoad) {
+ invIndex.create();
+ invIndex.activate();
+ }
if (bulkLoad) {
- LSMInvertedIndexTestUtils.bulkLoadInvIndex(testCtx, tupleGen, NUM_DOCS_TO_INSERT);
+ if ((invIndexType != InvertedIndexType.LSM) && (invIndexType != InvertedIndexType.PARTITIONED_LSM)) {
+ LSMInvertedIndexTestUtils.bulkLoadInvIndex(testCtx, tupleGen, NUM_DOCS_TO_INSERT, false);
+ } else {
+ LSMInvertedIndexTestUtils.bulkLoadInvIndex(testCtx, tupleGen, NUM_DOCS_TO_INSERT, true);
+ }
} else {
LSMInvertedIndexTestUtils.insertIntoInvIndex(testCtx, tupleGen, NUM_DOCS_TO_INSERT);
}
diff --git a/hyracks/hyracks-tests/hyracks-storage-am-lsm-invertedindex-test/src/test/java/org/apache/hyracks/storage/am/lsm/invertedindex/util/LSMInvertedIndexTestContext.java b/hyracks/hyracks-tests/hyracks-storage-am-lsm-invertedindex-test/src/test/java/org/apache/hyracks/storage/am/lsm/invertedindex/util/LSMInvertedIndexTestContext.java
index 7a9db2f..061e231 100644
--- a/hyracks/hyracks-tests/hyracks-storage-am-lsm-invertedindex-test/src/test/java/org/apache/hyracks/storage/am/lsm/invertedindex/util/LSMInvertedIndexTestContext.java
+++ b/hyracks/hyracks-tests/hyracks-storage-am-lsm-invertedindex-test/src/test/java/org/apache/hyracks/storage/am/lsm/invertedindex/util/LSMInvertedIndexTestContext.java
@@ -40,7 +40,7 @@
import org.apache.hyracks.storage.am.common.CheckTuple;
import org.apache.hyracks.storage.am.common.api.IIndex;
import org.apache.hyracks.storage.am.common.api.IndexException;
-import org.apache.hyracks.storage.am.lsm.common.freepage.VirtualFreePageManager;
+import org.apache.hyracks.storage.am.lsm.common.freepage.VirtualMetaDataPageManager;
import org.apache.hyracks.storage.am.lsm.invertedindex.api.IInvertedIndex;
import org.apache.hyracks.storage.am.lsm.invertedindex.common.LSMInvertedIndexTestHarness;
import org.apache.hyracks.storage.am.lsm.invertedindex.exceptions.InvertedIndexException;
@@ -129,14 +129,14 @@
switch (invIndexType) {
case INMEMORY: {
invIndex = InvertedIndexUtils.createInMemoryBTreeInvertedindex(harness.getVirtualBufferCaches().get(0),
- new VirtualFreePageManager(harness.getVirtualBufferCaches().get(0).getNumPages()),
+ new VirtualMetaDataPageManager(harness.getVirtualBufferCaches().get(0).getNumPages()),
invListTypeTraits, invListCmpFactories, tokenTypeTraits, tokenCmpFactories, tokenizerFactory,
new FileReference(new File(harness.getOnDiskDir())));
break;
}
case PARTITIONED_INMEMORY: {
invIndex = InvertedIndexUtils.createPartitionedInMemoryBTreeInvertedindex(harness
- .getVirtualBufferCaches().get(0), new VirtualFreePageManager(harness.getVirtualBufferCaches()
+ .getVirtualBufferCaches().get(0), new VirtualMetaDataPageManager(harness.getVirtualBufferCaches()
.get(0).getNumPages()), invListTypeTraits, invListCmpFactories, tokenTypeTraits,
tokenCmpFactories, tokenizerFactory, new FileReference(new File(harness.getOnDiskDir())));
break;
diff --git a/hyracks/hyracks-tests/hyracks-storage-am-lsm-invertedindex-test/src/test/java/org/apache/hyracks/storage/am/lsm/invertedindex/util/LSMInvertedIndexTestUtils.java b/hyracks/hyracks-tests/hyracks-storage-am-lsm-invertedindex-test/src/test/java/org/apache/hyracks/storage/am/lsm/invertedindex/util/LSMInvertedIndexTestUtils.java
index fd94870..cc0400a 100644
--- a/hyracks/hyracks-tests/hyracks-storage-am-lsm-invertedindex-test/src/test/java/org/apache/hyracks/storage/am/lsm/invertedindex/util/LSMInvertedIndexTestUtils.java
+++ b/hyracks/hyracks-tests/hyracks-storage-am-lsm-invertedindex-test/src/test/java/org/apache/hyracks/storage/am/lsm/invertedindex/util/LSMInvertedIndexTestUtils.java
@@ -199,7 +199,7 @@
return testCtx;
}
- public static void bulkLoadInvIndex(LSMInvertedIndexTestContext testCtx, TupleGenerator tupleGen, int numDocs)
+ public static void bulkLoadInvIndex(LSMInvertedIndexTestContext testCtx, TupleGenerator tupleGen, int numDocs, boolean appendOnly)
throws IndexException, IOException {
SortedSet<CheckTuple> tmpMemIndex = new TreeSet<CheckTuple>();
// First generate the expected index by inserting the documents one-by-one.
@@ -210,7 +210,7 @@
ISerializerDeserializer[] fieldSerdes = testCtx.getFieldSerdes();
// Use the expected index to bulk-load the actual index.
- IIndexBulkLoader bulkLoader = testCtx.getIndex().createBulkLoader(1.0f, false, numDocs, true);
+ IIndexBulkLoader bulkLoader = testCtx.getIndex().createBulkLoader(1.0f, false, numDocs, true, appendOnly);
ArrayTupleBuilder tupleBuilder = new ArrayTupleBuilder(testCtx.getFieldSerdes().length);
ArrayTupleReference tuple = new ArrayTupleReference();
Iterator<CheckTuple> checkTupleIter = tmpMemIndex.iterator();
diff --git a/hyracks/hyracks-tests/hyracks-storage-am-rtree-test/src/test/java/org/apache/hyracks/storage/am/rtree/RTreeSearchCursorTest.java b/hyracks/hyracks-tests/hyracks-storage-am-rtree-test/src/test/java/org/apache/hyracks/storage/am/rtree/RTreeSearchCursorTest.java
index c444d68..b613724 100644
--- a/hyracks/hyracks-tests/hyracks-storage-am-rtree-test/src/test/java/org/apache/hyracks/storage/am/rtree/RTreeSearchCursorTest.java
+++ b/hyracks/hyracks-tests/hyracks-storage-am-rtree-test/src/test/java/org/apache/hyracks/storage/am/rtree/RTreeSearchCursorTest.java
@@ -23,6 +23,8 @@
import java.util.Random;
import java.util.logging.Level;
+import org.apache.hyracks.storage.am.common.api.*;
+import org.apache.hyracks.storage.am.common.freepage.LinkedMetaDataPageManager;
import org.junit.Before;
import org.junit.Test;
@@ -36,15 +38,8 @@
import org.apache.hyracks.dataflow.common.comm.io.ArrayTupleReference;
import org.apache.hyracks.dataflow.common.data.marshalling.IntegerSerializerDeserializer;
import org.apache.hyracks.dataflow.common.util.TupleUtils;
-import org.apache.hyracks.storage.am.common.api.IFreePageManager;
-import org.apache.hyracks.storage.am.common.api.IPrimitiveValueProviderFactory;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexAccessor;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexCursor;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexFrameFactory;
-import org.apache.hyracks.storage.am.common.api.ITreeIndexMetaDataFrameFactory;
-import org.apache.hyracks.storage.am.common.api.TreeIndexException;
+import org.apache.hyracks.storage.am.common.api.IMetaDataPageManager;
import org.apache.hyracks.storage.am.common.frames.LIFOMetaDataFrameFactory;
-import org.apache.hyracks.storage.am.common.freepage.LinkedListFreePageManager;
import org.apache.hyracks.storage.am.common.impls.NoOpOperationCallback;
import org.apache.hyracks.storage.am.common.ophelpers.MultiComparator;
import org.apache.hyracks.storage.am.common.util.HashMultiSet;
@@ -119,7 +114,7 @@
IRTreeInteriorFrame interiorFrame = (IRTreeInteriorFrame) interiorFrameFactory.createFrame();
IRTreeLeafFrame leafFrame = (IRTreeLeafFrame) leafFrameFactory.createFrame();
- IFreePageManager freePageManager = new LinkedListFreePageManager(bufferCache, 0, metaFrameFactory);
+ IMetaDataPageManager freePageManager = new LinkedMetaDataPageManager(bufferCache, metaFrameFactory);
RTree rtree = new RTree(bufferCache, harness.getFileMapProvider(), freePageManager, interiorFrameFactory,
leafFrameFactory, cmpFactories, fieldCount, harness.getFileReference());
diff --git a/hyracks/hyracks-tests/hyracks-storage-common-test/src/test/java/org/apache/hyracks/storage/common/BufferCacheTest.java b/hyracks/hyracks-tests/hyracks-storage-common-test/src/test/java/org/apache/hyracks/storage/common/BufferCacheTest.java
index 1f365df..2c584d5 100644
--- a/hyracks/hyracks-tests/hyracks-storage-common-test/src/test/java/org/apache/hyracks/storage/common/BufferCacheTest.java
+++ b/hyracks/hyracks-tests/hyracks-storage-common-test/src/test/java/org/apache/hyracks/storage/common/BufferCacheTest.java
@@ -319,102 +319,6 @@
bufferCache.close();
}
- @Test
- public void virtualPageTest() throws HyracksDataException {
- TestStorageManagerComponentHolder.init(PAGE_SIZE, NUM_PAGES, MAX_OPEN_FILES);
- IBufferCache bufferCache = TestStorageManagerComponentHolder.getBufferCache(ctx);
- IFileMapProvider fmp = TestStorageManagerComponentHolder.getFileMapProvider(ctx);
-
- List<Integer> fileIds = new ArrayList<Integer>();
- Map<Integer, ArrayList<Integer>> pageContents = new HashMap<Integer, ArrayList<Integer>>();
- ArrayList<Integer> memVals;
- int num = 10;
- int testPageId = 0;
- int lastRealPage = 0;
- String fileName = getFileName();
- FileReference file = new FileReference(new File(fileName));
- bufferCache.createFile(file);
- int memFileId = bufferCache.createMemFile();
- int fileId = fmp.lookupFileId(file);
- bufferCache.openFile(fileId);
- fileIds.add(fileId);
-
- // try and write a few somethings into an on-disk paged file
- ICachedPage page = null;
- for (; lastRealPage < 10; lastRealPage++) {
- page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, lastRealPage), true);
- page.acquireWriteLatch();
- try {
- ArrayList<Integer> values = new ArrayList<Integer>();
- for (int j = 0; j < num; j++) {
- int x = Math.abs(rnd.nextInt());
- page.getBuffer().putInt(j * 4, x);
- values.add(x);
- }
- pageContents.put(lastRealPage, values);
- } finally {
- page.releaseWriteLatch(true);
- bufferCache.unpin(page);
- }
- }
- //now try the same thing, but for a virtual page
- page = bufferCache.pinVirtual(BufferedFileHandle.getDiskPageId(memFileId, testPageId));
- page.acquireWriteLatch();
- try {
- ArrayList<Integer> values = new ArrayList<Integer>();
- for (int j = 0; j < num; j++) {
- int x = Math.abs(rnd.nextInt());
- page.getBuffer().putInt(j * 4, x);
- values.add(x);
- }
- memVals = values;
- } finally {
- page.releaseWriteLatch(true);
- //no unpin here.
- }
- //write some more stuff...
- for (; lastRealPage < 20; lastRealPage++) {
- page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, lastRealPage), true);
- page.acquireWriteLatch();
- try {
- ArrayList<Integer> values = new ArrayList<Integer>();
- for (int j = 0; j < num; j++) {
- int x = Math.abs(rnd.nextInt());
- page.getBuffer().putInt(j * 4, x);
- values.add(x);
- }
- pageContents.put(lastRealPage, values);
- } finally {
- page.releaseWriteLatch(true);
- bufferCache.unpin(page);
- }
- }
- //now try putting the virtual page after the other pages
- ICachedPage realPage = bufferCache.unpinVirtual(BufferedFileHandle.getDiskPageId(memFileId, testPageId),
- BufferedFileHandle.getDiskPageId(fileId, lastRealPage));
- bufferCache.unpin(realPage);
- pageContents.put(lastRealPage, memVals);
-
- bufferCache.closeFile(fileId);
-
- //now try reading it back!
- bufferCache.openFile(fileId);
- for (int i : pageContents.keySet()) {
- page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, i), false);
- page.acquireReadLatch();
- try {
- ArrayList<Integer> values = pageContents.get(i);
- for (int j = 0; j < values.size(); j++) {
- Assert.assertEquals(values.get(j).intValue(), page.getBuffer().getInt(j * 4));
- }
- } finally {
- page.releaseReadLatch();
- bufferCache.unpin(page);
- }
- }
- bufferCache.closeFile(fileId);
- }
-
@AfterClass
public static void cleanup() throws Exception {
for (String s : openedFiles) {
diff --git a/hyracks/pom.xml b/hyracks/pom.xml
index 61e06e4..7af92c7 100644
--- a/hyracks/pom.xml
+++ b/hyracks/pom.xml
@@ -103,7 +103,6 @@
<module>hyracks-client</module>
<module>hyracks-dataflow-common</module>
<module>hyracks-dataflow-std</module>
- <module>hyracks-dataflow-hadoop</module>
<module>hyracks-control</module>
<module>hyracks-net</module>
<module>hyracks-data</module>