Minor cleanup: rename numFields/numFieldsToCompress variables to fieldCount/fieldCountToCompress for naming consistency across FieldPrefixCompressor, BTreeFieldPrefixNSMLeafFrame, and BTreeTestUtils; remove a redundant lastTuple.setFieldCount() call in BTree bulk-load context creation.
git-svn-id: https://hyracks.googlecode.com/svn/branches/hyracks_btree_updates_next@702 123451ca-8445-de46-9d55-352943316053
diff --git a/hyracks-storage-am-btree/src/main/java/edu/uci/ics/hyracks/storage/am/btree/compressors/FieldPrefixCompressor.java b/hyracks-storage-am-btree/src/main/java/edu/uci/ics/hyracks/storage/am/btree/compressors/FieldPrefixCompressor.java
index 3f7bc30..8e88c18 100644
--- a/hyracks-storage-am-btree/src/main/java/edu/uci/ics/hyracks/storage/am/btree/compressors/FieldPrefixCompressor.java
+++ b/hyracks-storage-am-btree/src/main/java/edu/uci/ics/hyracks/storage/am/btree/compressors/FieldPrefixCompressor.java
@@ -178,14 +178,14 @@
// number of fields we decided to use for compression of
// this keyPartition
- int numFieldsToCompress = keyPartitions.get(kpIndex).maxPmiIndex + 1;
+ int fieldCountToCompress = keyPartitions.get(kpIndex).maxPmiIndex + 1;
int segmentStart = keyPartitions.get(kpIndex).firstTupleIndex;
int tuplesInSegment = 1;
// System.out.println("PROCESSING KEYPARTITION: " + kpIndex
// + " RANGE: " + keyPartitions.get(kpIndex).firstRecSlotNum
// + " " + keyPartitions.get(kpIndex).lastRecSlotNum +
- // " FIELDSTOCOMPRESS: " + numFieldsToCompress);
+ // " FIELDSTOCOMPRESS: " + fieldCountToCompress);
FieldPrefixTupleReference prevTuple = new FieldPrefixTupleReference(tupleWriter
.createTupleReference());
@@ -198,10 +198,10 @@
prevTuple.resetByTupleIndex(frame, i - 1);
tuple.resetByTupleIndex(frame, i);
- // check if tuples match in numFieldsToCompress of their
+ // check if tuples match in fieldCountToCompress of their
// first fields
int prefixFieldsMatch = 0;
- for (int j = 0; j < numFieldsToCompress; j++) {
+ for (int j = 0; j < fieldCountToCompress; j++) {
if (cmps[j].compare(pageArray, prevTuple.getFieldStart(j), prevTuple.getFieldLength(j),
pageArray, tuple.getFieldStart(j), tuple.getFieldLength(j)) == 0)
prefixFieldsMatch++;
@@ -212,7 +212,7 @@
// the two tuples must match in exactly the number of
// fields we decided to compress for this keyPartition
int processSegments = 0;
- if (prefixFieldsMatch == numFieldsToCompress)
+ if (prefixFieldsMatch == fieldCountToCompress)
tuplesInSegment++;
else
processSegments++;
@@ -223,7 +223,7 @@
for (int r = 0; r < processSegments; r++) {
// compress current segment and then start new
// segment
- if (tuplesInSegment < occurrenceThreshold || numFieldsToCompress <= 0) {
+ if (tuplesInSegment < occurrenceThreshold || fieldCountToCompress <= 0) {
// segment does not have at least
// occurrenceThreshold tuples, so write tuples
// uncompressed
@@ -240,19 +240,19 @@
// extract prefix, write prefix tuple to buffer,
// and set prefix slot
newPrefixSlots[newPrefixSlots.length - 1 - prefixTupleIndex] = slotManager
- .encodeSlotFields(numFieldsToCompress, prefixFreeSpace);
+ .encodeSlotFields(fieldCountToCompress, prefixFreeSpace);
// int tmp = freeSpace;
// prevRec.reset();
// System.out.println("SOURCE CONTENTS: " +
// buf.getInt(prevRec.getFieldOff()) + " " +
// buf.getInt(prevRec.getFieldOff()+4));
- prefixFreeSpace += tupleWriter.writeTupleFields(prevTuple, 0, numFieldsToCompress,
+ prefixFreeSpace += tupleWriter.writeTupleFields(prevTuple, 0, fieldCountToCompress,
byteBuffer, prefixFreeSpace);
// System.out.println("WRITING PREFIX RECORD " +
// prefixSlotNum + " AT " + tmp + " " +
// freeSpace);
// System.out.print("CONTENTS: ");
- // for(int x = 0; x < numFieldsToCompress; x++)
+ // for(int x = 0; x < fieldCountToCompress; x++)
// System.out.print(buf.getInt(tmp + x*4) +
// " ");
// System.out.println();
@@ -264,8 +264,8 @@
tupleToWrite.resetByTupleIndex(frame, currTupleIndex);
newTupleSlots[tupleCount - 1 - currTupleIndex] = slotManager.encodeSlotFields(
prefixTupleIndex, tupleFreeSpace);
- tupleFreeSpace += tupleWriter.writeTupleFields(tupleToWrite, numFieldsToCompress,
- fieldCount - numFieldsToCompress, byteBuffer, tupleFreeSpace);
+ tupleFreeSpace += tupleWriter.writeTupleFields(tupleToWrite, fieldCountToCompress,
+ fieldCount - fieldCountToCompress, byteBuffer, tupleFreeSpace);
}
prefixTupleIndex++;
diff --git a/hyracks-storage-am-btree/src/main/java/edu/uci/ics/hyracks/storage/am/btree/frames/BTreeFieldPrefixNSMLeafFrame.java b/hyracks-storage-am-btree/src/main/java/edu/uci/ics/hyracks/storage/am/btree/frames/BTreeFieldPrefixNSMLeafFrame.java
index 0334a7f..56e0774 100644
--- a/hyracks-storage-am-btree/src/main/java/edu/uci/ics/hyracks/storage/am/btree/frames/BTreeFieldPrefixNSMLeafFrame.java
+++ b/hyracks-storage-am-btree/src/main/java/edu/uci/ics/hyracks/storage/am/btree/frames/BTreeFieldPrefixNSMLeafFrame.java
@@ -268,11 +268,11 @@
int newTupleBytes = 0;
int numPrefixFields = frameTuple.getNumPrefixFields();
- int numFields = frameTuple.getFieldCount();
+ int fieldCount = frameTuple.getFieldCount();
if (numPrefixFields != 0) {
// Check the space requirements for updating the suffix of the original tuple.
oldTupleBytes = frameTuple.getSuffixTupleSize();
- newTupleBytes = tupleWriter.bytesRequired(newTuple, numPrefixFields, numFields - numPrefixFields);
+ newTupleBytes = tupleWriter.bytesRequired(newTuple, numPrefixFields, fieldCount - numPrefixFields);
} else {
// The original tuple is uncompressed.
oldTupleBytes = frameTuple.getTupleSize();
@@ -308,18 +308,18 @@
int suffixTupleStartOff = slotManager.decodeSecondSlotField(tupleSlot);
frameTuple.resetByTupleIndex(this, tupleIndex);
- int numFields = frameTuple.getFieldCount();
+ int fieldCount = frameTuple.getFieldCount();
int numPrefixFields = frameTuple.getNumPrefixFields();
int oldTupleBytes = frameTuple.getSuffixTupleSize();
int bytesWritten = 0;
if (inPlace) {
// Overwrite the old tuple suffix in place.
- bytesWritten = tupleWriter.writeTupleFields(newTuple, numPrefixFields, numFields - numPrefixFields, buf, suffixTupleStartOff);
+ bytesWritten = tupleWriter.writeTupleFields(newTuple, numPrefixFields, fieldCount - numPrefixFields, buf, suffixTupleStartOff);
} else {
// Insert the new tuple suffix at the end of the free space, and change the slot value (effectively "deleting" the old tuple).
int newSuffixTupleStartOff = buf.getInt(freeSpaceOff);
- bytesWritten = tupleWriter.writeTupleFields(newTuple, numPrefixFields, numFields - numPrefixFields, buf, newSuffixTupleStartOff);
+ bytesWritten = tupleWriter.writeTupleFields(newTuple, numPrefixFields, fieldCount - numPrefixFields, buf, newSuffixTupleStartOff);
// Update slot value using the same prefix slot num.
slotManager.setSlot(tupleSlotOff, slotManager.encodeSlotFields(prefixSlotNum, newSuffixTupleStartOff));
// Update contiguous free space pointer.
diff --git a/hyracks-storage-am-btree/src/main/java/edu/uci/ics/hyracks/storage/am/btree/impls/BTree.java b/hyracks-storage-am-btree/src/main/java/edu/uci/ics/hyracks/storage/am/btree/impls/BTree.java
index 3952f5d..14c7208 100644
--- a/hyracks-storage-am-btree/src/main/java/edu/uci/ics/hyracks/storage/am/btree/impls/BTree.java
+++ b/hyracks-storage-am-btree/src/main/java/edu/uci/ics/hyracks/storage/am/btree/impls/BTree.java
@@ -925,7 +925,6 @@
BulkLoadContext ctx = new BulkLoadContext(fillFactor, leafFrame,
(IBTreeInteriorFrame)interiorFrameFactory.createFrame(), freePageManager.getMetaDataFrameFactory().createFrame(), cmp);
- ctx.nodeFrontiers.get(0).lastTuple.setFieldCount(fieldCount);
ctx.splitKey.getTuple().setFieldCount(cmp.getKeyFieldCount());
return ctx;
}
diff --git a/hyracks-tests/hyracks-storage-am-btree-test/src/test/java/edu/uci/ics/hyracks/storage/am/btree/util/BTreeTestUtils.java b/hyracks-tests/hyracks-storage-am-btree-test/src/test/java/edu/uci/ics/hyracks/storage/am/btree/util/BTreeTestUtils.java
index 8e7a43b..a86f50c 100644
--- a/hyracks-tests/hyracks-storage-am-btree-test/src/test/java/edu/uci/ics/hyracks/storage/am/btree/util/BTreeTestUtils.java
+++ b/hyracks-tests/hyracks-storage-am-btree-test/src/test/java/edu/uci/ics/hyracks/storage/am/btree/util/BTreeTestUtils.java
@@ -77,8 +77,8 @@
@SuppressWarnings("unchecked")
private static CheckTuple createCheckTupleFromTuple(ITupleReference tuple, ISerializerDeserializer[] fieldSerdes, int numKeys) throws HyracksDataException {
CheckTuple checkTuple = new CheckTuple(fieldSerdes.length, numKeys);
- int numFields = Math.min(fieldSerdes.length, tuple.getFieldCount());
- for (int i = 0; i < numFields; i++) {
+ int fieldCount = Math.min(fieldSerdes.length, tuple.getFieldCount());
+ for (int i = 0; i < fieldCount; i++) {
ByteArrayInputStream inStream = new ByteArrayInputStream(
tuple.getFieldData(i), tuple.getFieldStart(i),
tuple.getFieldLength(i));
@@ -267,7 +267,7 @@
}
public static void insertIntTuples(BTreeTestContext testCtx, int numTuples, Random rnd) throws Exception {
- int numFields = testCtx.getFieldCount();
+ int fieldCount = testCtx.getFieldCount();
int numKeyFields = testCtx.getKeyFieldCount();
testCtx.opCtx.reset(IndexOp.INSERT);
@@ -282,7 +282,7 @@
tupleValues[j] = rnd.nextInt() % maxValue;
}
// Set values.
- for (int j = numKeyFields; j < numFields; j++) {
+ for (int j = numKeyFields; j < fieldCount; j++) {
tupleValues[j] = j;
}
TupleUtils.createIntegerTuple(testCtx.tupleBuilder, testCtx.tuple, tupleValues);
@@ -292,7 +292,7 @@
try {
testCtx.btree.insert(testCtx.tuple, testCtx.opCtx);
// Set expected values. Do this only after insertion succeeds because we ignore duplicate keys.
- CheckTuple<Integer> checkTuple = new CheckTuple<Integer>(numFields, numKeyFields);
+ CheckTuple<Integer> checkTuple = new CheckTuple<Integer>(fieldCount, numKeyFields);
for(int v : tupleValues) {
checkTuple.add(v);
}
@@ -304,12 +304,12 @@
}
public static void insertStringTuples(BTreeTestContext testCtx, int numTuples, Random rnd) throws Exception {
- int numFields = testCtx.getFieldCount();
+ int fieldCount = testCtx.getFieldCount();
int numKeyFields = testCtx.getKeyFieldCount();
testCtx.opCtx.reset(IndexOp.INSERT);
- Object[] tupleValues = new Object[numFields];
+ Object[] tupleValues = new Object[fieldCount];
for (int i = 0; i < numTuples; i++) {
if ((i + 1) % (numTuples / Math.min(10, numTuples)) == 0) {
LOGGER.info("Inserting Tuple " + (i + 1) + "/" + numTuples);
@@ -320,14 +320,14 @@
tupleValues[j] = getRandomString(length, rnd);
}
// Set values.
- for (int j = numKeyFields; j < numFields; j++) {
+ for (int j = numKeyFields; j < fieldCount; j++) {
tupleValues[j] = getRandomString(5, rnd);
}
TupleUtils.createTuple(testCtx.tupleBuilder, testCtx.tuple, testCtx.fieldSerdes, tupleValues);
try {
testCtx.btree.insert(testCtx.tuple, testCtx.opCtx);
// Set expected values. Do this only after insertion succeeds because we ignore duplicate keys.
- CheckTuple<String> checkTuple = new CheckTuple<String>(numFields, numKeyFields);
+ CheckTuple<String> checkTuple = new CheckTuple<String>(fieldCount, numKeyFields);
for(Object v : tupleValues) {
checkTuple.add((String)v);
}
@@ -339,7 +339,7 @@
}
public static void bulkLoadIntTuples(BTreeTestContext testCtx, int numTuples, Random rnd) throws Exception {
- int numFields = testCtx.getFieldCount();
+ int fieldCount = testCtx.getFieldCount();
int numKeyFields = testCtx.getKeyFieldCount();
int[] tupleValues = new int[testCtx.getFieldCount()];
int maxValue = (int)Math.ceil(Math.pow(numTuples, 1.0/(double)numKeyFields));
@@ -349,12 +349,12 @@
tupleValues[j] = rnd.nextInt() % maxValue;
}
// Set values.
- for (int j = numKeyFields; j < numFields; j++) {
+ for (int j = numKeyFields; j < fieldCount; j++) {
tupleValues[j] = j;
}
// Set expected values. We also use these as the pre-sorted stream for bulk loading.
- CheckTuple<Integer> checkTuple = new CheckTuple<Integer>(numFields, numKeyFields);
+ CheckTuple<Integer> checkTuple = new CheckTuple<Integer>(fieldCount, numKeyFields);
for(int v : tupleValues) {
checkTuple.add(v);
}
@@ -365,9 +365,9 @@
}
public static void bulkLoadStringTuples(BTreeTestContext testCtx, int numTuples, Random rnd) throws Exception {
- int numFields = testCtx.getFieldCount();
+ int fieldCount = testCtx.getFieldCount();
int numKeyFields = testCtx.getKeyFieldCount();
- String[] tupleValues = new String[numFields];
+ String[] tupleValues = new String[fieldCount];
for (int i = 0; i < numTuples; i++) {
// Set keys.
for (int j = 0; j < numKeyFields; j++) {
@@ -375,11 +375,11 @@
tupleValues[j] = getRandomString(length, rnd);
}
// Set values.
- for (int j = numKeyFields; j < numFields; j++) {
+ for (int j = numKeyFields; j < fieldCount; j++) {
tupleValues[j] = getRandomString(5, rnd);
}
// Set expected values. We also use these as the pre-sorted stream for bulk loading.
- CheckTuple<String> checkTuple = new CheckTuple<String>(numFields, numKeyFields);
+ CheckTuple<String> checkTuple = new CheckTuple<String>(fieldCount, numKeyFields);
for(String v : tupleValues) {
checkTuple.add(v);
}
@@ -390,8 +390,8 @@
}
private static void bulkLoadCheckTuples(BTreeTestContext testCtx, int numTuples) throws HyracksDataException, TreeIndexException {
- int numFields = testCtx.getFieldCount();
- ArrayTupleBuilder tupleBuilder = new ArrayTupleBuilder(numFields);
+ int fieldCount = testCtx.getFieldCount();
+ ArrayTupleBuilder tupleBuilder = new ArrayTupleBuilder(fieldCount);
ArrayTupleReference tuple = new ArrayTupleReference();
// Perform bulk load.
IIndexBulkLoadContext bulkLoadCtx = testCtx.btree.beginBulkLoad(0.7f);