merge back the fix of issue 336 (build test hang)
diff --git a/asterix-app/src/test/java/edu/uci/ics/asterix/test/runtime/ExecutionTest.java b/asterix-app/src/test/java/edu/uci/ics/asterix/test/runtime/ExecutionTest.java
index 0dbf6d4..299c43c 100644
--- a/asterix-app/src/test/java/edu/uci/ics/asterix/test/runtime/ExecutionTest.java
+++ b/asterix-app/src/test/java/edu/uci/ics/asterix/test/runtime/ExecutionTest.java
@@ -231,7 +231,7 @@
List<CompilationUnit> cUnits = tcCtx.getTestCase().getCompilationUnit();
for (CompilationUnit cUnit : cUnits) {
- LOGGER.info("[TEST]: " + tcCtx.getTestCase().getFilePath() + "/" + cUnit.getName());
+ LOGGER.severe("[TEST]: " + tcCtx.getTestCase().getFilePath() + "/" + cUnit.getName());
testFileCtxs = tcCtx.getTestFiles(cUnit);
expectedResultFileCtxs = tcCtx.getExpectedResultFiles(cUnit);
diff --git a/asterix-transactions/src/main/java/edu/uci/ics/asterix/transaction/management/service/locking/EntityInfoManager.java b/asterix-transactions/src/main/java/edu/uci/ics/asterix/transaction/management/service/locking/EntityInfoManager.java
index b8820c4..5d81e8a 100644
--- a/asterix-transactions/src/main/java/edu/uci/ics/asterix/transaction/management/service/locking/EntityInfoManager.java
+++ b/asterix-transactions/src/main/java/edu/uci/ics/asterix/transaction/management/service/locking/EntityInfoManager.java
@@ -164,6 +164,7 @@
allocChild = pArray.size() - 1;
}
}
+
occupiedSlots++;
return pArray.get(allocChild).allocate() + allocChild * ChildEntityInfoArrayManager.NUM_OF_SLOTS;
}
@@ -230,45 +231,35 @@
*/
private void shrink() {
int i;
- boolean bContiguous = true;
- int decreaseCount = 0;
+ int removeCount = 0;
int size = pArray.size();
int maxDecreaseCount = size / 2;
ChildEntityInfoArrayManager child;
- for (i = size - 1; i >= 0; i--) {
- child = pArray.get(i);
- if (child.isEmpty() || child.isDeinitialized()) {
- if (bContiguous) {
- pArray.remove(i);
- if (++decreaseCount == maxDecreaseCount) {
- break;
- }
- } else {
- bContiguous = false;
- if (child.isEmpty()) {
- child.deinitialize();
- if (++decreaseCount == maxDecreaseCount) {
- break;
- }
- }
- }
- } else {
- bContiguous = false;
+
+        //The first buffer is never deinitialized.
+ for (i = 1; i < size; i++) {
+ if (pArray.get(i).isEmpty()) {
+ pArray.get(i).deinitialize();
}
}
- //reset allocChild when the child is removed or deinitialized.
- size = pArray.size();
- if (allocChild >= size || pArray.get(allocChild).isDeinitialized()) {
- //set allocChild to any initialized one.
- //It is guaranteed that there is at least one initialized child.
- for (i = 0; i < size; i++) {
- if (!pArray.get(i).isDeinitialized()) {
- allocChild = i;
+ //remove the empty buffers from the end
+ for (i = size - 1; i >= 1; i--) {
+ child = pArray.get(i);
+ if (child.isDeinitialized()) {
+ pArray.remove(i);
+ if (++removeCount == maxDecreaseCount) {
break;
}
+ } else {
+ break;
}
}
+
+ //reset allocChild to the first buffer
+ allocChild = 0;
+
+ isShrinkTimerOn = false;
}
public String prettyPrint() {
@@ -538,7 +529,7 @@
freeSlotNum = getNextFreeSlot(currentSlot);
occupiedSlots++;
if (LockManager.IS_DEBUG_MODE) {
- System.out.println(Thread.currentThread().getName()+" entity allocate: "+currentSlot);
+ System.out.println(Thread.currentThread().getName() + " entity allocate: " + currentSlot);
}
return currentSlot;
}
@@ -548,7 +539,7 @@
freeSlotNum = slotNum;
occupiedSlots--;
if (LockManager.IS_DEBUG_MODE) {
- System.out.println(Thread.currentThread().getName()+" entity deallocate: "+slotNum);
+ System.out.println(Thread.currentThread().getName() + " entity deallocate: " + slotNum);
}
}
diff --git a/asterix-transactions/src/main/java/edu/uci/ics/asterix/transaction/management/service/locking/LockManager.java b/asterix-transactions/src/main/java/edu/uci/ics/asterix/transaction/management/service/locking/LockManager.java
index 4e980d4..a354d2a 100644
--- a/asterix-transactions/src/main/java/edu/uci/ics/asterix/transaction/management/service/locking/LockManager.java
+++ b/asterix-transactions/src/main/java/edu/uci/ics/asterix/transaction/management/service/locking/LockManager.java
@@ -124,73 +124,111 @@
JobInfo jobInfo;
byte datasetLockMode = entityHashValue == -1 ? lockMode : lockMode == LockMode.S ? LockMode.IS : LockMode.IX;
boolean doEscalate = false;
+ boolean caughtLockMgrLatchException = false;
latchLockTable();
- validateJob(txnContext);
+ try {
+ validateJob(txnContext);
- if (IS_DEBUG_MODE) {
- trackLockRequest("Requested", RequestType.LOCK, datasetId, entityHashValue, lockMode, txnContext,
- dLockInfo, eLockInfo);
- }
-
- dLockInfo = datasetResourceHT.get(datasetId);
- jobInfo = jobHT.get(jobId);
-
- if (ALLOW_ESCALATE_FROM_ENTITY_TO_DATASET) {
- if (!isInstant && datasetLockMode == LockMode.IS && jobInfo != null && dLockInfo != null) {
- int escalateStatus = needEscalateFromEntityToDataset(jobInfo, dId, lockMode);
- switch (escalateStatus) {
- case DO_ESCALATE:
- entityHashValue = -1;
- doEscalate = true;
- break;
-
- case ESCALATED:
- unlatchLockTable();
- return;
-
- default:
- break;
- }
- }
- }
-
- //#. if the datasetLockInfo doesn't exist in datasetResourceHT
- if (dLockInfo == null || dLockInfo.isNoHolder()) {
- if (dLockInfo == null) {
- dLockInfo = new DatasetLockInfo(entityLockInfoManager, entityInfoManager, lockWaiterManager);
- datasetResourceHT.put(new DatasetId(dId), dLockInfo); //datsetId obj should be created
- }
- entityInfo = entityInfoManager.allocate(jId, dId, entityHashValue, lockMode);
-
- //if dataset-granule lock
- if (entityHashValue == -1) { //-1 stands for dataset-granule
- entityInfoManager.increaseDatasetLockCount(entityInfo);
- dLockInfo.increaseLockCount(datasetLockMode);
- dLockInfo.addHolder(entityInfo);
- } else {
- entityInfoManager.increaseDatasetLockCount(entityInfo);
- dLockInfo.increaseLockCount(datasetLockMode);
- //add entityLockInfo
- eLockInfo = entityLockInfoManager.allocate();
- dLockInfo.getEntityResourceHT().put(entityHashValue, eLockInfo);
- entityInfoManager.increaseEntityLockCount(entityInfo);
- entityLockInfoManager.increaseLockCount(eLockInfo, lockMode);
- entityLockInfoManager.addHolder(eLockInfo, entityInfo);
+ if (IS_DEBUG_MODE) {
+ trackLockRequest("Requested", RequestType.LOCK, datasetId, entityHashValue, lockMode, txnContext,
+ dLockInfo, eLockInfo);
}
- if (jobInfo == null) {
- jobInfo = new JobInfo(entityInfoManager, lockWaiterManager, txnContext);
- jobHT.put(jobId, jobInfo); //jobId obj doesn't have to be created
- }
- jobInfo.addHoldingResource(entityInfo);
+ dLockInfo = datasetResourceHT.get(datasetId);
+ jobInfo = jobHT.get(jobId);
if (ALLOW_ESCALATE_FROM_ENTITY_TO_DATASET) {
- if (!isInstant && datasetLockMode == LockMode.IS) {
- jobInfo.increaseDatasetISLockCount(dId);
+ if (!isInstant && datasetLockMode == LockMode.IS && jobInfo != null && dLockInfo != null) {
+ int escalateStatus = needEscalateFromEntityToDataset(jobInfo, dId, lockMode);
+ switch (escalateStatus) {
+ case DO_ESCALATE:
+ entityHashValue = -1;
+ doEscalate = true;
+ break;
+
+ case ESCALATED:
+ return;
+
+ default:
+ break;
+ }
+ }
+ }
+
+ //#. if the datasetLockInfo doesn't exist in datasetResourceHT
+ if (dLockInfo == null || dLockInfo.isNoHolder()) {
+ if (dLockInfo == null) {
+ dLockInfo = new DatasetLockInfo(entityLockInfoManager, entityInfoManager, lockWaiterManager);
+                    datasetResourceHT.put(new DatasetId(dId), dLockInfo); //datasetId obj should be created
+ }
+ entityInfo = entityInfoManager.allocate(jId, dId, entityHashValue, lockMode);
+
+ //if dataset-granule lock
+ if (entityHashValue == -1) { //-1 stands for dataset-granule
+ entityInfoManager.increaseDatasetLockCount(entityInfo);
+ dLockInfo.increaseLockCount(datasetLockMode);
+ dLockInfo.addHolder(entityInfo);
+ } else {
+ entityInfoManager.increaseDatasetLockCount(entityInfo);
+ dLockInfo.increaseLockCount(datasetLockMode);
+ //add entityLockInfo
+ eLockInfo = entityLockInfoManager.allocate();
+ dLockInfo.getEntityResourceHT().put(entityHashValue, eLockInfo);
+ entityInfoManager.increaseEntityLockCount(entityInfo);
+ entityLockInfoManager.increaseLockCount(eLockInfo, lockMode);
+ entityLockInfoManager.addHolder(eLockInfo, entityInfo);
+ }
+
+ if (jobInfo == null) {
+ jobInfo = new JobInfo(entityInfoManager, lockWaiterManager, txnContext);
+ jobHT.put(jobId, jobInfo); //jobId obj doesn't have to be created
+ }
+ jobInfo.addHoldingResource(entityInfo);
+
+ if (ALLOW_ESCALATE_FROM_ENTITY_TO_DATASET) {
+ if (!isInstant && datasetLockMode == LockMode.IS) {
+ jobInfo.increaseDatasetISLockCount(dId);
+ if (doEscalate) {
+ throw new IllegalStateException(
+ "ESCALATE_TRHESHOLD_ENTITY_TO_DATASET should not be set to "
+ + ESCALATE_TRHESHOLD_ENTITY_TO_DATASET);
+ }
+ }
+ }
+
+ if (IS_DEBUG_MODE) {
+ trackLockRequest("Granted", RequestType.LOCK, datasetId, entityHashValue, lockMode, txnContext,
+ dLockInfo, eLockInfo);
+ }
+
+ return;
+ }
+
+ //#. the datasetLockInfo exists in datasetResourceHT.
+ //1. handle dataset-granule lock
+ entityInfo = lockDatasetGranule(datasetId, entityHashValue, lockMode, txnContext);
+
+ //2. handle entity-granule lock
+ if (entityHashValue != -1) {
+ lockEntityGranule(datasetId, entityHashValue, lockMode, entityInfo, txnContext);
+ }
+
+ if (ALLOW_ESCALATE_FROM_ENTITY_TO_DATASET) {
+ if (!isInstant) {
if (doEscalate) {
- throw new IllegalStateException("ESCALATE_TRHESHOLD_ENTITY_TO_DATASET should not be set to "
- + ESCALATE_TRHESHOLD_ENTITY_TO_DATASET);
+ //jobInfo must not be null.
+ assert jobInfo != null;
+ jobInfo.increaseDatasetISLockCount(dId);
+ //release pre-acquired locks
+ releaseDatasetISLocks(jobInfo, jobId, datasetId, txnContext);
+ } else if (datasetLockMode == LockMode.IS) {
+ if (jobInfo == null) {
+ jobInfo = jobHT.get(jobId);
+ //jobInfo must not be null;
+ assert jobInfo != null;
+ }
+ jobInfo.increaseDatasetISLockCount(dId);
}
}
}
@@ -199,44 +237,18 @@
trackLockRequest("Granted", RequestType.LOCK, datasetId, entityHashValue, lockMode, txnContext,
dLockInfo, eLockInfo);
}
-
- unlatchLockTable();
- return;
- }
-
- //#. the datasetLockInfo exists in datasetResourceHT.
- //1. handle dataset-granule lock
- entityInfo = lockDatasetGranule(datasetId, entityHashValue, lockMode, txnContext);
-
- //2. handle entity-granule lock
- if (entityHashValue != -1) {
- lockEntityGranule(datasetId, entityHashValue, lockMode, entityInfo, txnContext);
- }
-
- if (ALLOW_ESCALATE_FROM_ENTITY_TO_DATASET) {
- if (!isInstant) {
- if (doEscalate) {
- //jobInfo must not be null.
- assert jobInfo != null;
- jobInfo.increaseDatasetISLockCount(dId);
- //release pre-acquired locks
- releaseDatasetISLocks(jobInfo, jobId, datasetId, txnContext);
- } else if (datasetLockMode == LockMode.IS) {
- if (jobInfo == null) {
- jobInfo = jobHT.get(jobId);
- //jobInfo must not be null;
- assert jobInfo != null;
- }
- jobInfo.increaseDatasetISLockCount(dId);
- }
+ } catch (Exception e) {
+ if (e instanceof LockMgrLatchHandlerException) {
+ // don't unlatch
+ caughtLockMgrLatchException = true;
+ throw new ACIDException(((LockMgrLatchHandlerException) e).getInternalException());
+ }
+ } finally {
+ if (!caughtLockMgrLatchException) {
+ unlatchLockTable();
}
}
- if (IS_DEBUG_MODE) {
- trackLockRequest("Granted", RequestType.LOCK, datasetId, entityHashValue, lockMode, txnContext, dLockInfo,
- eLockInfo);
- }
- unlatchLockTable();
return;
}
@@ -285,14 +297,9 @@
private void validateJob(TransactionContext txnContext) throws ACIDException {
if (txnContext.getTxnState() == TransactionState.ABORTED) {
- unlatchLockTable();
throw new ACIDException("" + txnContext.getJobId() + " is in ABORTED state.");
} else if (txnContext.getStatus() == TransactionContext.TIMED_OUT_STATUS) {
- try {
- requestAbort(txnContext);
- } finally {
- unlatchLockTable();
- }
+ requestAbort(txnContext);
}
}
@@ -657,127 +664,124 @@
}
latchLockTable();
- validateJob(txnContext);
+ try {
+ validateJob(txnContext);
- if (IS_DEBUG_MODE) {
- trackLockRequest("Requested", RequestType.UNLOCK, datasetId, entityHashValue, (byte) 0, txnContext,
- dLockInfo, eLockInfo);
- }
+ if (IS_DEBUG_MODE) {
+ trackLockRequest("Requested", RequestType.UNLOCK, datasetId, entityHashValue, (byte) 0, txnContext,
+ dLockInfo, eLockInfo);
+ }
- //find the resource to be unlocked
- dLockInfo = datasetResourceHT.get(datasetId);
- jobInfo = jobHT.get(jobId);
- if (dLockInfo == null || jobInfo == null) {
- unlatchLockTable();
- throw new IllegalStateException("Invalid unlock request: Corresponding lock info doesn't exist.");
- }
+ //find the resource to be unlocked
+ dLockInfo = datasetResourceHT.get(datasetId);
+ jobInfo = jobHT.get(jobId);
+ if (dLockInfo == null || jobInfo == null) {
+ throw new IllegalStateException("Invalid unlock request: Corresponding lock info doesn't exist.");
+ }
- eLockInfo = dLockInfo.getEntityResourceHT().get(entityHashValue);
+ eLockInfo = dLockInfo.getEntityResourceHT().get(entityHashValue);
- if (eLockInfo == -1) {
- unlatchLockTable();
- throw new IllegalStateException("Invalid unlock request: Corresponding lock info doesn't exist.");
- }
+ if (eLockInfo == -1) {
+ throw new IllegalStateException("Invalid unlock request: Corresponding lock info doesn't exist.");
+ }
- //find the corresponding entityInfo
- entityInfo = entityLockInfoManager.findEntityInfoFromHolderList(eLockInfo, jobId.getId(), entityHashValue);
- if (entityInfo == -1) {
- unlatchLockTable();
- throw new IllegalStateException("Invalid unlock request[" + jobId.getId() + "," + datasetId.getId() + ","
- + entityHashValue + "]: Corresponding lock info doesn't exist.");
- }
+ //find the corresponding entityInfo
+ entityInfo = entityLockInfoManager.findEntityInfoFromHolderList(eLockInfo, jobId.getId(), entityHashValue);
+ if (entityInfo == -1) {
+ throw new IllegalStateException("Invalid unlock request[" + jobId.getId() + "," + datasetId.getId()
+ + "," + entityHashValue + "]: Corresponding lock info doesn't exist.");
+ }
- datasetLockMode = entityInfoManager.getDatasetLockMode(entityInfo) == LockMode.S ? LockMode.IS : LockMode.IX;
+ datasetLockMode = entityInfoManager.getDatasetLockMode(entityInfo) == LockMode.S ? LockMode.IS
+ : LockMode.IX;
- //decrease the corresponding count of dLockInfo/eLockInfo/entityInfo
- dLockInfo.decreaseLockCount(datasetLockMode);
- entityLockInfoManager.decreaseLockCount(eLockInfo, entityInfoManager.getEntityLockMode(entityInfo));
- entityInfoManager.decreaseDatasetLockCount(entityInfo);
- entityInfoManager.decreaseEntityLockCount(entityInfo);
+ //decrease the corresponding count of dLockInfo/eLockInfo/entityInfo
+ dLockInfo.decreaseLockCount(datasetLockMode);
+ entityLockInfoManager.decreaseLockCount(eLockInfo, entityInfoManager.getEntityLockMode(entityInfo));
+ entityInfoManager.decreaseDatasetLockCount(entityInfo);
+ entityInfoManager.decreaseEntityLockCount(entityInfo);
- if (entityInfoManager.getEntityLockCount(entityInfo) == 0
- && entityInfoManager.getDatasetLockCount(entityInfo) == 0) {
- int threadCount = 0; //number of threads(in the same job) waiting on the same resource
- int waiterObjId = jobInfo.getFirstWaitingResource();
- int waitingEntityInfo;
- LockWaiter waiterObj;
+ if (entityInfoManager.getEntityLockCount(entityInfo) == 0
+ && entityInfoManager.getDatasetLockCount(entityInfo) == 0) {
+ int threadCount = 0; //number of threads(in the same job) waiting on the same resource
+ int waiterObjId = jobInfo.getFirstWaitingResource();
+ int waitingEntityInfo;
+ LockWaiter waiterObj;
- //TODO
- //This code should be taken care properly when there is a way to avoid doubling memory space for txnIds.
- //This commit log is written here in order to avoid increasing the memory space for managing transactionIds
- if (commitFlag) {
- if (txnContext.getTransactionType().equals(TransactionContext.TransactionType.READ_WRITE)) {
- try {
- txnSubsystem.getLogManager().log(LogType.ENTITY_COMMIT, txnContext, datasetId.getId(),
- entityHashValue, -1, (byte) 0, 0, null, null, logicalLogLocator);
- } catch (ACIDException e) {
+ //TODO
+ //This code should be taken care properly when there is a way to avoid doubling memory space for txnIds.
+ //This commit log is written here in order to avoid increasing the memory space for managing transactionIds
+ if (commitFlag) {
+ if (txnContext.getTransactionType().equals(TransactionContext.TransactionType.READ_WRITE)) {
try {
+ txnSubsystem.getLogManager().log(LogType.ENTITY_COMMIT, txnContext, datasetId.getId(),
+ entityHashValue, -1, (byte) 0, 0, null, null, logicalLogLocator);
+ } catch (ACIDException e) {
requestAbort(txnContext);
- } finally {
- unlatchLockTable();
}
}
+
+ txnContext.updateLastLSNForIndexes(logicalLogLocator.getLsn());
}
- txnContext.updateLastLSNForIndexes(logicalLogLocator.getLsn());
- }
+ //1) wake up waiters and remove holder
+ //wake up waiters of dataset-granule lock
+ wakeUpDatasetLockWaiters(dLockInfo);
+ //wake up waiters of entity-granule lock
+ wakeUpEntityLockWaiters(eLockInfo);
+ //remove the holder from eLockInfo's holder list and remove the holding resource from jobInfo's holding resource list
+ //this can be done in the following single function call.
+ entityLockInfoManager.removeHolder(eLockInfo, entityInfo, jobInfo);
- //1) wake up waiters and remove holder
- //wake up waiters of dataset-granule lock
- wakeUpDatasetLockWaiters(dLockInfo);
- //wake up waiters of entity-granule lock
- wakeUpEntityLockWaiters(eLockInfo);
- //remove the holder from eLockInfo's holder list and remove the holding resource from jobInfo's holding resource list
- //this can be done in the following single function call.
- entityLockInfoManager.removeHolder(eLockInfo, entityInfo, jobInfo);
-
- //2) if
- // there is no waiting thread on the same resource (this can be checked through jobInfo)
- // then
- // a) delete the corresponding entityInfo
- // b) write commit log for the unlocked resource(which is a committed txn).
- while (waiterObjId != -1) {
- waiterObj = lockWaiterManager.getLockWaiter(waiterObjId);
- waitingEntityInfo = waiterObj.getEntityInfoSlot();
- if (entityInfoManager.getDatasetId(waitingEntityInfo) == datasetId.getId()
- && entityInfoManager.getPKHashVal(waitingEntityInfo) == entityHashValue) {
- threadCount++;
- break;
+ //2) if
+ // there is no waiting thread on the same resource (this can be checked through jobInfo)
+ // then
+ // a) delete the corresponding entityInfo
+ // b) write commit log for the unlocked resource(which is a committed txn).
+ while (waiterObjId != -1) {
+ waiterObj = lockWaiterManager.getLockWaiter(waiterObjId);
+ waitingEntityInfo = waiterObj.getEntityInfoSlot();
+ if (entityInfoManager.getDatasetId(waitingEntityInfo) == datasetId.getId()
+ && entityInfoManager.getPKHashVal(waitingEntityInfo) == entityHashValue) {
+ threadCount++;
+ break;
+ }
+ waiterObjId = waiterObj.getNextWaiterObjId();
}
- waiterObjId = waiterObj.getNextWaiterObjId();
- }
- if (threadCount == 0) {
- if (entityInfoManager.getEntityLockMode(entityInfo) == LockMode.X) {
- //TODO
- //write a commit log for the unlocked resource
- //need to figure out that instantLock() also needs to write a commit log.
+ if (threadCount == 0) {
+ if (entityInfoManager.getEntityLockMode(entityInfo) == LockMode.X) {
+ //TODO
+ //write a commit log for the unlocked resource
+ //need to figure out that instantLock() also needs to write a commit log.
+ }
+ entityInfoManager.deallocate(entityInfo);
}
- entityInfoManager.deallocate(entityInfo);
}
- }
- //deallocate entityLockInfo's slot if there is no txn referring to the entityLockInfo.
- if (entityLockInfoManager.getFirstWaiter(eLockInfo) == -1
- && entityLockInfoManager.getLastHolder(eLockInfo) == -1
- && entityLockInfoManager.getUpgrader(eLockInfo) == -1) {
- dLockInfo.getEntityResourceHT().remove(entityHashValue);
- entityLockInfoManager.deallocate(eLockInfo);
- }
-
- //we don't deallocate datasetLockInfo even if there is no txn referring to the datasetLockInfo
- //since the datasetLockInfo is likely to be referred to again.
-
- if (ALLOW_ESCALATE_FROM_ENTITY_TO_DATASET) {
- if (!isInstant && datasetLockMode == LockMode.IS) {
- jobInfo.decreaseDatasetISLockCount(datasetId.getId());
+ //deallocate entityLockInfo's slot if there is no txn referring to the entityLockInfo.
+ if (entityLockInfoManager.getFirstWaiter(eLockInfo) == -1
+ && entityLockInfoManager.getLastHolder(eLockInfo) == -1
+ && entityLockInfoManager.getUpgrader(eLockInfo) == -1) {
+ dLockInfo.getEntityResourceHT().remove(entityHashValue);
+ entityLockInfoManager.deallocate(eLockInfo);
}
- }
- if (IS_DEBUG_MODE) {
- trackLockRequest("Granted", RequestType.UNLOCK, datasetId, entityHashValue, (byte) 0, txnContext,
- dLockInfo, eLockInfo);
+ //we don't deallocate datasetLockInfo even if there is no txn referring to the datasetLockInfo
+ //since the datasetLockInfo is likely to be referred to again.
+
+ if (ALLOW_ESCALATE_FROM_ENTITY_TO_DATASET) {
+ if (!isInstant && datasetLockMode == LockMode.IS) {
+ jobInfo.decreaseDatasetISLockCount(datasetId.getId());
+ }
+ }
+
+ if (IS_DEBUG_MODE) {
+ trackLockRequest("Granted", RequestType.UNLOCK, datasetId, entityHashValue, (byte) 0, txnContext,
+ dLockInfo, eLockInfo);
+ }
+ } finally {
+ unlatchLockTable();
}
- unlatchLockTable();
}
@Override
@@ -797,183 +801,178 @@
JobId jobId = txnContext.getJobId();
latchLockTable();
-
- if (IS_DEBUG_MODE) {
- trackLockRequest("Requested", RequestType.RELEASE_LOCKS, new DatasetId(0), 0, (byte) 0, txnContext,
- dLockInfo, eLockInfo);
- }
-
- JobInfo jobInfo = jobHT.get(jobId);
- if (jobInfo == null) {
- unlatchLockTable();
- return;
- }
-
- //remove waiterObj of JobInfo
- //[Notice]
- //waiterObjs may exist if aborted thread is the caller of this function.
- //Even if there are the waiterObjs, there is no waiting thread on the objects.
- //If the caller of this function is an aborted thread, it is guaranteed that there is no waiting threads
- //on the waiterObjs since when the aborted caller thread is waken up, all other waiting threads are
- //also waken up at the same time through 'notifyAll()' call.
- //In contrast, if the caller of this function is not an aborted thread, then there is no waiting object.
- int waiterObjId = jobInfo.getFirstWaitingResource();
- int nextWaiterObjId;
- while (waiterObjId != -1) {
- existWaiter = true;
- waiterObj = lockWaiterManager.getLockWaiter(waiterObjId);
- nextWaiterObjId = waiterObj.getNextWaitingResourceObjId();
- entityInfo = waiterObj.getEntityInfoSlot();
+ try {
if (IS_DEBUG_MODE) {
- if (jobId.getId() != entityInfoManager.getJobId(entityInfo)) {
- throw new IllegalStateException("JobInfo(" + jobId + ") has diffrent Job(JID:"
- + entityInfoManager.getJobId(entityInfo) + "'s lock request!!!");
- }
+ trackLockRequest("Requested", RequestType.RELEASE_LOCKS, new DatasetId(0), 0, (byte) 0, txnContext,
+ dLockInfo, eLockInfo);
}
- //1. remove from waiter(or upgrader)'s list of dLockInfo or eLockInfo.
- did = entityInfoManager.getDatasetId(entityInfo);
- tempDatasetIdObj.setId(did);
- dLockInfo = datasetResourceHT.get(tempDatasetIdObj);
-
- if (waiterObj.isWaitingOnEntityLock()) {
- entityHashValue = entityInfoManager.getPKHashVal(entityInfo);
- eLockInfo = dLockInfo.getEntityResourceHT().get(entityHashValue);
- if (waiterObj.isWaiter()) {
- entityLockInfoManager.removeWaiter(eLockInfo, waiterObjId);
- } else {
- entityLockInfoManager.removeUpgrader(eLockInfo, waiterObjId);
- }
- } else {
- if (waiterObj.isWaiter()) {
- dLockInfo.removeWaiter(waiterObjId);
- } else {
- dLockInfo.removeUpgrader(waiterObjId);
- }
+ JobInfo jobInfo = jobHT.get(jobId);
+ if (jobInfo == null) {
+ return;
}
- //2. wake-up waiters
- latchWaitNotify();
- synchronized (waiterObj) {
- unlatchWaitNotify();
- waiterObj.setWait(false);
+ //remove waiterObj of JobInfo
+ //[Notice]
+ //waiterObjs may exist if aborted thread is the caller of this function.
+ //Even if there are the waiterObjs, there is no waiting thread on the objects.
+            //If the caller of this function is an aborted thread, it is guaranteed that there are no waiting threads
+            //on the waiterObjs since when the aborted caller thread is woken up, all other waiting threads are
+            //also woken up at the same time through the 'notifyAll()' call.
+ //In contrast, if the caller of this function is not an aborted thread, then there is no waiting object.
+ int waiterObjId = jobInfo.getFirstWaitingResource();
+ int nextWaiterObjId;
+ while (waiterObjId != -1) {
+ existWaiter = true;
+ waiterObj = lockWaiterManager.getLockWaiter(waiterObjId);
+ nextWaiterObjId = waiterObj.getNextWaitingResourceObjId();
+ entityInfo = waiterObj.getEntityInfoSlot();
if (IS_DEBUG_MODE) {
- System.out.println("" + Thread.currentThread().getName() + "\twake-up(D): WID(" + waiterObjId
- + "),EID(" + waiterObj.getEntityInfoSlot() + ")");
- }
- waiterObj.notifyAll();
- }
-
- //3. deallocate waiterObj
- lockWaiterManager.deallocate(waiterObjId);
-
- //4. deallocate entityInfo only if this waiter is not an upgrader
- if (entityInfoManager.getDatasetLockCount(entityInfo) == 0
- && entityInfoManager.getEntityLockCount(entityInfo) == 0) {
- entityInfoManager.deallocate(entityInfo);
- }
- waiterObjId = nextWaiterObjId;
- }
-
- //release holding resources
- entityInfo = jobInfo.getLastHoldingResource();
- while (entityInfo != -1) {
- prevEntityInfo = entityInfoManager.getPrevJobResource(entityInfo);
-
- //decrease lock count of datasetLock and entityLock
- did = entityInfoManager.getDatasetId(entityInfo);
- tempDatasetIdObj.setId(did);
- dLockInfo = datasetResourceHT.get(tempDatasetIdObj);
- entityHashValue = entityInfoManager.getPKHashVal(entityInfo);
-
- if (entityHashValue == -1) {
- //decrease datasetLockCount
- lockMode = entityInfoManager.getDatasetLockMode(entityInfo);
- datasetLockCount = entityInfoManager.getDatasetLockCount(entityInfo);
- if (datasetLockCount != 0) {
- dLockInfo.decreaseLockCount(lockMode, datasetLockCount);
-
- //wakeup waiters of datasetLock and remove holder from datasetLockInfo
- wakeUpDatasetLockWaiters(dLockInfo);
-
- //remove the holder from datasetLockInfo only if the lock is dataset-granule lock.
- //--> this also removes the holding resource from jobInfo
- //(Because the IX and IS lock's holders are handled implicitly,
- //those are not in the holder list of datasetLockInfo.)
- dLockInfo.removeHolder(entityInfo, jobInfo);
- }
- } else {
- //decrease datasetLockCount
- lockMode = entityInfoManager.getDatasetLockMode(entityInfo);
- lockMode = lockMode == LockMode.S ? LockMode.IS : LockMode.IX;
- datasetLockCount = entityInfoManager.getDatasetLockCount(entityInfo);
-
- if (datasetLockCount != 0) {
- dLockInfo.decreaseLockCount(lockMode, datasetLockCount);
- }
-
- //decrease entityLockCount
- lockMode = entityInfoManager.getEntityLockMode(entityInfo);
- entityLockCount = entityInfoManager.getEntityLockCount(entityInfo);
- eLockInfo = dLockInfo.getEntityResourceHT().get(entityHashValue);
- if (IS_DEBUG_MODE) {
- if (eLockInfo < 0) {
- System.out.println("eLockInfo:" + eLockInfo);
+ if (jobId.getId() != entityInfoManager.getJobId(entityInfo)) {
+ throw new IllegalStateException("JobInfo(" + jobId + ") has diffrent Job(JID:"
+ + entityInfoManager.getJobId(entityInfo) + "'s lock request!!!");
}
}
- if (entityLockCount != 0) {
- entityLockInfoManager.decreaseLockCount(eLockInfo, lockMode, (short) entityLockCount);
+ //1. remove from waiter(or upgrader)'s list of dLockInfo or eLockInfo.
+ did = entityInfoManager.getDatasetId(entityInfo);
+ tempDatasetIdObj.setId(did);
+ dLockInfo = datasetResourceHT.get(tempDatasetIdObj);
+
+ if (waiterObj.isWaitingOnEntityLock()) {
+ entityHashValue = entityInfoManager.getPKHashVal(entityInfo);
+ eLockInfo = dLockInfo.getEntityResourceHT().get(entityHashValue);
+ if (waiterObj.isWaiter()) {
+ entityLockInfoManager.removeWaiter(eLockInfo, waiterObjId);
+ } else {
+ entityLockInfoManager.removeUpgrader(eLockInfo, waiterObjId);
+ }
+ } else {
+ if (waiterObj.isWaiter()) {
+ dLockInfo.removeWaiter(waiterObjId);
+ } else {
+ dLockInfo.removeUpgrader(waiterObjId);
+ }
}
- if (datasetLockCount != 0) {
- //wakeup waiters of datasetLock and don't remove holder from datasetLockInfo
- wakeUpDatasetLockWaiters(dLockInfo);
+ //2. wake-up waiters
+ latchWaitNotify();
+ synchronized (waiterObj) {
+ unlatchWaitNotify();
+ waiterObj.setWait(false);
+ if (IS_DEBUG_MODE) {
+ System.out.println("" + Thread.currentThread().getName() + "\twake-up(D): WID(" + waiterObjId
+ + "),EID(" + waiterObj.getEntityInfoSlot() + ")");
+ }
+ waiterObj.notifyAll();
}
- if (entityLockCount != 0) {
- //wakeup waiters of entityLock
- wakeUpEntityLockWaiters(eLockInfo);
+ //3. deallocate waiterObj
+ lockWaiterManager.deallocate(waiterObjId);
- //remove the holder from entityLockInfo
- //--> this also removes the holding resource from jobInfo
- entityLockInfoManager.removeHolder(eLockInfo, entityInfo, jobInfo);
+ //4. deallocate entityInfo only if this waiter is not an upgrader
+ if (entityInfoManager.getDatasetLockCount(entityInfo) == 0
+ && entityInfoManager.getEntityLockCount(entityInfo) == 0) {
+ entityInfoManager.deallocate(entityInfo);
}
-
- //deallocate entityLockInfo if there is no holder and waiter.
- if (entityLockInfoManager.getLastHolder(eLockInfo) == -1
- && entityLockInfoManager.getFirstWaiter(eLockInfo) == -1
- && entityLockInfoManager.getUpgrader(eLockInfo) == -1) {
- dLockInfo.getEntityResourceHT().remove(entityHashValue);
- entityLockInfoManager.deallocate(eLockInfo);
- // if (IS_DEBUG_MODE) {
- // System.out.println("removed PK["+entityHashValue+"]");
- // }
- }
+ waiterObjId = nextWaiterObjId;
}
- //deallocate entityInfo
- entityInfoManager.deallocate(entityInfo);
- // if (IS_DEBUG_MODE) {
- // System.out.println("dellocate EntityInfo["+entityInfo+"]");
- // }
+ //release holding resources
+ entityInfo = jobInfo.getLastHoldingResource();
+ while (entityInfo != -1) {
+ prevEntityInfo = entityInfoManager.getPrevJobResource(entityInfo);
- entityInfo = prevEntityInfo;
+ //decrease lock count of datasetLock and entityLock
+ did = entityInfoManager.getDatasetId(entityInfo);
+ tempDatasetIdObj.setId(did);
+ dLockInfo = datasetResourceHT.get(tempDatasetIdObj);
+ entityHashValue = entityInfoManager.getPKHashVal(entityInfo);
+
+ if (entityHashValue == -1) {
+ //decrease datasetLockCount
+ lockMode = entityInfoManager.getDatasetLockMode(entityInfo);
+ datasetLockCount = entityInfoManager.getDatasetLockCount(entityInfo);
+ if (datasetLockCount != 0) {
+ dLockInfo.decreaseLockCount(lockMode, datasetLockCount);
+
+ //wakeup waiters of datasetLock and remove holder from datasetLockInfo
+ wakeUpDatasetLockWaiters(dLockInfo);
+
+ //remove the holder from datasetLockInfo only if the lock is dataset-granule lock.
+ //--> this also removes the holding resource from jobInfo
+ //(Because the IX and IS lock's holders are handled implicitly,
+ //those are not in the holder list of datasetLockInfo.)
+ dLockInfo.removeHolder(entityInfo, jobInfo);
+ }
+ } else {
+ //decrease datasetLockCount
+ lockMode = entityInfoManager.getDatasetLockMode(entityInfo);
+ lockMode = lockMode == LockMode.S ? LockMode.IS : LockMode.IX;
+ datasetLockCount = entityInfoManager.getDatasetLockCount(entityInfo);
+
+ if (datasetLockCount != 0) {
+ dLockInfo.decreaseLockCount(lockMode, datasetLockCount);
+ }
+
+ //decrease entityLockCount
+ lockMode = entityInfoManager.getEntityLockMode(entityInfo);
+ entityLockCount = entityInfoManager.getEntityLockCount(entityInfo);
+ eLockInfo = dLockInfo.getEntityResourceHT().get(entityHashValue);
+ if (IS_DEBUG_MODE) {
+ if (eLockInfo < 0) {
+ System.out.println("eLockInfo:" + eLockInfo);
+ }
+ }
+
+ if (entityLockCount != 0) {
+ entityLockInfoManager.decreaseLockCount(eLockInfo, lockMode, (short) entityLockCount);
+ }
+
+ if (datasetLockCount != 0) {
+ //wakeup waiters of datasetLock and don't remove holder from datasetLockInfo
+ wakeUpDatasetLockWaiters(dLockInfo);
+ }
+
+ if (entityLockCount != 0) {
+ //wakeup waiters of entityLock
+ wakeUpEntityLockWaiters(eLockInfo);
+
+ //remove the holder from entityLockInfo
+ //--> this also removes the holding resource from jobInfo
+ entityLockInfoManager.removeHolder(eLockInfo, entityInfo, jobInfo);
+ }
+
+ //deallocate entityLockInfo if there is no holder and waiter.
+ if (entityLockInfoManager.getLastHolder(eLockInfo) == -1
+ && entityLockInfoManager.getFirstWaiter(eLockInfo) == -1
+ && entityLockInfoManager.getUpgrader(eLockInfo) == -1) {
+ dLockInfo.getEntityResourceHT().remove(entityHashValue);
+ entityLockInfoManager.deallocate(eLockInfo);
+ }
+ }
+
+ //deallocate entityInfo
+ entityInfoManager.deallocate(entityInfo);
+
+ entityInfo = prevEntityInfo;
+ }
+
+ //remove JobInfo
+ jobHT.remove(jobId);
+
+ if (existWaiter) {
+ txnContext.setStatus(TransactionContext.TIMED_OUT_STATUS);
+ txnContext.setTxnState(TransactionState.ABORTED);
+ }
+
+ if (IS_DEBUG_MODE) {
+ trackLockRequest("Granted", RequestType.RELEASE_LOCKS, new DatasetId(0), 0, (byte) 0, txnContext,
+ dLockInfo, eLockInfo);
+ }
+ } finally {
+ unlatchLockTable();
}
-
- //remove JobInfo
- jobHT.remove(jobId);
-
- if (existWaiter) {
- txnContext.setStatus(TransactionContext.TIMED_OUT_STATUS);
- txnContext.setTxnState(TransactionState.ABORTED);
- }
-
- if (IS_DEBUG_MODE) {
- trackLockRequest("Granted", RequestType.RELEASE_LOCKS, new DatasetId(0), 0, (byte) 0, txnContext,
- dLockInfo, eLockInfo);
- }
- unlatchLockTable();
}
@Override
@@ -1008,57 +1007,59 @@
boolean isSuccess = true;
latchLockTable();
- validateJob(txnContext);
+ try {
+ validateJob(txnContext);
- if (IS_DEBUG_MODE) {
- trackLockRequest("Requested", RequestType.INSTANT_TRY_LOCK, datasetId, entityHashValue, lockMode,
- txnContext, dLockInfo, -1);
- }
-
- dLockInfo = datasetResourceHT.get(datasetId);
-
- //#. if the datasetLockInfo doesn't exist in datasetResourceHT
- if (dLockInfo == null || dLockInfo.isNoHolder()) {
if (IS_DEBUG_MODE) {
- trackLockRequest("Granted", RequestType.INSTANT_TRY_LOCK, datasetId, entityHashValue, lockMode,
+ trackLockRequest("Requested", RequestType.INSTANT_TRY_LOCK, datasetId, entityHashValue, lockMode,
txnContext, dLockInfo, -1);
}
- unlatchLockTable();
- return true;
- }
+ dLockInfo = datasetResourceHT.get(datasetId);
- //#. the datasetLockInfo exists in datasetResourceHT.
- //1. handle dataset-granule lock
- byte datasetLockMode = entityHashValue == -1 ? lockMode : lockMode == LockMode.S ? LockMode.IS : LockMode.IX;
- if (datasetLockMode == LockMode.IS) {
- //[Notice]
- //Skip checking the dataset level lock compatibility if the requested LockMode is IS lock.
- //We know that this internalInstantTryLock() call with IS lock mode will be always granted
- //because we don't allow X lock on dataset-level except DDL operation.
- //During DDL operation, all other operations will be pending, so there is no conflict.
- isSuccess = true;
- } else {
- isSuccess = instantTryLockDatasetGranule(datasetId, entityHashValue, lockMode, txnContext, dLockInfo,
- datasetLockMode);
- }
+ //#. if the datasetLockInfo doesn't exist in datasetResourceHT
+ if (dLockInfo == null || dLockInfo.isNoHolder()) {
+ if (IS_DEBUG_MODE) {
+ trackLockRequest("Granted", RequestType.INSTANT_TRY_LOCK, datasetId, entityHashValue, lockMode,
+ txnContext, dLockInfo, -1);
+ }
+ return true;
+ }
- if (isSuccess && entityHashValue != -1) {
- //2. handle entity-granule lock
- isSuccess = instantTryLockEntityGranule(datasetId, entityHashValue, lockMode, txnContext, dLockInfo);
- }
-
- if (IS_DEBUG_MODE) {
- if (isSuccess) {
- trackLockRequest("Granted", RequestType.INSTANT_TRY_LOCK, datasetId, entityHashValue, lockMode,
- txnContext, dLockInfo, -1);
+ //#. the datasetLockInfo exists in datasetResourceHT.
+ //1. handle dataset-granule lock
+ byte datasetLockMode = entityHashValue == -1 ? lockMode : lockMode == LockMode.S ? LockMode.IS
+ : LockMode.IX;
+ if (datasetLockMode == LockMode.IS) {
+ //[Notice]
+ //Skip checking the dataset level lock compatibility if the requested LockMode is IS lock.
+ //We know that this internalInstantTryLock() call with IS lock mode will be always granted
+ //because we don't allow X lock on dataset-level except DDL operation.
+ //During DDL operation, all other operations will be pending, so there is no conflict.
+ isSuccess = true;
} else {
- trackLockRequest("Failed", RequestType.INSTANT_TRY_LOCK, datasetId, entityHashValue, lockMode,
- txnContext, dLockInfo, -1);
+ isSuccess = instantTryLockDatasetGranule(datasetId, entityHashValue, lockMode, txnContext, dLockInfo,
+ datasetLockMode);
}
- }
- unlatchLockTable();
+ if (isSuccess && entityHashValue != -1) {
+ //2. handle entity-granule lock
+ isSuccess = instantTryLockEntityGranule(datasetId, entityHashValue, lockMode, txnContext, dLockInfo);
+ }
+
+ if (IS_DEBUG_MODE) {
+ if (isSuccess) {
+ trackLockRequest("Granted", RequestType.INSTANT_TRY_LOCK, datasetId, entityHashValue, lockMode,
+ txnContext, dLockInfo, -1);
+ } else {
+ trackLockRequest("Failed", RequestType.INSTANT_TRY_LOCK, datasetId, entityHashValue, lockMode,
+ txnContext, dLockInfo, -1);
+ }
+ }
+
+ } finally {
+ unlatchLockTable();
+ }
return isSuccess;
}
@@ -1217,133 +1218,136 @@
boolean doEscalate = false;
latchLockTable();
- validateJob(txnContext);
+ try {
+ validateJob(txnContext);
- if (IS_DEBUG_MODE) {
- trackLockRequest("Requested", RequestType.TRY_LOCK, datasetId, entityHashValue, lockMode, txnContext,
- dLockInfo, eLockInfo);
- }
-
- dLockInfo = datasetResourceHT.get(datasetId);
- jobInfo = jobHT.get(jobId);
-
- if (ALLOW_ESCALATE_FROM_ENTITY_TO_DATASET) {
- if (!isInstant && datasetLockMode == LockMode.IS && jobInfo != null && dLockInfo != null) {
- int upgradeStatus = needEscalateFromEntityToDataset(jobInfo, dId, lockMode);
- switch (upgradeStatus) {
- case DO_ESCALATE:
- entityHashValue = -1;
- doEscalate = true;
- break;
-
- case ESCALATED:
- unlatchLockTable();
- return true;
-
- default:
- break;
- }
- }
- }
-
- //#. if the datasetLockInfo doesn't exist in datasetResourceHT
- if (dLockInfo == null || dLockInfo.isNoHolder()) {
- if (dLockInfo == null) {
- dLockInfo = new DatasetLockInfo(entityLockInfoManager, entityInfoManager, lockWaiterManager);
- datasetResourceHT.put(new DatasetId(dId), dLockInfo); //datsetId obj should be created
- }
- entityInfo = entityInfoManager.allocate(jId, dId, entityHashValue, lockMode);
-
- //if dataset-granule lock
- if (entityHashValue == -1) { //-1 stands for dataset-granule
- entityInfoManager.increaseDatasetLockCount(entityInfo);
- dLockInfo.increaseLockCount(datasetLockMode);
- dLockInfo.addHolder(entityInfo);
- } else {
- entityInfoManager.increaseDatasetLockCount(entityInfo);
- dLockInfo.increaseLockCount(datasetLockMode);
- //add entityLockInfo
- eLockInfo = entityLockInfoManager.allocate();
- dLockInfo.getEntityResourceHT().put(entityHashValue, eLockInfo);
- entityInfoManager.increaseEntityLockCount(entityInfo);
- entityLockInfoManager.increaseLockCount(eLockInfo, lockMode);
- entityLockInfoManager.addHolder(eLockInfo, entityInfo);
+ if (IS_DEBUG_MODE) {
+ trackLockRequest("Requested", RequestType.TRY_LOCK, datasetId, entityHashValue, lockMode, txnContext,
+ dLockInfo, eLockInfo);
}
- if (jobInfo == null) {
- jobInfo = new JobInfo(entityInfoManager, lockWaiterManager, txnContext);
- jobHT.put(jobId, jobInfo); //jobId obj doesn't have to be created
- }
- jobInfo.addHoldingResource(entityInfo);
+ dLockInfo = datasetResourceHT.get(datasetId);
+ jobInfo = jobHT.get(jobId);
if (ALLOW_ESCALATE_FROM_ENTITY_TO_DATASET) {
- if (!isInstant && datasetLockMode == LockMode.IS) {
- jobInfo.increaseDatasetISLockCount(dId);
+ if (!isInstant && datasetLockMode == LockMode.IS && jobInfo != null && dLockInfo != null) {
+ int upgradeStatus = needEscalateFromEntityToDataset(jobInfo, dId, lockMode);
+ switch (upgradeStatus) {
+ case DO_ESCALATE:
+ entityHashValue = -1;
+ doEscalate = true;
+ break;
+
+ case ESCALATED:
+ return true;
+
+ default:
+ break;
+ }
+ }
+ }
+
+ //#. if the datasetLockInfo doesn't exist in datasetResourceHT
+ if (dLockInfo == null || dLockInfo.isNoHolder()) {
+ if (dLockInfo == null) {
+ dLockInfo = new DatasetLockInfo(entityLockInfoManager, entityInfoManager, lockWaiterManager);
+ datasetResourceHT.put(new DatasetId(dId), dLockInfo); //datasetId obj should be created
+ }
+ entityInfo = entityInfoManager.allocate(jId, dId, entityHashValue, lockMode);
+
+ //if dataset-granule lock
+ if (entityHashValue == -1) { //-1 stands for dataset-granule
+ entityInfoManager.increaseDatasetLockCount(entityInfo);
+ dLockInfo.increaseLockCount(datasetLockMode);
+ dLockInfo.addHolder(entityInfo);
+ } else {
+ entityInfoManager.increaseDatasetLockCount(entityInfo);
+ dLockInfo.increaseLockCount(datasetLockMode);
+ //add entityLockInfo
+ eLockInfo = entityLockInfoManager.allocate();
+ dLockInfo.getEntityResourceHT().put(entityHashValue, eLockInfo);
+ entityInfoManager.increaseEntityLockCount(entityInfo);
+ entityLockInfoManager.increaseLockCount(eLockInfo, lockMode);
+ entityLockInfoManager.addHolder(eLockInfo, entityInfo);
+ }
+
+ if (jobInfo == null) {
+ jobInfo = new JobInfo(entityInfoManager, lockWaiterManager, txnContext);
+ jobHT.put(jobId, jobInfo); //jobId obj doesn't have to be created
+ }
+ jobInfo.addHoldingResource(entityInfo);
+
+ if (ALLOW_ESCALATE_FROM_ENTITY_TO_DATASET) {
+ if (!isInstant && datasetLockMode == LockMode.IS) {
+ jobInfo.increaseDatasetISLockCount(dId);
+ if (doEscalate) {
+ //This exception is thrown when the threshold value is set to 1.
+ //We don't want to allow the lock escalation when there is a first lock request on a dataset.
+ throw new IllegalStateException(
+ "ESCALATE_TRHESHOLD_ENTITY_TO_DATASET should not be set to "
+ + ESCALATE_TRHESHOLD_ENTITY_TO_DATASET);
+ }
+ }
+ }
+
+ if (IS_DEBUG_MODE) {
+ trackLockRequest("Granted", RequestType.TRY_LOCK, datasetId, entityHashValue, lockMode, txnContext,
+ dLockInfo, eLockInfo);
+ }
+
+ return true;
+ }
+
+ //#. the datasetLockInfo exists in datasetResourceHT.
+ //1. handle dataset-granule lock
+ tryLockDatasetGranuleRevertOperation = 0;
+ entityInfo = tryLockDatasetGranule(datasetId, entityHashValue, lockMode, txnContext);
+ if (entityInfo == -2) {//-2 represents fail
+ isSuccess = false;
+ } else {
+ //2. handle entity-granule lock
+ if (entityHashValue != -1) {
+ isSuccess = tryLockEntityGranule(datasetId, entityHashValue, lockMode, entityInfo, txnContext);
+ if (!isSuccess) {
+ revertTryLockDatasetGranuleOperation(datasetId, entityHashValue, lockMode, entityInfo,
+ txnContext);
+ }
+ }
+ }
+
+ if (ALLOW_ESCALATE_FROM_ENTITY_TO_DATASET) {
+ if (!isInstant) {
if (doEscalate) {
- //This exception is thrown when the threshold value is set to 1.
- //We don't want to allow the lock escalation when there is a first lock request on a dataset.
- throw new IllegalStateException("ESCALATE_TRHESHOLD_ENTITY_TO_DATASET should not be set to "
- + ESCALATE_TRHESHOLD_ENTITY_TO_DATASET);
+ //jobInfo must not be null.
+ assert jobInfo != null;
+ jobInfo.increaseDatasetISLockCount(dId);
+ //release pre-acquired locks
+ releaseDatasetISLocks(jobInfo, jobId, datasetId, txnContext);
+ } else if (datasetLockMode == LockMode.IS) {
+ if (jobInfo == null) {
+ jobInfo = jobHT.get(jobId);
+ //jobInfo must not be null;
+ assert jobInfo != null;
+ }
+ jobInfo.increaseDatasetISLockCount(dId);
}
}
}
if (IS_DEBUG_MODE) {
- trackLockRequest("Granted", RequestType.TRY_LOCK, datasetId, entityHashValue, lockMode, txnContext,
- dLockInfo, eLockInfo);
+ if (isSuccess) {
+ trackLockRequest("Granted", RequestType.TRY_LOCK, datasetId, entityHashValue, lockMode, txnContext,
+ dLockInfo, eLockInfo);
+ } else {
+ trackLockRequest("Failed", RequestType.TRY_LOCK, datasetId, entityHashValue, lockMode, txnContext,
+ dLockInfo, eLockInfo);
+ }
}
+ } finally {
unlatchLockTable();
- return true;
}
- //#. the datasetLockInfo exists in datasetResourceHT.
- //1. handle dataset-granule lock
- tryLockDatasetGranuleRevertOperation = 0;
- entityInfo = tryLockDatasetGranule(datasetId, entityHashValue, lockMode, txnContext);
- if (entityInfo == -2) {//-2 represents fail
- isSuccess = false;
- } else {
- //2. handle entity-granule lock
- if (entityHashValue != -1) {
- isSuccess = tryLockEntityGranule(datasetId, entityHashValue, lockMode, entityInfo, txnContext);
- if (!isSuccess) {
- revertTryLockDatasetGranuleOperation(datasetId, entityHashValue, lockMode, entityInfo, txnContext);
- }
- }
- }
-
- if (ALLOW_ESCALATE_FROM_ENTITY_TO_DATASET) {
- if (!isInstant) {
- if (doEscalate) {
- //jobInfo must not be null.
- assert jobInfo != null;
- jobInfo.increaseDatasetISLockCount(dId);
- //release pre-acquired locks
- releaseDatasetISLocks(jobInfo, jobId, datasetId, txnContext);
- } else if (datasetLockMode == LockMode.IS) {
- if (jobInfo == null) {
- jobInfo = jobHT.get(jobId);
- //jobInfo must not be null;
- assert jobInfo != null;
- }
- jobInfo.increaseDatasetISLockCount(dId);
- }
- }
- }
-
- if (IS_DEBUG_MODE) {
- if (isSuccess) {
- trackLockRequest("Granted", RequestType.TRY_LOCK, datasetId, entityHashValue, lockMode, txnContext,
- dLockInfo, eLockInfo);
- } else {
- trackLockRequest("Failed", RequestType.TRY_LOCK, datasetId, entityHashValue, lockMode, txnContext,
- dLockInfo, eLockInfo);
- }
- }
-
- unlatchLockTable();
-
return isSuccess;
}
@@ -1778,38 +1782,38 @@
latchWaitNotify();
unlatchLockTable();
- synchronized (waiter) {
- unlatchWaitNotify();
- while (waiter.needWait()) {
- try {
- if (IS_DEBUG_MODE) {
- System.out.println("" + Thread.currentThread().getName() + "\twaits("
- + waiter.getWaiterCount() + "): WID(" + waiterId + "),EID("
- + waiter.getEntityInfoSlot() + ")");
+ try {
+ synchronized (waiter) {
+ unlatchWaitNotify();
+ while (waiter.needWait()) {
+ try {
+ if (IS_DEBUG_MODE) {
+ System.out.println("" + Thread.currentThread().getName() + "\twaits("
+ + waiter.getWaiterCount() + "): WID(" + waiterId + "),EID("
+ + waiter.getEntityInfoSlot() + ")");
+ }
+ waiter.wait();
+ } catch (InterruptedException e) {
+ //TODO figure-out what is the appropriate way to handle this exception
+ e.printStackTrace();
+ isInterruptedExceptionOccurred = true;
+ waiter.setWait(false);
}
- waiter.wait();
- } catch (InterruptedException e) {
- //TODO figure-out what is the appropriate way to handle this exception
- e.printStackTrace();
- isInterruptedExceptionOccurred = true;
- waiter.setWait(false);
}
}
- }
- if (isInterruptedExceptionOccurred) {
- throw new ACIDException("InterruptedException is caught");
+ if (isInterruptedExceptionOccurred) {
+ throw new ACIDException("InterruptedException is caught");
+ }
+ } catch (Exception e) {
+ throw new LockMgrLatchHandlerException(e);
}
//waiter woke up -> remove/deallocate waiter object and abort if timeout
latchLockTable();
if (txnContext.getStatus() == TransactionContext.TIMED_OUT_STATUS || waiter.isVictim()) {
- try {
- requestAbort(txnContext);
- } finally {
- unlatchLockTable();
- }
+ requestAbort(txnContext);
}
if (waiter.isFirstGetUp()) {
@@ -1853,11 +1857,7 @@
//deallocate the entityInfo
entityInfoManager.deallocate(entityInfo);
}
- try {
- requestAbort(txnContext);
- } finally {
- unlatchLockTable();
- }
+ requestAbort(txnContext);
}
return waiterCount;
@@ -2028,20 +2028,22 @@
LockWaiter waiterObj;
latchLockTable();
+ try {
- Iterator<Entry<JobId, JobInfo>> iter = jobHT.entrySet().iterator();
- while (iter.hasNext()) {
- Map.Entry<JobId, JobInfo> pair = (Map.Entry<JobId, JobInfo>) iter.next();
- jobInfo = pair.getValue();
- waiterObjId = jobInfo.getFirstWaitingResource();
- while (waiterObjId != -1) {
- waiterObj = lockWaiterManager.getLockWaiter(waiterObjId);
- toutDetector.checkAndSetVictim(waiterObj);
- waiterObjId = waiterObj.getNextWaiterObjId();
+ Iterator<Entry<JobId, JobInfo>> iter = jobHT.entrySet().iterator();
+ while (iter.hasNext()) {
+ Map.Entry<JobId, JobInfo> pair = (Map.Entry<JobId, JobInfo>) iter.next();
+ jobInfo = pair.getValue();
+ waiterObjId = jobInfo.getFirstWaitingResource();
+ while (waiterObjId != -1) {
+ waiterObj = lockWaiterManager.getLockWaiter(waiterObjId);
+ toutDetector.checkAndSetVictim(waiterObj);
+ waiterObjId = waiterObj.getNextWaiterObjId();
+ }
}
+ } finally {
+ unlatchLockTable();
}
-
- unlatchLockTable();
}
}
diff --git a/asterix-transactions/src/main/java/edu/uci/ics/asterix/transaction/management/service/locking/LockMgrLatchHandlerException.java b/asterix-transactions/src/main/java/edu/uci/ics/asterix/transaction/management/service/locking/LockMgrLatchHandlerException.java
new file mode 100644
index 0000000..05a582c
--- /dev/null
+++ b/asterix-transactions/src/main/java/edu/uci/ics/asterix/transaction/management/service/locking/LockMgrLatchHandlerException.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-2014 by The Regents of the University of California
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * you may obtain a copy of the License from
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package edu.uci.ics.asterix.transaction.management.service.locking;
+
+import edu.uci.ics.asterix.transaction.management.exception.ACIDException;
+
+public class LockMgrLatchHandlerException extends ACIDException {
+
+ private static final long serialVersionUID = 1203182080428864199L;
+ private final Exception internalException;
+
+ public LockMgrLatchHandlerException(Exception e) {
+ super(e);
+ this.internalException = e;
+ }
+
+ public Exception getInternalException() {
+ return internalException;
+ }
+}
\ No newline at end of file
diff --git a/pom.xml b/pom.xml
index 2d0922f..b424195 100644
--- a/pom.xml
+++ b/pom.xml
@@ -7,8 +7,8 @@
<packaging>pom</packaging>
<properties>
- <algebricks.version>0.2.4</algebricks.version>
- <hyracks.version>0.2.4</hyracks.version>
+ <algebricks.version>0.2.5-SNAPSHOT</algebricks.version>
+ <hyracks.version>0.2.5-SNAPSHOT</hyracks.version>
</properties>
<build>