Revert "ASTERIXDB-1109: Fixed deletion of records from open secondary index"
This reverts commit a05f71826a4ab2c0e7148009e0b7e269b15e0a2f.
Change-Id: I8fcf41c009f9faf51bc8bccc0c3c7217b7769148
Reviewed-on: https://asterix-gerrit.ics.uci.edu/698
Tested-by: Jenkins <jenkins@fulliautomatix.ics.uci.edu>
Reviewed-by: Ildar Absalyamov <ildar.absalyamov@gmail.com>
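For context, the heart of the reverted change is the createEnforcedType helper in IntroduceSecondaryIndexInsertDeleteRule, which widens a record type so that a field indexed through an open ("enforced") secondary index shows up in the closed part of the type as a nullable field. The sketch below illustrates only that idea with toy string-based types; it is plain Java written for this note and is not the AsterixDB type API.

import java.util.LinkedHashMap;
import java.util.Map;

// Minimal sketch of the "enforced type" idea: an open record type is
// widened so an indexed-but-undeclared field appears as a nullable field.
// Hypothetical representation (field name -> type name, '?' = nullable).
public class EnforcedTypeSketch {

    static Map<String, String> enforce(Map<String, String> recordType,
                                       String indexedField, String indexedType) {
        Map<String, String> enforced = new LinkedHashMap<>(recordType);
        if (!enforced.containsKey(indexedField)) {
            // Field is open: add it as nullable so insert/delete pipelines
            // can extract a typed secondary key even when it is absent.
            enforced.put(indexedField, indexedType + "?");
        }
        return enforced;
    }

    public static void main(String[] args) {
        Map<String, String> person = new LinkedHashMap<>();
        person.put("personId", "int32");
        person.put("name", "string");
        // Enforcing an index on the undeclared field "age" yields int32?
        System.out.println(enforce(person, "age", "int32"));
        // prints: {personId=int32, name=string, age=int32?}
    }
}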
diff --git a/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/IntroduceMaterializationForInsertWithSelfScanRule.java b/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/IntroduceMaterializationForInsertWithSelfScanRule.java
index 43ec793..adce8ce 100644
--- a/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/IntroduceMaterializationForInsertWithSelfScanRule.java
+++ b/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/IntroduceMaterializationForInsertWithSelfScanRule.java
@@ -42,7 +42,6 @@
import org.apache.hyracks.algebricks.core.algebra.operators.physical.MaterializePOperator;
import org.apache.hyracks.algebricks.core.rewriter.base.IAlgebraicRewriteRule;
-// TODO: Reconsider if materialization is needed in delete pipeline
public class IntroduceMaterializationForInsertWithSelfScanRule implements IAlgebraicRewriteRule {
@Override
diff --git a/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/IntroduceSecondaryIndexInsertDeleteRule.java b/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/IntroduceSecondaryIndexInsertDeleteRule.java
index 1ff97b3..6eb3807 100644
--- a/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/IntroduceSecondaryIndexInsertDeleteRule.java
+++ b/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/IntroduceSecondaryIndexInsertDeleteRule.java
@@ -22,12 +22,14 @@
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
import java.util.List;
-import java.util.Set;
import java.util.Stack;
+import org.apache.commons.lang3.ArrayUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.mutable.Mutable;
+import org.apache.commons.lang3.mutable.MutableObject;
+
import org.apache.asterix.aql.util.FunctionUtils;
import org.apache.asterix.common.config.DatasetConfig.DatasetType;
import org.apache.asterix.common.config.DatasetConfig.IndexType;
@@ -51,12 +53,7 @@
import org.apache.asterix.om.types.AUnionType;
import org.apache.asterix.om.types.BuiltinType;
import org.apache.asterix.om.types.IAType;
-import org.apache.asterix.om.types.hierachy.ATypeHierarchy;
import org.apache.asterix.om.util.NonTaggedFormatUtil;
-import org.apache.commons.lang3.ArrayUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.lang3.mutable.Mutable;
-import org.apache.commons.lang3.mutable.MutableObject;
import org.apache.hyracks.algebricks.common.exceptions.AlgebricksException;
import org.apache.hyracks.algebricks.common.utils.Pair;
import org.apache.hyracks.algebricks.core.algebra.base.ILogicalExpression;
@@ -76,7 +73,6 @@
import org.apache.hyracks.algebricks.core.algebra.operators.logical.AssignOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.IndexInsertDeleteOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.InsertDeleteOperator;
-import org.apache.hyracks.algebricks.core.algebra.operators.logical.InsertDeleteOperator.Kind;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.ProjectOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.ReplicateOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.TokenizeOperator;
@@ -86,8 +82,7 @@
public class IntroduceSecondaryIndexInsertDeleteRule implements IAlgebraicRewriteRule {
@Override
- public boolean rewritePre(Mutable<ILogicalOperator> opRef, IOptimizationContext context)
- throws AlgebricksException {
+ public boolean rewritePre(Mutable<ILogicalOperator> opRef, IOptimizationContext context) throws AlgebricksException {
return false;
}
@@ -105,14 +100,11 @@
FunctionIdentifier fid = null;
/** find the record variable */
- InsertDeleteOperator insertDeleteOp = (InsertDeleteOperator) op1;
- ILogicalExpression recordExpr = insertDeleteOp.getPayloadExpression().getValue();
- LogicalVariable recordVar = null;
- List<LogicalVariable> usedRecordVars = new ArrayList<>();
+ InsertDeleteOperator insertOp = (InsertDeleteOperator) op1;
+ ILogicalExpression recordExpr = insertOp.getPayloadExpression().getValue();
+ List<LogicalVariable> recordVar = new ArrayList<LogicalVariable>();
/** assume the payload is always a single variable expression */
- recordExpr.getUsedVariables(usedRecordVars);
- if (usedRecordVars.size() == 1)
- recordVar = usedRecordVars.get(0);
+ recordExpr.getUsedVariables(recordVar);
/**
* op2 is the assign operator which extract primary keys from the record
@@ -120,7 +112,7 @@
*/
AbstractLogicalOperator op2 = (AbstractLogicalOperator) op1.getInputs().get(0).getValue();
- if (recordVar == null) {
+ if (recordVar.size() == 0) {
/**
* For the case primary key-assignment expressions are constant
* expressions, find assign op that creates record to be
@@ -143,9 +135,9 @@
}
}
AssignOperator assignOp2 = (AssignOperator) op2;
- recordVar = assignOp2.getVariables().get(0);
+ recordVar.addAll(assignOp2.getVariables());
}
- AqlDataSource datasetSource = (AqlDataSource) insertDeleteOp.getDataSource();
+ AqlDataSource datasetSource = (AqlDataSource) insertOp.getDataSource();
AqlMetadataProvider mp = (AqlMetadataProvider) context.getMetadataProvider();
String dataverseName = datasetSource.getId().getDataverseName();
String datasetName = datasetSource.getId().getDatasourceName();
@@ -190,6 +182,19 @@
op0.getInputs().clear();
}
+ // Replicate Operator is applied only when doing the bulk-load.
+ AbstractLogicalOperator replicateOp = null;
+
+ if (secondaryIndexTotalCnt > 1 && insertOp.isBulkload()) {
+ // Split the logical plan into "each secondary index update branch"
+ // to replicate each <PK,RECORD> pair.
+ replicateOp = new ReplicateOperator(secondaryIndexTotalCnt);
+ replicateOp.getInputs().add(new MutableObject<ILogicalOperator>(currentTop));
+ replicateOp.setExecutionMode(ExecutionMode.PARTITIONED);
+ context.computeAndSetTypeEnvironmentForOperator(replicateOp);
+ currentTop = replicateOp;
+ }
+
// Prepare filtering field information
List<String> additionalFilteringField = ((InternalDatasetDetails) dataset.getDatasetDetails()).getFilterField();
List<LogicalVariable> additionalFilteringVars = null;
@@ -201,70 +206,53 @@
additionalFilteringVars = new ArrayList<LogicalVariable>();
additionalFilteringAssignExpressions = new ArrayList<Mutable<ILogicalExpression>>();
additionalFilteringExpressions = new ArrayList<Mutable<ILogicalExpression>>();
- prepareVarAndExpression(additionalFilteringField, recType.getFieldNames(), recordVar,
+ prepareVarAndExpression(additionalFilteringField, recType.getFieldNames(), recordVar.get(0),
additionalFilteringAssignExpressions, additionalFilteringVars, context);
additionalFilteringAssign = new AssignOperator(additionalFilteringVars,
additionalFilteringAssignExpressions);
for (LogicalVariable var : additionalFilteringVars) {
- additionalFilteringExpressions
- .add(new MutableObject<ILogicalExpression>(new VariableReferenceExpression(var)));
+ additionalFilteringExpressions.add(new MutableObject<ILogicalExpression>(
+ new VariableReferenceExpression(var)));
}
}
- LogicalVariable enforcedRecordVar = recordVar;
- if (insertDeleteOp.getOperation() == Kind.INSERT) {
- try {
- DatasetDataSource ds = (DatasetDataSource) (insertDeleteOp.getDataSource());
- ARecordType insertRecType = (ARecordType) ds.getSchemaTypes()[ds.getSchemaTypes().length - 1];
- LogicalVariable castVar = context.newVar();
- ARecordType enforcedType = createEnforcedType(insertRecType, indexes);
- if (!enforcedType.equals(insertRecType)) {
+ // Iterate over each secondary index and apply the index update operations.
+ for (Index index : indexes) {
+ List<LogicalVariable> projectVars = new ArrayList<LogicalVariable>();
+ VariableUtilities.getUsedVariables(op1, projectVars);
+ if (!index.isSecondaryIndex()) {
+ continue;
+ }
+ LogicalVariable enforcedRecordVar = recordVar.get(0);
+ hasSecondaryIndex = true;
+ //if the index is enforcing field types
+ if (index.isEnforcingKeyFileds()) {
+ try {
+ DatasetDataSource ds = (DatasetDataSource) (insertOp.getDataSource());
+ ARecordType insertRecType = (ARecordType) ds.getSchemaTypes()[ds.getSchemaTypes().length - 1];
+ LogicalVariable castVar = context.newVar();
+ ARecordType enforcedType = createEnforcedType(insertRecType, index);
//introduce casting to enforced type
AbstractFunctionCallExpression castFunc = new ScalarFunctionCallExpression(
FunctionUtils.getFunctionInfo(AsterixBuiltinFunctions.CAST_RECORD));
castFunc.getArguments().add(
- new MutableObject<ILogicalExpression>(insertDeleteOp.getPayloadExpression().getValue()));
+ new MutableObject<ILogicalExpression>(insertOp.getPayloadExpression().getValue()));
TypeComputerUtilities.setRequiredAndInputTypes(castFunc, enforcedType, insertRecType);
- AssignOperator castedRecordAssignOperator = new AssignOperator(castVar,
+ AssignOperator newAssignOperator = new AssignOperator(castVar,
new MutableObject<ILogicalExpression>(castFunc));
- castedRecordAssignOperator.getInputs().add(new MutableObject<ILogicalOperator>(currentTop));
- currentTop = castedRecordAssignOperator;
+ newAssignOperator.getInputs().add(new MutableObject<ILogicalOperator>(currentTop));
+ currentTop = newAssignOperator;
+ //project out casted record
+ projectVars.add(castVar);
enforcedRecordVar = castVar;
+ context.computeAndSetTypeEnvironmentForOperator(newAssignOperator);
+ context.computeAndSetTypeEnvironmentForOperator(currentTop);
recType = enforcedType;
- context.computeAndSetTypeEnvironmentForOperator(castedRecordAssignOperator);
+ } catch (AsterixException e) {
+ throw new AlgebricksException(e);
}
- } catch (AsterixException e) {
- throw new AlgebricksException(e);
}
- }
- Set<LogicalVariable> projectVars = new HashSet<LogicalVariable>();
- VariableUtilities.getUsedVariables(op1, projectVars);
- if (enforcedRecordVar != null)
- projectVars.add(enforcedRecordVar);
- ProjectOperator project = new ProjectOperator(new ArrayList<LogicalVariable>(projectVars));
- project.getInputs().add(new MutableObject<ILogicalOperator>(currentTop));
- context.computeAndSetTypeEnvironmentForOperator(project);
- currentTop = project;
-
- // Replicate Operator is applied only when doing the bulk-load.
- AbstractLogicalOperator replicateOp = null;
- if (secondaryIndexTotalCnt > 1 && insertDeleteOp.isBulkload()) {
- // Split the logical plan into "each secondary index update branch"
- // to replicate each <PK,RECORD> pair.
- replicateOp = new ReplicateOperator(secondaryIndexTotalCnt);
- replicateOp.getInputs().add(new MutableObject<ILogicalOperator>(currentTop));
- replicateOp.setExecutionMode(ExecutionMode.PARTITIONED);
- context.computeAndSetTypeEnvironmentForOperator(replicateOp);
- currentTop = replicateOp;
- }
-
- // Iterate each secondary index and applying Index Update operations.
- for (Index index : indexes) {
- if (!index.isSecondaryIndex()) {
- continue;
- }
- hasSecondaryIndex = true;
List<List<String>> secondaryKeyFields = index.getKeyFieldNames();
List<IAType> secondaryKeyTypes = index.getKeyFieldTypes();
@@ -278,35 +266,39 @@
}
AssignOperator assign = new AssignOperator(secondaryKeyVars, expressions);
+ ProjectOperator project = new ProjectOperator(projectVars);
- ILogicalOperator filterOrAssignOp = null;
if (additionalFilteringAssign != null) {
- filterOrAssignOp = additionalFilteringAssign;
+ additionalFilteringAssign.getInputs().add(new MutableObject<ILogicalOperator>(project));
assign.getInputs().add(new MutableObject<ILogicalOperator>(additionalFilteringAssign));
} else {
- filterOrAssignOp = assign;
+ assign.getInputs().add(new MutableObject<ILogicalOperator>(project));
}
// Only apply replicate operator when doing bulk-load
- if (secondaryIndexTotalCnt > 1 && insertDeleteOp.isBulkload())
- filterOrAssignOp.getInputs().add(new MutableObject<ILogicalOperator>(replicateOp));
+ if (secondaryIndexTotalCnt > 1 && insertOp.isBulkload())
+ project.getInputs().add(new MutableObject<ILogicalOperator>(replicateOp));
else
- filterOrAssignOp.getInputs().add(new MutableObject<ILogicalOperator>(currentTop));
+ project.getInputs().add(new MutableObject<ILogicalOperator>(currentTop));
+
+ context.computeAndSetTypeEnvironmentForOperator(project);
if (additionalFilteringAssign != null) {
context.computeAndSetTypeEnvironmentForOperator(additionalFilteringAssign);
}
+
context.computeAndSetTypeEnvironmentForOperator(assign);
currentTop = assign;
// BTree, Keyword, or n-gram index case
- if (index.getIndexType() == IndexType.BTREE || index.getIndexType() == IndexType.SINGLE_PARTITION_WORD_INVIX
+ if (index.getIndexType() == IndexType.BTREE
+ || index.getIndexType() == IndexType.SINGLE_PARTITION_WORD_INVIX
|| index.getIndexType() == IndexType.SINGLE_PARTITION_NGRAM_INVIX
|| index.getIndexType() == IndexType.LENGTH_PARTITIONED_WORD_INVIX
|| index.getIndexType() == IndexType.LENGTH_PARTITIONED_NGRAM_INVIX) {
for (LogicalVariable secondaryKeyVar : secondaryKeyVars) {
- secondaryExpressions.add(
- new MutableObject<ILogicalExpression>(new VariableReferenceExpression(secondaryKeyVar)));
+ secondaryExpressions.add(new MutableObject<ILogicalExpression>(new VariableReferenceExpression(
+ secondaryKeyVar)));
}
Mutable<ILogicalExpression> filterExpression = createFilterExpression(secondaryKeyVars,
context.getOutputTypeEnvironment(currentTop), false);
@@ -314,7 +306,7 @@
// Introduce the TokenizeOperator only when doing bulk-load,
// and index type is keyword or n-gram.
- if (index.getIndexType() != IndexType.BTREE && insertDeleteOp.isBulkload()) {
+ if (index.getIndexType() != IndexType.BTREE && insertOp.isBulkload()) {
// Check whether the index is length-partitioned or not.
// If partitioned, [input variables to TokenizeOperator,
@@ -334,8 +326,8 @@
List<Mutable<ILogicalExpression>> tokenizeKeyExprs = new ArrayList<Mutable<ILogicalExpression>>();
LogicalVariable tokenVar = context.newVar();
tokenizeKeyVars.add(tokenVar);
- tokenizeKeyExprs
- .add(new MutableObject<ILogicalExpression>(new VariableReferenceExpression(tokenVar)));
+ tokenizeKeyExprs.add(new MutableObject<ILogicalExpression>(
+ new VariableReferenceExpression(tokenVar)));
// Check the field type of the secondary key.
IAType secondaryKeyType = null;
@@ -353,22 +345,21 @@
if (isPartitioned) {
LogicalVariable lengthVar = context.newVar();
tokenizeKeyVars.add(lengthVar);
- tokenizeKeyExprs
- .add(new MutableObject<ILogicalExpression>(new VariableReferenceExpression(lengthVar)));
+ tokenizeKeyExprs.add(new MutableObject<ILogicalExpression>(new VariableReferenceExpression(
+ lengthVar)));
varTypes.add(BuiltinType.SHORTWITHOUTTYPEINFO);
}
// TokenizeOperator to tokenize [SK, PK] pairs
TokenizeOperator tokenUpdate = new TokenizeOperator(dataSourceIndex,
- insertDeleteOp.getPrimaryKeyExpressions(), secondaryExpressions, tokenizeKeyVars,
- filterExpression, insertDeleteOp.getOperation(), insertDeleteOp.isBulkload(), isPartitioned,
- varTypes);
+ insertOp.getPrimaryKeyExpressions(), secondaryExpressions, tokenizeKeyVars,
+ filterExpression, insertOp.getOperation(), insertOp.isBulkload(), isPartitioned, varTypes);
tokenUpdate.getInputs().add(new MutableObject<ILogicalOperator>(assign));
context.computeAndSetTypeEnvironmentForOperator(tokenUpdate);
IndexInsertDeleteOperator indexUpdate = new IndexInsertDeleteOperator(dataSourceIndex,
- insertDeleteOp.getPrimaryKeyExpressions(), tokenizeKeyExprs, filterExpression,
- insertDeleteOp.getOperation(), insertDeleteOp.isBulkload());
+ insertOp.getPrimaryKeyExpressions(), tokenizeKeyExprs, filterExpression,
+ insertOp.getOperation(), insertOp.isBulkload());
indexUpdate.setAdditionalFilteringExpressions(additionalFilteringExpressions);
indexUpdate.getInputs().add(new MutableObject<ILogicalOperator>(tokenUpdate));
@@ -380,15 +371,15 @@
} else {
// When TokenizeOperator is not needed
IndexInsertDeleteOperator indexUpdate = new IndexInsertDeleteOperator(dataSourceIndex,
- insertDeleteOp.getPrimaryKeyExpressions(), secondaryExpressions, filterExpression,
- insertDeleteOp.getOperation(), insertDeleteOp.isBulkload());
+ insertOp.getPrimaryKeyExpressions(), secondaryExpressions, filterExpression,
+ insertOp.getOperation(), insertOp.isBulkload());
indexUpdate.setAdditionalFilteringExpressions(additionalFilteringExpressions);
indexUpdate.getInputs().add(new MutableObject<ILogicalOperator>(currentTop));
currentTop = indexUpdate;
context.computeAndSetTypeEnvironmentForOperator(indexUpdate);
- if (insertDeleteOp.isBulkload())
+ if (insertOp.isBulkload())
op0.getInputs().add(new MutableObject<ILogicalOperator>(currentTop));
}
@@ -406,17 +397,20 @@
keyVarList.add(keyVar);
AbstractFunctionCallExpression createMBR = new ScalarFunctionCallExpression(
FunctionUtils.getFunctionInfo(AsterixBuiltinFunctions.CREATE_MBR));
- createMBR.getArguments().add(new MutableObject<ILogicalExpression>(
- new VariableReferenceExpression(secondaryKeyVars.get(0))));
- createMBR.getArguments().add(new MutableObject<ILogicalExpression>(
- new ConstantExpression(new AsterixConstantValue(new AInt32(dimension)))));
- createMBR.getArguments().add(new MutableObject<ILogicalExpression>(
- new ConstantExpression(new AsterixConstantValue(new AInt32(i)))));
+ createMBR.getArguments().add(
+ new MutableObject<ILogicalExpression>(new VariableReferenceExpression(secondaryKeyVars
+ .get(0))));
+ createMBR.getArguments().add(
+ new MutableObject<ILogicalExpression>(new ConstantExpression(new AsterixConstantValue(
+ new AInt32(dimension)))));
+ createMBR.getArguments().add(
+ new MutableObject<ILogicalExpression>(new ConstantExpression(new AsterixConstantValue(
+ new AInt32(i)))));
keyExprList.add(new MutableObject<ILogicalExpression>(createMBR));
}
for (LogicalVariable secondaryKeyVar : keyVarList) {
- secondaryExpressions.add(
- new MutableObject<ILogicalExpression>(new VariableReferenceExpression(secondaryKeyVar)));
+ secondaryExpressions.add(new MutableObject<ILogicalExpression>(new VariableReferenceExpression(
+ secondaryKeyVar)));
}
AssignOperator assignCoordinates = new AssignOperator(keyVarList, keyExprList);
assignCoordinates.getInputs().add(new MutableObject<ILogicalOperator>(currentTop));
@@ -428,14 +422,14 @@
context.getOutputTypeEnvironment(assignCoordinates), forceFilter);
AqlIndex dataSourceIndex = new AqlIndex(index, dataverseName, datasetName, mp);
IndexInsertDeleteOperator indexUpdate = new IndexInsertDeleteOperator(dataSourceIndex,
- insertDeleteOp.getPrimaryKeyExpressions(), secondaryExpressions, filterExpression,
- insertDeleteOp.getOperation(), insertDeleteOp.isBulkload());
+ insertOp.getPrimaryKeyExpressions(), secondaryExpressions, filterExpression,
+ insertOp.getOperation(), insertOp.isBulkload());
indexUpdate.setAdditionalFilteringExpressions(additionalFilteringExpressions);
indexUpdate.getInputs().add(new MutableObject<ILogicalOperator>(assignCoordinates));
currentTop = indexUpdate;
context.computeAndSetTypeEnvironmentForOperator(indexUpdate);
- if (insertDeleteOp.isBulkload())
+ if (insertOp.isBulkload())
op0.getInputs().add(new MutableObject<ILogicalOperator>(currentTop));
}
@@ -445,107 +439,79 @@
return false;
}
- if (!insertDeleteOp.isBulkload()) {
+ if (!insertOp.isBulkload()) {
op0.getInputs().clear();
op0.getInputs().add(new MutableObject<ILogicalOperator>(currentTop));
}
return true;
}
- // Merges typed index fields with specified recordType, allowing indexed fields to be optional.
- // I.e. the type { "personId":int32, "name": string, "address" : { "street": string } } with typed indexes on age:int32, address.state:string
- // will be merged into type { "personId":int32, "name": string, "age": int32? "address" : { "street": string, "state": string? } }
- // Used by open indexes to enforce the type of an indexed record
- public static ARecordType createEnforcedType(ARecordType initialType, List<Index> indexes)
- throws AsterixException, AlgebricksException {
+ public static ARecordType createEnforcedType(ARecordType initialType, Index index) throws AsterixException,
+ AlgebricksException {
ARecordType enforcedType = initialType;
- for (Index index : indexes) {
- if (!index.isSecondaryIndex() || !index.isEnforcingKeyFileds()) {
- continue;
- }
- for (int i = 0; i < index.getKeyFieldNames().size(); i++) {
- try {
- Stack<Pair<ARecordType, String>> nestedTypeStack = new Stack<Pair<ARecordType, String>>();
- List<String> splits = index.getKeyFieldNames().get(i);
- ARecordType nestedFieldType = enforcedType;
- boolean openRecords = false;
- String bridgeName = nestedFieldType.getTypeName();
- int j;
- //Build the stack for the enforced type
- for (j = 1; j < splits.size(); j++) {
- nestedTypeStack.push(new Pair<ARecordType, String>(nestedFieldType, splits.get(j - 1)));
- bridgeName = nestedFieldType.getTypeName();
- nestedFieldType = (ARecordType) enforcedType.getSubFieldType(splits.subList(0, j));
- if (nestedFieldType == null) {
- openRecords = true;
- break;
- }
+ for (int i = 0; i < index.getKeyFieldNames().size(); i++) {
+ try {
+ Stack<Pair<ARecordType, String>> nestedTypeStack = new Stack<Pair<ARecordType, String>>();
+ List<String> splits = index.getKeyFieldNames().get(i);
+ ARecordType nestedFieldType = enforcedType;
+ boolean openRecords = false;
+ String bridgeName = nestedFieldType.getTypeName();
+ int j;
+ //Build the stack for the enforced type
+ for (j = 1; j < splits.size(); j++) {
+ nestedTypeStack.push(new Pair<ARecordType, String>(nestedFieldType, splits.get(j - 1)));
+ bridgeName = nestedFieldType.getTypeName();
+ nestedFieldType = (ARecordType) enforcedType.getSubFieldType(splits.subList(0, j));
+ if (nestedFieldType == null) {
+ openRecords = true;
+ break;
}
- if (openRecords == true) {
- //create the smallest record
- enforcedType = new ARecordType(splits.get(splits.size() - 2),
- new String[] { splits.get(splits.size() - 1) },
- new IAType[] { AUnionType.createNullableType(index.getKeyFieldTypes().get(i)) }, true);
- //create the open part of the nested field
- for (int k = splits.size() - 3; k > (j - 2); k--) {
- enforcedType = new ARecordType(splits.get(k), new String[] { splits.get(k + 1) },
- new IAType[] { AUnionType.createNullableType(enforcedType) }, true);
- }
- //Bridge the gap
- Pair<ARecordType, String> gapPair = nestedTypeStack.pop();
- ARecordType parent = gapPair.first;
-
- IAType[] parentFieldTypes = ArrayUtils.addAll(parent.getFieldTypes().clone(),
- new IAType[] { AUnionType.createNullableType(enforcedType) });
- enforcedType = new ARecordType(bridgeName,
- ArrayUtils.addAll(parent.getFieldNames(), enforcedType.getTypeName()), parentFieldTypes,
- true);
-
- } else {
- //Schema is closed all the way to the field
- //enforced fields are either null or strongly typed
- LinkedHashMap<String, IAType> recordNameTypesMap = new LinkedHashMap<String, IAType>();
- for (j = 0; j < nestedFieldType.getFieldNames().length; j++) {
- recordNameTypesMap.put(nestedFieldType.getFieldNames()[j],
- nestedFieldType.getFieldTypes()[j]);
- }
- // if a an enforced field already exists and the type is correct
- IAType enforcedFieldType = recordNameTypesMap.get(splits.get(splits.size() - 1));
- if (enforcedFieldType != null && enforcedFieldType.getTypeTag() == ATypeTag.UNION
- && ((AUnionType) enforcedFieldType).isNullableType())
- enforcedFieldType = ((AUnionType) enforcedFieldType).getNullableType();
- if (enforcedFieldType != null && !ATypeHierarchy.canPromote(enforcedFieldType.getTypeTag(),
- index.getKeyFieldTypes().get(i).getTypeTag()))
- throw new AlgebricksException("Cannot enforce field " + index.getKeyFieldNames().get(i)
- + " to have type " + index.getKeyFieldTypes().get(i));
- if (enforcedFieldType == null)
- recordNameTypesMap.put(splits.get(splits.size() - 1),
- AUnionType.createNullableType(index.getKeyFieldTypes().get(i)));
- enforcedType = new ARecordType(nestedFieldType.getTypeName(),
- recordNameTypesMap.keySet().toArray(new String[recordNameTypesMap.size()]),
- recordNameTypesMap.values().toArray(new IAType[recordNameTypesMap.size()]),
- nestedFieldType.isOpen());
- }
-
- //Create the enforcedtype for the nested fields in the schema, from the ground up
- if (nestedTypeStack.size() > 0) {
- while (!nestedTypeStack.isEmpty()) {
- Pair<ARecordType, String> nestedTypePair = nestedTypeStack.pop();
- ARecordType nestedRecType = nestedTypePair.first;
- IAType[] nestedRecTypeFieldTypes = nestedRecType.getFieldTypes().clone();
- nestedRecTypeFieldTypes[nestedRecType
- .findFieldPosition(nestedTypePair.second)] = enforcedType;
- enforcedType = new ARecordType(nestedRecType.getTypeName() + "_enforced",
- nestedRecType.getFieldNames(), nestedRecTypeFieldTypes, nestedRecType.isOpen());
- }
- }
-
- } catch (AsterixException e) {
- throw new AlgebricksException(
- "Cannot enforce typed fields " + StringUtils.join(index.getKeyFieldNames()), e);
- } catch (IOException e) {
- throw new AsterixException(e);
}
+ if (openRecords == true) {
+ //create the smallest record
+ enforcedType = new ARecordType(splits.get(splits.size() - 2), new String[] { splits.get(splits
+ .size() - 1) }, new IAType[] { AUnionType.createNullableType(index.getKeyFieldTypes()
+ .get(i)) }, true);
+ //create the open part of the nested field
+ for (int k = splits.size() - 3; k > (j - 2); k--) {
+ enforcedType = new ARecordType(splits.get(k), new String[] { splits.get(k + 1) },
+ new IAType[] { AUnionType.createNullableType(enforcedType) }, true);
+ }
+ //Bridge the gap
+ Pair<ARecordType, String> gapPair = nestedTypeStack.pop();
+ ARecordType parent = gapPair.first;
+
+ IAType[] parentFieldTypes = ArrayUtils.addAll(parent.getFieldTypes().clone(),
+ new IAType[] { AUnionType.createNullableType(enforcedType) });
+ enforcedType = new ARecordType(bridgeName, ArrayUtils.addAll(parent.getFieldNames(),
+ enforcedType.getTypeName()), parentFieldTypes, true);
+
+ } else {
+ //Schema is closed all the way to the field
+ //enforced fields are either null or strongly typed
+ enforcedType = new ARecordType(nestedFieldType.getTypeName(), ArrayUtils.addAll(
+ nestedFieldType.getFieldNames(), splits.get(splits.size() - 1)), ArrayUtils.addAll(
+ nestedFieldType.getFieldTypes(),
+ AUnionType.createNullableType(index.getKeyFieldTypes().get(i))), nestedFieldType.isOpen());
+ }
+
+ //Create the enforced type for the nested fields in the schema, from the ground up
+ if (nestedTypeStack.size() > 0) {
+ while (!nestedTypeStack.isEmpty()) {
+ Pair<ARecordType, String> nestedTypePair = nestedTypeStack.pop();
+ ARecordType nestedRecType = nestedTypePair.first;
+ IAType[] nestedRecTypeFieldTypes = nestedRecType.getFieldTypes().clone();
+ nestedRecTypeFieldTypes[nestedRecType.findFieldPosition(nestedTypePair.second)] = enforcedType;
+ enforcedType = new ARecordType(nestedRecType.getTypeName(), nestedRecType.getFieldNames(),
+ nestedRecTypeFieldTypes, nestedRecType.isOpen());
+ }
+ }
+
+ } catch (AsterixException e) {
+ throw new AlgebricksException("Cannot enforce typed fields "
+ + StringUtils.join(index.getKeyFieldNames()), e);
+ } catch (IOException e) {
+ throw new AsterixException(e);
}
}
return enforcedType;
@@ -554,9 +520,9 @@
@SuppressWarnings("unchecked")
private void prepareVarAndExpression(List<String> field, String[] fieldNames, LogicalVariable recordVar,
List<Mutable<ILogicalExpression>> expressions, List<LogicalVariable> vars, IOptimizationContext context)
- throws AlgebricksException {
- Mutable<ILogicalExpression> varRef = new MutableObject<ILogicalExpression>(
- new VariableReferenceExpression(recordVar));
+ throws AlgebricksException {
+ Mutable<ILogicalExpression> varRef = new MutableObject<ILogicalExpression>(new VariableReferenceExpression(
+ recordVar));
int pos = -1;
if (field.size() == 1) {
for (int j = 0; j < fieldNames.length; j++) {
@@ -573,14 +539,14 @@
for (int i = 0; i < field.size(); i++) {
fieldList.add(new AString(field.get(i)));
}
- Mutable<ILogicalExpression> fieldRef = new MutableObject<ILogicalExpression>(
- new ConstantExpression(new AsterixConstantValue(fieldList)));
+ Mutable<ILogicalExpression> fieldRef = new MutableObject<ILogicalExpression>(new ConstantExpression(
+ new AsterixConstantValue(fieldList)));
//Create an expression for the nested case
func = new ScalarFunctionCallExpression(
FunctionUtils.getFunctionInfo(AsterixBuiltinFunctions.FIELD_ACCESS_NESTED), varRef, fieldRef);
} else {
- Mutable<ILogicalExpression> fieldRef = new MutableObject<ILogicalExpression>(
- new ConstantExpression(new AsterixConstantValue(new AString(field.get(0)))));
+ Mutable<ILogicalExpression> fieldRef = new MutableObject<ILogicalExpression>(new ConstantExpression(
+ new AsterixConstantValue(new AString(field.get(0)))));
//Create an expression for the open field case (By name)
func = new ScalarFunctionCallExpression(
FunctionUtils.getFunctionInfo(AsterixBuiltinFunctions.FIELD_ACCESS_BY_NAME), varRef, fieldRef);
@@ -590,8 +556,8 @@
vars.add(newVar);
} else {
// Assumes the indexed field is in the closed portion of the type.
- Mutable<ILogicalExpression> indexRef = new MutableObject<ILogicalExpression>(
- new ConstantExpression(new AsterixConstantValue(new AInt32(pos))));
+ Mutable<ILogicalExpression> indexRef = new MutableObject<ILogicalExpression>(new ConstantExpression(
+ new AsterixConstantValue(new AInt32(pos))));
AbstractFunctionCallExpression func = new ScalarFunctionCallExpression(
FunctionUtils.getFunctionInfo(AsterixBuiltinFunctions.FIELD_ACCESS_BY_INDEX), varRef, indexRef);
expressions.add(new MutableObject<ILogicalExpression>(func));
@@ -615,8 +581,8 @@
FunctionUtils.getFunctionInfo(AsterixBuiltinFunctions.IS_NULL),
new MutableObject<ILogicalExpression>(new VariableReferenceExpression(secondaryKeyVar)));
ScalarFunctionCallExpression notFuncExpr = new ScalarFunctionCallExpression(
- FunctionUtils.getFunctionInfo(AsterixBuiltinFunctions.NOT),
- new MutableObject<ILogicalExpression>(isNullFuncExpr));
+ FunctionUtils.getFunctionInfo(AsterixBuiltinFunctions.NOT), new MutableObject<ILogicalExpression>(
+ isNullFuncExpr));
filterExpressions.add(new MutableObject<ILogicalExpression>(notFuncExpr));
}
// No nullable secondary keys.
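The filter expression built above guards the secondary-index insert/delete and tokenize operators: for every nullable secondary key it contributes a not(is-null(key)) term, so tuples with a null key never reach the index. A rough sketch of that guard using plain Java predicates instead of Algebricks expressions (hypothetical helper, illustration only):

import java.util.List;
import java.util.function.Predicate;

// Sketch of the null-key guard that createFilterExpression assembles:
// one not(is-null(sk)) term per nullable key column, AND-ed together.
public class NullFilterSketch {

    static Predicate<Object[]> filterForNullableKeys(List<Integer> nullableKeyColumns) {
        Predicate<Object[]> filter = tuple -> true;
        for (int col : nullableKeyColumns) {
            filter = filter.and(tuple -> tuple[col] != null); // not(is-null(sk))
        }
        return filter;
    }

    public static void main(String[] args) {
        Predicate<Object[]> f = filterForNullableKeys(List.of(1));
        System.out.println(f.test(new Object[] { 7, "key" })); // true: key present
        System.out.println(f.test(new Object[] { 7, null }));  // false: filtered out
    }
}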
diff --git a/asterix-algebra/src/main/java/org/apache/asterix/translator/CompiledStatements.java b/asterix-algebra/src/main/java/org/apache/asterix/translator/CompiledStatements.java
index 26f6bd3..25649ee 100644
--- a/asterix-algebra/src/main/java/org/apache/asterix/translator/CompiledStatements.java
+++ b/asterix-algebra/src/main/java/org/apache/asterix/translator/CompiledStatements.java
@@ -27,9 +27,13 @@
import org.apache.asterix.aql.base.Statement.Kind;
import org.apache.asterix.aql.expression.CallExpr;
import org.apache.asterix.aql.expression.FLWOGRExpression;
+import org.apache.asterix.aql.expression.FieldAccessor;
+import org.apache.asterix.aql.expression.FieldBinding;
import org.apache.asterix.aql.expression.ForClause;
+import org.apache.asterix.aql.expression.Identifier;
import org.apache.asterix.aql.expression.LiteralExpr;
import org.apache.asterix.aql.expression.Query;
+import org.apache.asterix.aql.expression.RecordConstructor;
import org.apache.asterix.aql.expression.VariableExpr;
import org.apache.asterix.aql.expression.WhereClause;
import org.apache.asterix.aql.literal.StringLiteral;
@@ -38,6 +42,8 @@
import org.apache.asterix.common.functions.FunctionConstants;
import org.apache.asterix.common.functions.FunctionSignature;
import org.apache.asterix.metadata.declared.AqlMetadataProvider;
+import org.apache.asterix.metadata.entities.Dataset;
+import org.apache.asterix.om.types.ARecordType;
import org.apache.asterix.om.types.IAType;
import org.apache.hyracks.algebricks.common.exceptions.AlgebricksException;
@@ -77,8 +83,8 @@
// added by yasser
public static class CompiledCreateDataverseStatement implements ICompiledStatement {
- private final String dataverseName;
- private final String format;
+ private String dataverseName;
+ private String format;
public CompiledCreateDataverseStatement(String dataverseName, String format) {
this.dataverseName = dataverseName;
@@ -100,7 +106,7 @@
}
public static class CompiledNodeGroupDropStatement implements ICompiledStatement {
- private final String nodeGroupName;
+ private String nodeGroupName;
public CompiledNodeGroupDropStatement(String nodeGroupName) {
this.nodeGroupName = nodeGroupName;
@@ -117,9 +123,9 @@
}
public static class CompiledIndexDropStatement implements ICompiledStatement {
- private final String dataverseName;
- private final String datasetName;
- private final String indexName;
+ private String dataverseName;
+ private String datasetName;
+ private String indexName;
public CompiledIndexDropStatement(String dataverseName, String datasetName, String indexName) {
this.dataverseName = dataverseName;
@@ -146,8 +152,8 @@
}
public static class CompiledDataverseDropStatement implements ICompiledStatement {
- private final String dataverseName;
- private final boolean ifExists;
+ private String dataverseName;
+ private boolean ifExists;
public CompiledDataverseDropStatement(String dataverseName, boolean ifExists) {
this.dataverseName = dataverseName;
@@ -169,7 +175,7 @@
}
public static class CompiledTypeDropStatement implements ICompiledStatement {
- private final String typeName;
+ private String typeName;
public CompiledTypeDropStatement(String nodeGroupName) {
this.typeName = nodeGroupName;
@@ -205,8 +211,7 @@
private final int gramLength;
public CompiledCreateIndexStatement(String indexName, String dataverseName, String datasetName,
- List<List<String>> keyFields, List<IAType> keyTypes, boolean isEnforced, int gramLength,
- IndexType indexType) {
+ List<List<String>> keyFields, List<IAType> keyTypes, boolean isEnforced, int gramLength, IndexType indexType) {
this.indexName = indexName;
this.dataverseName = dataverseName;
this.datasetName = datasetName;
@@ -217,12 +222,10 @@
this.indexType = indexType;
}
- @Override
public String getDatasetName() {
return datasetName;
}
- @Override
public String getDataverseName() {
return dataverseName;
}
@@ -258,11 +261,11 @@
}
public static class CompiledLoadFromFileStatement implements ICompiledDmlStatement {
- private final String dataverseName;
- private final String datasetName;
- private final boolean alreadySorted;
- private final String adapter;
- private final Map<String, String> properties;
+ private String dataverseName;
+ private String datasetName;
+ private boolean alreadySorted;
+ private String adapter;
+ private Map<String, String> properties;
public CompiledLoadFromFileStatement(String dataverseName, String datasetName, String adapter,
Map<String, String> properties, boolean alreadySorted) {
@@ -273,12 +276,10 @@
this.properties = properties;
}
- @Override
public String getDataverseName() {
return dataverseName;
}
- @Override
public String getDatasetName() {
return datasetName;
}
@@ -314,12 +315,10 @@
this.varCounter = varCounter;
}
- @Override
public String getDataverseName() {
return dataverseName;
}
- @Override
public String getDatasetName() {
return datasetName;
}
@@ -339,12 +338,12 @@
}
public static class CompiledConnectFeedStatement implements ICompiledDmlStatement {
- private final String dataverseName;
- private final String feedName;
- private final String datasetName;
- private final String policyName;
+ private String dataverseName;
+ private String feedName;
+ private String datasetName;
+ private String policyName;
private Query query;
- private final int varCounter;
+ private int varCounter;
public CompiledConnectFeedStatement(String dataverseName, String feedName, String datasetName,
String policyName, Query query, int varCounter) {
@@ -391,7 +390,7 @@
return policyName;
}
}
-
+
public static class CompiledSubscribeFeedStatement implements ICompiledDmlStatement {
private final FeedConnectionRequest request;
@@ -433,10 +432,11 @@
}
+
public static class CompiledDisconnectFeedStatement implements ICompiledDmlStatement {
- private final String dataverseName;
- private final String datasetName;
- private final String feedName;
+ private String dataverseName;
+ private String datasetName;
+ private String feedName;
private Query query;
private int varCounter;
@@ -476,15 +476,15 @@
}
public static class CompiledDeleteStatement implements ICompiledDmlStatement {
- private final VariableExpr var;
- private final String dataverseName;
- private final String datasetName;
- private final Expression condition;
- private final int varCounter;
- private final AqlMetadataProvider metadataProvider;
+ private VariableExpr var;
+ private String dataverseName;
+ private String datasetName;
+ private Expression condition;
+ private int varCounter;
+ private AqlMetadataProvider metadataProvider;
- public CompiledDeleteStatement(VariableExpr var, String dataverseName, String datasetName, Expression condition,
- int varCounter, AqlMetadataProvider metadataProvider) {
+ public CompiledDeleteStatement(VariableExpr var, String dataverseName, String datasetName,
+ Expression condition, int varCounter, AqlMetadataProvider metadataProvider) {
this.var = var;
this.dataverseName = dataverseName;
this.datasetName = datasetName;
@@ -529,7 +529,23 @@
clauseList.add(whereClause);
}
- FLWOGRExpression flowgr = new FLWOGRExpression(clauseList, var);
+ Dataset dataset = metadataProvider.findDataset(dataverseName, datasetName);
+ if (dataset == null) {
+ throw new AlgebricksException("Unknown dataset " + datasetName);
+ }
+ String itemTypeName = dataset.getItemTypeName();
+ IAType itemType = metadataProvider.findType(dataset.getDataverseName(), itemTypeName);
+ ARecordType recType = (ARecordType) itemType;
+ String[] fieldNames = recType.getFieldNames();
+ List<FieldBinding> fieldBindings = new ArrayList<FieldBinding>();
+ for (int i = 0; i < fieldNames.length; i++) {
+ FieldAccessor fa = new FieldAccessor(var, new Identifier(fieldNames[i]));
+ FieldBinding fb = new FieldBinding(new LiteralExpr(new StringLiteral(fieldNames[i])), fa);
+ fieldBindings.add(fb);
+ }
+ RecordConstructor rc = new RecordConstructor(fieldBindings);
+
+ FLWOGRExpression flowgr = new FLWOGRExpression(clauseList, rc);
Query query = new Query();
query.setBody(flowgr);
return query;
@@ -576,8 +592,7 @@
private final int gramLength;
public CompiledIndexCompactStatement(String dataverseName, String datasetName, String indexName,
- List<List<String>> keyFields, List<IAType> keyTypes, boolean isEnforced, int gramLength,
- IndexType indexType) {
+ List<List<String>> keyFields, List<IAType> keyTypes, boolean isEnforced, int gramLength, IndexType indexType) {
super(dataverseName, datasetName);
this.indexName = indexName;
this.keyFields = keyFields;
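The restored CompiledDeleteStatement above rewrites a delete into a FLWOGR expression whose body is a RecordConstructor over the dataset's declared fields (one FieldBinding per field accessor), rather than returning the bound variable itself. A rough textual sketch of that rewrite, with hypothetical helper names for illustration only:

import java.util.List;
import java.util.StringJoiner;

// Sketch of the delete rewrite: re-assemble the record to be deleted
// from the dataset's declared fields. Not the AsterixDB translator API.
public class DeleteRewriteSketch {

    static String rewriteDelete(String var, String dataset, List<String> fieldNames) {
        StringJoiner recordCtor = new StringJoiner(", ", "{ ", " }");
        for (String f : fieldNames) {
            // "name": $l.name  -- one FieldBinding per declared field
            recordCtor.add("\"" + f + "\": " + var + "." + f);
        }
        return "for " + var + " in dataset " + dataset + " return " + recordCtor;
    }

    public static void main(String[] args) {
        System.out.println(rewriteDelete("$l", "Persons", List.of("personId", "name")));
        // for $l in dataset Persons return { "personId": $l.personId, "name": $l.name }
    }
}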
diff --git a/asterix-app/src/main/java/org/apache/asterix/aql/translator/AqlTranslator.java b/asterix-app/src/main/java/org/apache/asterix/aql/translator/AqlTranslator.java
index 034e1f5..dcfbc98 100644
--- a/asterix-app/src/main/java/org/apache/asterix/aql/translator/AqlTranslator.java
+++ b/asterix-app/src/main/java/org/apache/asterix/aql/translator/AqlTranslator.java
@@ -39,6 +39,11 @@
import java.util.logging.Level;
import java.util.logging.Logger;
+import org.apache.commons.lang3.StringUtils;
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
+
import org.apache.asterix.api.common.APIFramework;
import org.apache.asterix.api.common.Job;
import org.apache.asterix.api.common.SessionConfig;
@@ -165,12 +170,10 @@
import org.apache.asterix.translator.CompiledStatements.CompiledSubscribeFeedStatement;
import org.apache.asterix.translator.CompiledStatements.ICompiledDmlStatement;
import org.apache.asterix.translator.TypeTranslator;
-import org.apache.commons.lang3.StringUtils;
import org.apache.hyracks.algebricks.common.constraints.AlgebricksPartitionConstraint;
import org.apache.hyracks.algebricks.common.constraints.AlgebricksPartitionConstraintHelper;
import org.apache.hyracks.algebricks.common.exceptions.AlgebricksException;
import org.apache.hyracks.algebricks.common.utils.Pair;
-import org.apache.hyracks.algebricks.common.utils.Triple;
import org.apache.hyracks.algebricks.core.algebra.expressions.AbstractFunctionCallExpression.FunctionKind;
import org.apache.hyracks.algebricks.data.IAWriterFactory;
import org.apache.hyracks.algebricks.data.IResultSerializerFactoryProvider;
@@ -188,15 +191,11 @@
import org.apache.hyracks.api.io.FileReference;
import org.apache.hyracks.api.job.JobId;
import org.apache.hyracks.api.job.JobSpecification;
+import org.apache.hyracks.algebricks.common.utils.Triple;
import org.apache.hyracks.dataflow.std.connectors.OneToOneConnectorDescriptor;
import org.apache.hyracks.dataflow.std.file.FileSplit;
import org.apache.hyracks.dataflow.std.file.IFileSplitProvider;
import org.apache.hyracks.storage.am.lsm.common.api.ILSMMergePolicyFactory;
-import org.json.JSONArray;
-import org.json.JSONException;
-import org.json.JSONObject;
-
-import com.google.common.collect.Lists;
/*
* Provides functionality for executing a batch of AQL statements (queries included)
@@ -241,7 +240,7 @@
/**
* Compiles and submits for execution a list of AQL statements.
- *
+ *
* @param hcc
* A Hyracks client connection that is used to submit a jobspec to Hyracks.
* @param hdc
@@ -381,8 +380,8 @@
case QUERY: {
metadataProvider.setResultSetId(new ResultSetId(resultSetIdCounter++));
- metadataProvider.setResultAsyncMode(
- resultDelivery == ResultDelivery.ASYNC || resultDelivery == ResultDelivery.ASYNC_DEFERRED);
+ metadataProvider.setResultAsyncMode(resultDelivery == ResultDelivery.ASYNC
+ || resultDelivery == ResultDelivery.ASYNC_DEFERRED);
handleQuery(metadataProvider, (Query) stmt, hcc, hdc, resultDelivery);
break;
}
@@ -474,8 +473,8 @@
throw new AlgebricksException("A dataverse with this name " + dvName + " already exists.");
}
}
- MetadataManager.INSTANCE.addDataverse(metadataProvider.getMetadataTxnContext(),
- new Dataverse(dvName, stmtCreateDataverse.getFormat(), IMetadataEntity.PENDING_NO_OP));
+ MetadataManager.INSTANCE.addDataverse(metadataProvider.getMetadataTxnContext(), new Dataverse(dvName,
+ stmtCreateDataverse.getFormat(), IMetadataEntity.PENDING_NO_OP));
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
} catch (Exception e) {
abort(e, e, mdTxnCtx);
@@ -493,8 +492,8 @@
throw new AsterixException("Unknown compaction policy: " + compactionPolicy);
}
String compactionPolicyFactoryClassName = compactionPolicyEntity.getClassName();
- ILSMMergePolicyFactory mergePolicyFactory = (ILSMMergePolicyFactory) Class
- .forName(compactionPolicyFactoryClassName).newInstance();
+ ILSMMergePolicyFactory mergePolicyFactory = (ILSMMergePolicyFactory) Class.forName(
+ compactionPolicyFactoryClassName).newInstance();
if (isExternalDataset && mergePolicyFactory.getName().compareTo("correlated-prefix") == 0) {
throw new AsterixException("The correlated-prefix merge policy cannot be used with external dataset.");
}
@@ -557,8 +556,8 @@
if (dt == null) {
throw new AlgebricksException(": type " + itemTypeName + " could not be found.");
}
- String ngName = ngNameId != null ? ngNameId.getValue()
- : configureNodegroupForDataset(dd, dataverseName, mdTxnCtx);
+ String ngName = ngNameId != null ? ngNameId.getValue() : configureNodegroupForDataset(dd, dataverseName,
+ mdTxnCtx);
if (compactionPolicy == null) {
compactionPolicy = GlobalConfig.DEFAULT_COMPACTION_POLICY_NAME;
@@ -806,8 +805,8 @@
ds = MetadataManager.INSTANCE.getDataset(metadataProvider.getMetadataTxnContext(), dataverseName,
datasetName);
if (ds == null) {
- throw new AlgebricksException(
- "There is no dataset with this name " + datasetName + " in dataverse " + dataverseName);
+ throw new AlgebricksException("There is no dataset with this name " + datasetName + " in dataverse "
+ + dataverseName);
}
indexName = stmtCreateIndex.getIndexName().getValue();
@@ -896,9 +895,9 @@
// External dataset
// Check if the dataset is indexible
if (!ExternalIndexingOperations.isIndexible((ExternalDatasetDetails) ds.getDatasetDetails())) {
- throw new AlgebricksException(
- "dataset using " + ((ExternalDatasetDetails) ds.getDatasetDetails()).getAdapter()
- + " Adapter can't be indexed");
+ throw new AlgebricksException("dataset using "
+ + ((ExternalDatasetDetails) ds.getDatasetDetails()).getAdapter()
+ + " Adapter can't be indexed");
}
// check if the name of the index is valid
if (!ExternalIndexingOperations.isValidIndexName(datasetName, indexName)) {
@@ -950,14 +949,14 @@
//check whether there exists another enforced index on the same field
if (stmtCreateIndex.isEnforced()) {
- List<Index> indexes = MetadataManager.INSTANCE
- .getDatasetIndexes(metadataProvider.getMetadataTxnContext(), dataverseName, datasetName);
+ List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(
+ metadataProvider.getMetadataTxnContext(), dataverseName, datasetName);
for (Index index : indexes) {
if (index.getKeyFieldNames().equals(indexFields)
&& !index.getKeyFieldTypes().equals(indexFieldTypes) && index.isEnforcingKeyFileds())
- throw new AsterixException(
- "Cannot create index " + indexName + " , enforced index " + index.getIndexName()
- + " on field \"" + StringUtils.join(indexFields, ',') + "\" already exist");
+ throw new AsterixException("Cannot create index " + indexName + " , enforced index "
+ + index.getIndexName() + " on field \"" + StringUtils.join(indexFields, ',')
+ + "\" already exist");
}
}
@@ -969,8 +968,7 @@
ARecordType enforcedType = null;
if (stmtCreateIndex.isEnforced()) {
- enforcedType = IntroduceSecondaryIndexInsertDeleteRule.createEnforcedType(aRecordType,
- Lists.newArrayList(index));
+ enforcedType = IntroduceSecondaryIndexInsertDeleteRule.createEnforcedType(aRecordType, index);
}
//#. prepare to create the index artifact in NC.
@@ -1016,8 +1014,8 @@
MetadataManager.INSTANCE.addIndex(metadataProvider.getMetadataTxnContext(), index);
// add another new files index with PendingNoOp after deleting the index with PendingAddOp
if (firstExternalDatasetIndex) {
- MetadataManager.INSTANCE.dropIndex(metadataProvider.getMetadataTxnContext(), dataverseName, datasetName,
- filesIndex.getIndexName());
+ MetadataManager.INSTANCE.dropIndex(metadataProvider.getMetadataTxnContext(), dataverseName,
+ datasetName, filesIndex.getIndexName());
filesIndex.setPendingOp(IMetadataEntity.PENDING_NO_OP);
MetadataManager.INSTANCE.addIndex(metadataProvider.getMetadataTxnContext(), filesIndex);
// update transaction timestamp
@@ -1058,8 +1056,8 @@
metadataProvider.setMetadataTxnContext(mdTxnCtx);
CompiledIndexDropStatement cds = new CompiledIndexDropStatement(dataverseName, datasetName, indexName);
try {
- JobSpecification jobSpec = IndexOperations.buildDropSecondaryIndexJobSpec(cds, metadataProvider,
- ds);
+ JobSpecification jobSpec = IndexOperations
+ .buildDropSecondaryIndexJobSpec(cds, metadataProvider, ds);
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
bActiveTxn = false;
@@ -1191,8 +1189,8 @@
for (FeedConnectionId connection : activeFeedConnections) {
FeedId feedId = connection.getFeedId();
if (feedId.getDataverse().equals(dataverseName)) {
- disStmt = new DisconnectFeedStatement(dvId, new Identifier(feedId.getFeedName()),
- new Identifier(connection.getDatasetName()));
+ disStmt = new DisconnectFeedStatement(dvId, new Identifier(feedId.getFeedName()), new Identifier(
+ connection.getDatasetName()));
try {
handleDisconnectFeedStatement(metadataProvider, disStmt, hcc);
if (LOGGER.isLoggable(Level.INFO)) {
@@ -1253,8 +1251,8 @@
// first, deleting the dataverse record from the DATAVERSE_DATASET
// second, inserting the dataverse record with the PendingDropOp value into the DATAVERSE_DATASET
MetadataManager.INSTANCE.dropDataverse(mdTxnCtx, dataverseName);
- MetadataManager.INSTANCE.addDataverse(mdTxnCtx,
- new Dataverse(dataverseName, dv.getDataFormat(), IMetadataEntity.PENDING_DROP_OP));
+ MetadataManager.INSTANCE.addDataverse(mdTxnCtx, new Dataverse(dataverseName, dv.getDataFormat(),
+ IMetadataEntity.PENDING_DROP_OP));
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
bActiveTxn = false;
@@ -1335,8 +1333,8 @@
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
return;
} else {
- throw new AlgebricksException("There is no dataset with this name " + datasetName + " in dataverse "
- + dataverseName + ".");
+ throw new AlgebricksException("There is no dataset with this name " + datasetName
+ + " in dataverse " + dataverseName + ".");
}
}
@@ -1370,11 +1368,11 @@
//#. mark the existing dataset as PendingDropOp
MetadataManager.INSTANCE.dropDataset(mdTxnCtx, dataverseName, datasetName);
- MetadataManager.INSTANCE.addDataset(mdTxnCtx,
- new Dataset(dataverseName, datasetName, ds.getItemTypeName(), ds.getNodeGroupName(),
- ds.getCompactionPolicy(), ds.getCompactionPolicyProperties(), ds.getDatasetDetails(),
- ds.getHints(), ds.getDatasetType(), ds.getDatasetId(),
- IMetadataEntity.PENDING_DROP_OP));
+ MetadataManager.INSTANCE.addDataset(
+ mdTxnCtx,
+ new Dataset(dataverseName, datasetName, ds.getItemTypeName(), ds.getNodeGroupName(), ds
+ .getCompactionPolicy(), ds.getCompactionPolicyProperties(), ds.getDatasetDetails(), ds
+ .getHints(), ds.getDatasetType(), ds.getDatasetId(), IMetadataEntity.PENDING_DROP_OP));
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
bActiveTxn = false;
@@ -1406,18 +1404,18 @@
} else {
CompiledIndexDropStatement cds = new CompiledIndexDropStatement(dataverseName, datasetName,
indexes.get(j).getIndexName());
- jobsToExecute
- .add(ExternalIndexingOperations.buildDropFilesIndexJobSpec(cds, metadataProvider, ds));
+ jobsToExecute.add(ExternalIndexingOperations.buildDropFilesIndexJobSpec(cds, metadataProvider,
+ ds));
}
}
//#. mark the existing dataset as PendingDropOp
MetadataManager.INSTANCE.dropDataset(mdTxnCtx, dataverseName, datasetName);
- MetadataManager.INSTANCE.addDataset(mdTxnCtx,
- new Dataset(dataverseName, datasetName, ds.getItemTypeName(), ds.getNodeGroupName(),
- ds.getCompactionPolicy(), ds.getCompactionPolicyProperties(), ds.getDatasetDetails(),
- ds.getHints(), ds.getDatasetType(), ds.getDatasetId(),
- IMetadataEntity.PENDING_DROP_OP));
+ MetadataManager.INSTANCE.addDataset(
+ mdTxnCtx,
+ new Dataset(dataverseName, datasetName, ds.getItemTypeName(), ds.getNodeGroupName(), ds
+ .getCompactionPolicy(), ds.getCompactionPolicyProperties(), ds.getDatasetDetails(), ds
+ .getHints(), ds.getDatasetType(), ds.getDatasetId(), IMetadataEntity.PENDING_DROP_OP));
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
bActiveTxn = false;
@@ -1503,8 +1501,8 @@
Dataset ds = MetadataManager.INSTANCE.getDataset(mdTxnCtx, dataverseName, datasetName);
if (ds == null) {
- throw new AlgebricksException(
- "There is no dataset with this name " + datasetName + " in dataverse " + dataverseName);
+ throw new AlgebricksException("There is no dataset with this name " + datasetName + " in dataverse "
+ + dataverseName);
}
List<FeedConnectionId> feedConnections = FeedLifecycleListener.INSTANCE.getActiveFeedConnections(null);
@@ -1518,9 +1516,9 @@
}
}
if (resourceInUse) {
- throw new AsterixException(
- "Dataset" + datasetName + " is currently being fed into by the following feeds " + "."
- + builder.toString() + "\nOperation not supported.");
+ throw new AsterixException("Dataset" + datasetName
+ + " is currently being fed into by the following feeds " + "." + builder.toString()
+ + "\nOperation not supported.");
}
}
@@ -1541,10 +1539,11 @@
//#. mark PendingDropOp on the existing index
MetadataManager.INSTANCE.dropIndex(mdTxnCtx, dataverseName, datasetName, indexName);
- MetadataManager.INSTANCE.addIndex(mdTxnCtx,
- new Index(dataverseName, datasetName, indexName, index.getIndexType(), index.getKeyFieldNames(),
- index.getKeyFieldTypes(), index.isEnforcingKeyFileds(), index.isPrimaryIndex(),
- IMetadataEntity.PENDING_DROP_OP));
+ MetadataManager.INSTANCE.addIndex(
+ mdTxnCtx,
+ new Index(dataverseName, datasetName, indexName, index.getIndexType(),
+ index.getKeyFieldNames(), index.getKeyFieldTypes(), index.isEnforcingKeyFileds(), index
+ .isPrimaryIndex(), IMetadataEntity.PENDING_DROP_OP));
//#. commit the existing transaction before calling runJob.
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
@@ -1588,26 +1587,28 @@
if (ExternalIndexingOperations.isFileIndex(externalIndex)) {
cds = new CompiledIndexDropStatement(dataverseName, datasetName,
externalIndex.getIndexName());
- jobsToExecute.add(
- ExternalIndexingOperations.buildDropFilesIndexJobSpec(cds, metadataProvider, ds));
+ jobsToExecute.add(ExternalIndexingOperations.buildDropFilesIndexJobSpec(cds,
+ metadataProvider, ds));
//#. mark PendingDropOp on the existing files index
MetadataManager.INSTANCE.dropIndex(mdTxnCtx, dataverseName, datasetName,
externalIndex.getIndexName());
- MetadataManager.INSTANCE.addIndex(mdTxnCtx,
- new Index(dataverseName, datasetName, externalIndex.getIndexName(),
- externalIndex.getIndexType(), externalIndex.getKeyFieldNames(),
- index.getKeyFieldTypes(), index.isEnforcingKeyFileds(),
- externalIndex.isPrimaryIndex(), IMetadataEntity.PENDING_DROP_OP));
+ MetadataManager.INSTANCE.addIndex(
+ mdTxnCtx,
+ new Index(dataverseName, datasetName, externalIndex.getIndexName(), externalIndex
+ .getIndexType(), externalIndex.getKeyFieldNames(),
+ index.getKeyFieldTypes(), index.isEnforcingKeyFileds(), externalIndex
+ .isPrimaryIndex(), IMetadataEntity.PENDING_DROP_OP));
}
}
}
//#. mark PendingDropOp on the existing index
MetadataManager.INSTANCE.dropIndex(mdTxnCtx, dataverseName, datasetName, indexName);
- MetadataManager.INSTANCE.addIndex(mdTxnCtx,
- new Index(dataverseName, datasetName, indexName, index.getIndexType(), index.getKeyFieldNames(),
- index.getKeyFieldTypes(), index.isEnforcingKeyFileds(), index.isPrimaryIndex(),
- IMetadataEntity.PENDING_DROP_OP));
+ MetadataManager.INSTANCE.addIndex(
+ mdTxnCtx,
+ new Index(dataverseName, datasetName, indexName, index.getIndexType(),
+ index.getKeyFieldNames(), index.getKeyFieldTypes(), index.isEnforcingKeyFileds(), index
+ .isPrimaryIndex(), IMetadataEntity.PENDING_DROP_OP));
//#. commit the existing transaction before calling runJob.
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
@@ -1666,8 +1667,8 @@
} catch (Exception e2) {
e.addSuppressed(e2);
abort(e, e2, mdTxnCtx);
- throw new IllegalStateException("System is inconsistent state: pending index(" + dataverseName + "."
- + datasetName + "." + indexName + ") couldn't be removed from the metadata", e);
+ throw new IllegalStateException("System is inconsistent state: pending index(" + dataverseName
+ + "." + datasetName + "." + indexName + ") couldn't be removed from the metadata", e);
}
}
@@ -1763,8 +1764,8 @@
FunctionSignature signature = stmtDropFunction.getFunctionSignature();
MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
metadataProvider.setMetadataTxnContext(mdTxnCtx);
- MetadataLockManager.INSTANCE.functionStatementBegin(signature.getNamespace(),
- signature.getNamespace() + "." + signature.getName());
+ MetadataLockManager.INSTANCE.functionStatementBegin(signature.getNamespace(), signature.getNamespace() + "."
+ + signature.getName());
try {
Function function = MetadataManager.INSTANCE.getFunction(mdTxnCtx, signature);
if (function == null) {
@@ -1778,8 +1779,8 @@
abort(e, e, mdTxnCtx);
throw e;
} finally {
- MetadataLockManager.INSTANCE.functionStatementEnd(signature.getNamespace(),
- signature.getNamespace() + "." + signature.getName());
+ MetadataLockManager.INSTANCE.functionStatementEnd(signature.getNamespace(), signature.getNamespace() + "."
+ + signature.getName());
}
}
@@ -1793,11 +1794,11 @@
metadataProvider.setMetadataTxnContext(mdTxnCtx);
MetadataLockManager.INSTANCE.modifyDatasetBegin(dataverseName, dataverseName + "." + datasetName);
try {
- CompiledLoadFromFileStatement cls = new CompiledLoadFromFileStatement(dataverseName,
- loadStmt.getDatasetName().getValue(), loadStmt.getAdapter(), loadStmt.getProperties(),
+ CompiledLoadFromFileStatement cls = new CompiledLoadFromFileStatement(dataverseName, loadStmt
+ .getDatasetName().getValue(), loadStmt.getAdapter(), loadStmt.getProperties(),
loadStmt.dataIsAlreadySorted());
- JobSpecification spec = APIFramework.compileQuery(null, metadataProvider, null, 0, null, sessionConfig,
- cls);
+ JobSpecification spec = APIFramework
+ .compileQuery(null, metadataProvider, null, 0, null, sessionConfig, cls);
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
bActiveTxn = false;
if (spec != null) {
@@ -1822,13 +1823,13 @@
MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
boolean bActiveTxn = true;
metadataProvider.setMetadataTxnContext(mdTxnCtx);
- MetadataLockManager.INSTANCE.insertDeleteBegin(dataverseName, dataverseName + "." + stmtInsert.getDatasetName(),
- query.getDataverses(), query.getDatasets());
+ MetadataLockManager.INSTANCE.insertDeleteBegin(dataverseName,
+ dataverseName + "." + stmtInsert.getDatasetName(), query.getDataverses(), query.getDatasets());
try {
metadataProvider.setWriteTransaction(true);
- CompiledInsertStatement clfrqs = new CompiledInsertStatement(dataverseName,
- stmtInsert.getDatasetName().getValue(), query, stmtInsert.getVarCounter());
+ CompiledInsertStatement clfrqs = new CompiledInsertStatement(dataverseName, stmtInsert.getDatasetName()
+ .getValue(), query, stmtInsert.getVarCounter());
JobSpecification compiled = rewriteCompileQuery(metadataProvider, query, clfrqs);
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
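This hunk and the delete hunk below follow the same ordering: compile the statement while the metadata transaction is open, commit, and only then execute the job; the bActiveTxn flag records whether an abort is still owed if a failure lands between those steps. A stand-in sketch of that pattern (MetadataTxn and Job are hypothetical types, not the real API):

    final class CommitBeforeRun {
        interface MetadataTxn { void commit(); void abort(); }
        interface Job { void run(); }

        static void execute(MetadataTxn txn, Job compiled) {
            boolean bActiveTxn = true;
            try {
                txn.commit();        // publish metadata before touching storage
                bActiveTxn = false;  // from here on, no abort is possible or needed
                compiled.run();      // the job executes outside the metadata txn
            } catch (RuntimeException e) {
                if (bActiveTxn) {
                    txn.abort();     // abort only if the commit never happened
                }
                throw e;
            }
        }
    }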
@@ -1857,8 +1858,9 @@
MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
boolean bActiveTxn = true;
metadataProvider.setMetadataTxnContext(mdTxnCtx);
- MetadataLockManager.INSTANCE.insertDeleteBegin(dataverseName, dataverseName + "." + stmtDelete.getDatasetName(),
- stmtDelete.getDataverses(), stmtDelete.getDatasets());
+ MetadataLockManager.INSTANCE
+ .insertDeleteBegin(dataverseName, dataverseName + "." + stmtDelete.getDatasetName(),
+ stmtDelete.getDataverses(), stmtDelete.getDatasets());
try {
metadataProvider.setWriteTransaction(true);
@@ -1887,8 +1889,8 @@
}
private JobSpecification rewriteCompileQuery(AqlMetadataProvider metadataProvider, Query query,
- ICompiledDmlStatement stmt)
- throws AsterixException, RemoteException, AlgebricksException, JSONException, ACIDException {
+ ICompiledDmlStatement stmt) throws AsterixException, RemoteException, AlgebricksException, JSONException,
+ ACIDException {
// Query Rewriting (happens under the same ongoing metadata transaction)
Pair<Query, Integer> reWrittenQuery = APIFramework.reWriteQuery(declaredFunctions, metadataProvider, query,
@@ -1975,8 +1977,8 @@
boolean extendingExisting = cfps.getSourcePolicyName() != null;
String description = cfps.getDescription() == null ? "" : cfps.getDescription();
if (extendingExisting) {
- FeedPolicy sourceFeedPolicy = MetadataManager.INSTANCE
- .getFeedPolicy(metadataProvider.getMetadataTxnContext(), dataverse, cfps.getSourcePolicyName());
+ FeedPolicy sourceFeedPolicy = MetadataManager.INSTANCE.getFeedPolicy(
+ metadataProvider.getMetadataTxnContext(), dataverse, cfps.getSourcePolicyName());
if (sourceFeedPolicy == null) {
sourceFeedPolicy = MetadataManager.INSTANCE.getFeedPolicy(metadataProvider.getMetadataTxnContext(),
MetadataConstants.METADATA_DATAVERSE_NAME, cfps.getSourcePolicyName());
@@ -2099,13 +2101,13 @@
IFeedLifecycleEventSubscriber eventSubscriber = new FeedLifecycleEventSubscriber();
FeedConnectionId feedConnId = null;
- MetadataLockManager.INSTANCE.connectFeedBegin(dataverseName, dataverseName + "." + datasetName,
- dataverseName + "." + feedName);
+ MetadataLockManager.INSTANCE.connectFeedBegin(dataverseName, dataverseName + "." + datasetName, dataverseName
+ + "." + feedName);
try {
metadataProvider.setWriteTransaction(true);
- CompiledConnectFeedStatement cbfs = new CompiledConnectFeedStatement(dataverseName, cfs.getFeedName(),
- cfs.getDatasetName().getValue(), cfs.getPolicy(), cfs.getQuery(), cfs.getVarCounter());
+ CompiledConnectFeedStatement cbfs = new CompiledConnectFeedStatement(dataverseName, cfs.getFeedName(), cfs
+ .getDatasetName().getValue(), cfs.getPolicy(), cfs.getQuery(), cfs.getVarCounter());
FeedUtil.validateIfDatasetExists(dataverseName, cfs.getDatasetName().getValue(),
metadataProvider.getMetadataTxnContext());
@@ -2163,8 +2165,8 @@
eventSubscriber.assertEvent(FeedLifecycleEvent.FEED_ENDED); // blocking call
}
String waitForCompletionParam = metadataProvider.getConfig().get(ConnectFeedStatement.WAIT_FOR_COMPLETION);
- boolean waitForCompletion = waitForCompletionParam == null ? false
- : Boolean.valueOf(waitForCompletionParam);
+ boolean waitForCompletion = waitForCompletionParam == null ? false : Boolean
+ .valueOf(waitForCompletionParam);
if (waitForCompletion) {
MetadataLockManager.INSTANCE.connectFeedEnd(dataverseName, dataverseName + "." + datasetName,
dataverseName + "." + feedName);
@@ -2189,7 +2191,7 @@
/**
* Generates a subscription request corresponding to a connect feed request. In addition, provides a boolean
* flag indicating if feed intake job needs to be started (source primary feed not found to be active).
- *
+ *
* @param dataverse
* @param feed
* @param dataset
@@ -2200,7 +2202,7 @@
*/
private Triple<FeedConnectionRequest, Boolean, List<IFeedJoint>> getFeedConnectionRequest(String dataverse,
Feed feed, String dataset, FeedPolicy feedPolicy, MetadataTransactionContext mdTxnCtx)
- throws MetadataException {
+ throws MetadataException {
IFeedJoint sourceFeedJoint = null;
FeedConnectionRequest request = null;
List<String> functionsToApply = new ArrayList<String>();
@@ -2215,7 +2217,7 @@
sourceFeedJoint = FeedLifecycleListener.INSTANCE.getAvailableFeedJoint(feedJointKey);
if (sourceFeedJoint == null) { // the feed is currently not being ingested, i.e., it is unavailable.
connectionLocation = ConnectionLocation.SOURCE_FEED_INTAKE_STAGE;
- FeedId sourceFeedId = feedJointKey.getFeedId(); // the root/primary feedId
+ FeedId sourceFeedId = feedJointKey.getFeedId(); // the root/primary feedId
Feed primaryFeed = MetadataManager.INSTANCE.getFeed(mdTxnCtx, dataverse, sourceFeedId.getFeedName());
FeedJointKey intakeFeedJointKey = new FeedJointKey(sourceFeedId, new ArrayList<String>());
sourceFeedJoint = new FeedJoint(intakeFeedJointKey, primaryFeed.getFeedId(), connectionLocation,
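The branch above encodes the fallback described in the javadoc earlier in this section: a live feed joint is preferred, and only when none exists does the connection anchor at the source feed's intake stage, which is also what forces the intake job to start. A minimal stand-in for that decision (Registry is hypothetical):

    final class FeedJointLookup {
        interface Registry { Object getAvailableFeedJoint(String feedJointKey); }

        // true means the source feed is not currently being ingested, so the
        // intake job must be started and the connection attaches at intake
        static boolean mustStartIntakeJob(Registry registry, String feedJointKey) {
            return registry.getAvailableFeedJoint(feedJointKey) == null;
        }
    }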
@@ -2234,7 +2236,7 @@
}
}
// register the compute feed point that represents the final output from the collection of
- // functions that will be applied.
+ // functions that will be applied.
if (!functionsToApply.isEmpty()) {
FeedJointKey computeFeedJointKey = new FeedJointKey(feed.getFeedId(), functionsToApply);
IFeedJoint computeFeedJoint = new FeedJoint(computeFeedJointKey, feed.getFeedId(),
@@ -2257,7 +2259,7 @@
}
/*
- * Gets the feed joint corresponding to the feed definition. Tuples constituting the feed are
+ * Gets the feed joint corresponding to the feed definition. Tuples constituting the feed are
* available at this feed joint.
*/
private FeedJointKey getFeedJointKey(Feed feed, MetadataTransactionContext ctx) throws MetadataException {
@@ -2305,12 +2307,12 @@
Dataset dataset = MetadataManager.INSTANCE.getDataset(metadataProvider.getMetadataTxnContext(),
dataverseName, cfs.getDatasetName().getValue());
if (dataset == null) {
- throw new AsterixException(
- "Unknown dataset :" + cfs.getDatasetName().getValue() + " in dataverse " + dataverseName);
+ throw new AsterixException("Unknown dataset :" + cfs.getDatasetName().getValue() + " in dataverse "
+ + dataverseName);
}
- Pair<JobSpecification, Boolean> specDisconnectType = FeedOperations
- .buildDisconnectFeedJobSpec(metadataProvider, connectionId);
+ Pair<JobSpecification, Boolean> specDisconnectType = FeedOperations.buildDisconnectFeedJobSpec(
+ metadataProvider, connectionId);
JobSpecification jobSpec = specDisconnectType.first;
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
bActiveTxn = false;
@@ -2354,17 +2356,17 @@
StringUtils.join(bfs.getLocations(), ','));
JobSpecification compiled = rewriteCompileQuery(metadataProvider, bfs.getQuery(), csfs);
- FeedConnectionId feedConnectionId = new FeedConnectionId(bfs.getSubscriptionRequest().getReceivingFeedId(),
- bfs.getSubscriptionRequest().getTargetDataset());
+ FeedConnectionId feedConnectionId = new FeedConnectionId(bfs.getSubscriptionRequest().getReceivingFeedId(), bfs
+ .getSubscriptionRequest().getTargetDataset());
String dataverse = feedConnectionId.getFeedId().getDataverse();
String dataset = feedConnectionId.getDatasetName();
- MetadataLockManager.INSTANCE.subscribeFeedBegin(dataverse, dataverse + "." + dataset,
- dataverse + "." + feedConnectionId.getFeedId().getFeedName());
+ MetadataLockManager.INSTANCE.subscribeFeedBegin(dataverse, dataverse + "." + dataset, dataverse + "."
+ + feedConnectionId.getFeedId().getFeedName());
try {
- JobSpecification alteredJobSpec = FeedUtil.alterJobSpecificationForFeed(compiled, feedConnectionId,
- bfs.getSubscriptionRequest().getPolicyParameters());
+ JobSpecification alteredJobSpec = FeedUtil.alterJobSpecificationForFeed(compiled, feedConnectionId, bfs
+ .getSubscriptionRequest().getPolicyParameters());
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
bActiveTxn = false;
@@ -2379,8 +2381,8 @@
}
throw e;
} finally {
- MetadataLockManager.INSTANCE.subscribeFeedEnd(dataverse, dataverse + "." + dataset,
- dataverse + "." + feedConnectionId.getFeedId().getFeedName());
+ MetadataLockManager.INSTANCE.subscribeFeedEnd(dataverse, dataverse + "." + dataset, dataverse + "."
+ + feedConnectionId.getFeedId().getFeedName());
}
}
@@ -2398,8 +2400,8 @@
try {
Dataset ds = MetadataManager.INSTANCE.getDataset(mdTxnCtx, dataverseName, datasetName);
if (ds == null) {
- throw new AlgebricksException(
- "There is no dataset with this name " + datasetName + " in dataverse " + dataverseName + ".");
+ throw new AlgebricksException("There is no dataset with this name " + datasetName + " in dataverse "
+ + dataverseName + ".");
}
String itemTypeName = ds.getItemTypeName();
@@ -2409,26 +2411,45 @@
// Prepare jobs to compact the dataset and its indexes
List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx, dataverseName, datasetName);
if (indexes.size() == 0) {
- throw new AlgebricksException(
- "Cannot compact the extrenal dataset " + datasetName + " because it has no indexes");
+ throw new AlgebricksException("Cannot compact the extrenal dataset " + datasetName
+ + " because it has no indexes");
}
- Dataverse dataverse = MetadataManager.INSTANCE.getDataverse(metadataProvider.getMetadataTxnContext(),
- dataverseName);
- jobsToExecute.add(DatasetOperations.compactDatasetJobSpec(dataverse, datasetName, metadataProvider));
- ARecordType aRecordType = (ARecordType) dt.getDatatype();
- ARecordType enforcedType = IntroduceSecondaryIndexInsertDeleteRule.createEnforcedType(aRecordType, indexes);
- for (int j = 0; j < indexes.size(); j++) {
- if (ds.getDatasetType() == DatasetType.INTERNAL && indexes.get(j).isSecondaryIndex()
- || ds.getDatasetType() == DatasetType.EXTERNAL
- && !ExternalIndexingOperations.isFileIndex(indexes.get(j))) {
- CompiledIndexCompactStatement cics = new CompiledIndexCompactStatement(dataverseName, datasetName,
- indexes.get(j).getIndexName(), indexes.get(j).getKeyFieldNames(),
- indexes.get(j).getKeyFieldTypes(), indexes.get(j).isEnforcingKeyFileds(),
- indexes.get(j).getGramLength(), indexes.get(j).getIndexType());
- jobsToExecute.add(IndexOperations.buildSecondaryIndexCompactJobSpec(cics, aRecordType, enforcedType,
- metadataProvider, ds));
+ if (ds.getDatasetType() == DatasetType.INTERNAL) {
+ for (int j = 0; j < indexes.size(); j++) {
+ if (indexes.get(j).isSecondaryIndex()) {
+ CompiledIndexCompactStatement cics = new CompiledIndexCompactStatement(dataverseName,
+ datasetName, indexes.get(j).getIndexName(), indexes.get(j).getKeyFieldNames(), indexes
+ .get(j).getKeyFieldTypes(), indexes.get(j).isEnforcingKeyFileds(), indexes.get(
+ j).getGramLength(), indexes.get(j).getIndexType());
+
+ Dataverse dataverse = MetadataManager.INSTANCE.getDataverse(
+ metadataProvider.getMetadataTxnContext(), dataverseName);
+ jobsToExecute.add(DatasetOperations.compactDatasetJobSpec(dataverse, datasetName,
+ metadataProvider));
+
+ }
}
+ } else {
+ for (int j = 0; j < indexes.size(); j++) {
+ if (!ExternalIndexingOperations.isFileIndex(indexes.get(j))) {
+ CompiledIndexCompactStatement cics = new CompiledIndexCompactStatement(dataverseName,
+ datasetName, indexes.get(j).getIndexName(), indexes.get(j).getKeyFieldNames(), indexes
+ .get(j).getKeyFieldTypes(), indexes.get(j).isEnforcingKeyFileds(), indexes.get(
+ j).getGramLength(), indexes.get(j).getIndexType());
+ ARecordType aRecordType = (ARecordType) dt.getDatatype();
+ ARecordType enforcedType = null;
+ if (cics.isEnforced()) {
+ enforcedType = IntroduceSecondaryIndexInsertDeleteRule.createEnforcedType(aRecordType,
+ indexes.get(j));
+ }
+ jobsToExecute.add(IndexOperations.buildSecondaryIndexCompactJobSpec(cics, aRecordType,
+ enforcedType, metadataProvider, ds));
+
+ }
+
+ }
+ jobsToExecute.add(ExternalIndexingOperations.compactFilesIndexJobSpec(ds, metadataProvider));
}
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
bActiveTxn = false;
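The reverted compaction logic above splits into two explicit branches: internal datasets queue a dataset-level compaction job per secondary index (note the compiled index statement is constructed there but the job added is the whole-dataset one), while external datasets compact every non-file index and finish with the files index. A control-flow sketch with stand-in types:

    import java.util.ArrayList;
    import java.util.List;

    final class CompactPlanSketch {
        interface Idx { boolean isSecondary(); boolean isFileIndex(); }

        static List<String> planJobs(boolean internalDataset, List<Idx> indexes) {
            List<String> jobs = new ArrayList<String>();
            if (internalDataset) {
                for (Idx i : indexes) {
                    if (i.isSecondary()) {
                        jobs.add("compact-dataset");         // dataset-level job, per the hunk above
                    }
                }
            } else {
                for (Idx i : indexes) {
                    if (!i.isFileIndex()) {
                        jobs.add("compact-secondary-index"); // enforced type computed per index
                    }
                }
                jobs.add("compact-files-index");             // external datasets add this last
            }
            return jobs;
        }
    }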
@@ -2575,13 +2596,13 @@
// Dataset exists ?
if (ds == null) {
- throw new AlgebricksException(
- "There is no dataset with this name " + datasetName + " in dataverse " + dataverseName);
+ throw new AlgebricksException("There is no dataset with this name " + datasetName + " in dataverse "
+ + dataverseName);
}
// Dataset external ?
if (ds.getDatasetType() != DatasetType.EXTERNAL) {
- throw new AlgebricksException(
- "dataset " + datasetName + " in dataverse " + dataverseName + " is not an external dataset");
+ throw new AlgebricksException("dataset " + datasetName + " in dataverse " + dataverseName
+ + " is not an external dataset");
}
// Dataset has indexes ?
indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx, dataverseName, datasetName);
@@ -2605,8 +2626,8 @@
// Compute delta
// Now we compare snapshot with external file system
- if (ExternalIndexingOperations.isDatasetUptodate(ds, metadataFiles, addedFiles, deletedFiles,
- appendedFiles)) {
+ if (ExternalIndexingOperations
+ .isDatasetUptodate(ds, metadataFiles, addedFiles, deletedFiles, appendedFiles)) {
((ExternalDatasetDetails) ds.getDatasetDetails()).setRefreshTimestamp(txnTime);
MetadataManager.INSTANCE.updateDataset(mdTxnCtx, ds);
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
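The fast path reflowed above short-circuits a refresh: when the metadata snapshot still matches the external file system (no added, deleted, or appended files), only the refresh timestamp is rewritten before the transaction commits, and no jobs are scheduled. A stand-in sketch of that early return:

    final class RefreshFastPath {
        interface ExternalDataset { void setRefreshTimestamp(long ts); }

        // returns true when the refresh is satisfied by a timestamp bump alone
        static boolean tryFastPath(boolean snapshotUpToDate, ExternalDataset ds, long txnTime) {
            if (snapshotUpToDate) {
                ds.setRefreshTimestamp(txnTime); // record the refresh without file work
                return true;                     // caller commits and returns early
            }
            return false;                        // caller proceeds to build refresh jobs
        }
    }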
@@ -2782,8 +2803,8 @@
handlePregelixStatement(metadataProvider, runStmt, hcc);
break;
default:
- throw new AlgebricksException(
- "The system \"" + runStmt.getSystem() + "\" specified in your run statement is not supported.");
+ throw new AlgebricksException("The system \"" + runStmt.getSystem()
+ + "\" specified in your run statement is not supported.");
}
}
@@ -2812,8 +2833,8 @@
// construct input paths
Index fromIndex = null;
- List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx, dataverseNameFrom,
- pregelixStmt.getDatasetNameFrom().getValue());
+ List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx, dataverseNameFrom, pregelixStmt
+ .getDatasetNameFrom().getValue());
for (Index ind : indexes) {
if (ind.isPrimaryIndex())
fromIndex = ind;
@@ -2825,8 +2846,8 @@
Dataset datasetFrom = MetadataManager.INSTANCE.getDataset(mdTxnCtx, dataverseNameFrom, datasetNameFrom);
IFileSplitProvider fromSplits = metadataProvider.splitProviderAndPartitionConstraintsForDataset(
- dataverseNameFrom, datasetNameFrom, fromIndex.getIndexName(),
- datasetFrom.getDatasetDetails().isTemp()).first;
+ dataverseNameFrom, datasetNameFrom, fromIndex.getIndexName(), datasetFrom.getDatasetDetails()
+ .isTemp()).first;
StringBuilder fromSplitsPaths = new StringBuilder();
for (FileSplit f : fromSplits.getFileSplits()) {
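The loop that opens above walks every file split of the primary index to assemble the Pregelix input-path string. The exact path format and separator are not visible in this hunk, so the comma in the sketch below is an assumption for illustration, and FileSplit is a stand-in rather than the Hyracks class:

    import java.util.List;

    final class SplitPathsSketch {
        static final class FileSplit {
            final String localPath;
            FileSplit(String localPath) { this.localPath = localPath; }
        }

        static String joinPaths(List<FileSplit> splits) {
            StringBuilder sb = new StringBuilder();
            for (FileSplit f : splits) {
                if (sb.length() > 0) {
                    sb.append(','); // assumed separator, for illustration only
                }
                sb.append(f.localPath);
            }
            return sb.toString();
        }
    }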
@@ -2837,8 +2858,8 @@
// Construct output paths
Index toIndex = null;
- indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx, dataverseNameTo,
- pregelixStmt.getDatasetNameTo().getValue());
+ indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx, dataverseNameTo, pregelixStmt
+ .getDatasetNameTo().getValue());
for (Index ind : indexes) {
if (ind.isPrimaryIndex())
toIndex = ind;
@@ -2850,8 +2871,7 @@
Dataset datasetTo = MetadataManager.INSTANCE.getDataset(mdTxnCtx, dataverseNameTo, datasetNameTo);
IFileSplitProvider toSplits = metadataProvider.splitProviderAndPartitionConstraintsForDataset(
- dataverseNameTo, datasetNameTo, toIndex.getIndexName(),
- datasetTo.getDatasetDetails().isTemp()).first;
+ dataverseNameTo, datasetNameTo, toIndex.getIndexName(), datasetTo.getDatasetDetails().isTemp()).first;
StringBuilder toSplitsPaths = new StringBuilder();
for (FileSplit f : toSplits.getFileSplits()) {
@@ -2866,11 +2886,11 @@
pregelixStmt.getDatasetNameTo(), true);
this.handleDatasetDropStatement(metadataProvider, dropStmt, hcc);
- IDatasetDetailsDecl idd = new InternalDetailsDecl(toIndex.getKeyFieldNames(), false, null,
- toDataset.getDatasetDetails().isTemp());
+ IDatasetDetailsDecl idd = new InternalDetailsDecl(toIndex.getKeyFieldNames(), false, null, toDataset
+ .getDatasetDetails().isTemp());
DatasetDecl createToDataset = new DatasetDecl(new Identifier(dataverseNameTo),
- pregelixStmt.getDatasetNameTo(), new Identifier(toDataset.getItemTypeName()),
- new Identifier(toDataset.getNodeGroupName()), toDataset.getCompactionPolicy(),
+ pregelixStmt.getDatasetNameTo(), new Identifier(toDataset.getItemTypeName()), new Identifier(
+ toDataset.getNodeGroupName()), toDataset.getCompactionPolicy(),
toDataset.getCompactionPolicyProperties(), toDataset.getHints(), toDataset.getDatasetType(),
idd, false);
this.handleCreateDatasetStatement(metadataProvider, createToDataset, hcc);
@@ -2977,7 +2997,7 @@
private void flushDataset(IHyracksClientConnection hcc, AqlMetadataProvider metadataProvider,
MetadataTransactionContext mdTxnCtx, String dataverseName, String datasetName, String indexName)
- throws Exception {
+ throws Exception {
AsterixCompilerProperties compilerProperties = AsterixAppContextInfo.getInstance().getCompilerProperties();
int frameSize = compilerProperties.getFrameSize();
JobSpecification spec = new JobSpecification(frameSize);
@@ -2994,8 +3014,8 @@
spec.connect(new OneToOneConnectorDescriptor(spec), emptySource, 0, flushOperator, 0);
Pair<IFileSplitProvider, AlgebricksPartitionConstraint> primarySplitsAndConstraint = metadataProvider
- .splitProviderAndPartitionConstraintsForDataset(dataverseName, datasetName, indexName,
- dataset.getDatasetDetails().isTemp());
+ .splitProviderAndPartitionConstraintsForDataset(dataverseName, datasetName, indexName, dataset
+ .getDatasetDetails().isTemp());
AlgebricksPartitionConstraint primaryPartitionConstraint = primarySplitsAndConstraint.second;
AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(spec, emptySource,
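flushDataset, reflowed above, sizes the job from the compiler's configured frame size before wiring the empty source into the flush operator and pinning both to the dataset's partitions. A minimal stand-in for that setup; every type below is a hypothetical stub, not the Hyracks API:

    final class FlushJobSketch {
        interface CompilerProperties { int getFrameSize(); }

        static final class JobSpecification {
            final int frameSize;
            JobSpecification(int frameSize) { this.frameSize = frameSize; }
        }

        static JobSpecification newFlushJob(CompilerProperties props) {
            int frameSize = props.getFrameSize();   // buffer size used by every operator
            return new JobSpecification(frameSize); // operators and constraints are added next
        }
    }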
diff --git a/asterix-app/src/test/resources/optimizerts/results/disjunction-to-join-delete-1.plan b/asterix-app/src/test/resources/optimizerts/results/disjunction-to-join-delete-1.plan
index 4c2d144..563a75b 100644
--- a/asterix-app/src/test/resources/optimizerts/results/disjunction-to-join-delete-1.plan
+++ b/asterix-app/src/test/resources/optimizerts/results/disjunction-to-join-delete-1.plan
@@ -12,10 +12,12 @@
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- ASSIGN |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- BTREE_SEARCH |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- STABLE_SORT [$$11(ASC)] |PARTITIONED|
- -- HASH_PARTITION_EXCHANGE [$$11] |PARTITIONED|
- -- UNNEST |UNPARTITIONED|
- -- EMPTY_TUPLE_SOURCE |UNPARTITIONED|
\ No newline at end of file
+ -- BTREE_SEARCH |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STABLE_SORT [$$15(ASC)] |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$15] |PARTITIONED|
+ -- UNNEST |UNPARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |UNPARTITIONED|
diff --git a/asterix-app/src/test/resources/optimizerts/results/disjunction-to-join-delete-2.plan b/asterix-app/src/test/resources/optimizerts/results/disjunction-to-join-delete-2.plan
index 4b9e8a2..ba4c536 100644
--- a/asterix-app/src/test/resources/optimizerts/results/disjunction-to-join-delete-2.plan
+++ b/asterix-app/src/test/resources/optimizerts/results/disjunction-to-join-delete-2.plan
@@ -9,19 +9,22 @@
-- INSERT_DELETE |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- MATERIALIZE |PARTITIONED|
- -- HASH_PARTITION_EXCHANGE [$$8] |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- ASSIGN |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
- -- STREAM_SELECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- BTREE_SEARCH |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- STABLE_SORT [$$13(ASC)] |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- STREAM_PROJECT |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- BTREE_SEARCH |PARTITIONED|
- -- BROADCAST_EXCHANGE |PARTITIONED|
- -- UNNEST |UNPARTITIONED|
- -- EMPTY_TUPLE_SOURCE |UNPARTITIONED|
+ -- STREAM_SELECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- BTREE_SEARCH |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STABLE_SORT [$$19(ASC)] |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- BTREE_SEARCH |PARTITIONED|
+ -- BROADCAST_EXCHANGE |PARTITIONED|
+ -- UNNEST |UNPARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |UNPARTITIONED|
diff --git a/asterix-app/src/test/resources/optimizerts/results/disjunction-to-join-delete-3.plan b/asterix-app/src/test/resources/optimizerts/results/disjunction-to-join-delete-3.plan
index 7d38039..55fc23c 100644
--- a/asterix-app/src/test/resources/optimizerts/results/disjunction-to-join-delete-3.plan
+++ b/asterix-app/src/test/resources/optimizerts/results/disjunction-to-join-delete-3.plan
@@ -9,18 +9,20 @@
-- INSERT_DELETE |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- MATERIALIZE |PARTITIONED|
- -- HASH_PARTITION_EXCHANGE [$$10] |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$12] |PARTITIONED|
-- ASSIGN |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- HYBRID_HASH_JOIN [$$11][$$9] |PARTITIONED|
- -- HASH_PARTITION_EXCHANGE [$$11] |PARTITIONED|
- -- UNNEST |UNPARTITIONED|
- -- EMPTY_TUPLE_SOURCE |UNPARTITIONED|
- -- HASH_PARTITION_EXCHANGE [$$9] |PARTITIONED|
- -- STREAM_PROJECT |PARTITIONED|
- -- ASSIGN |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- DATASOURCE_SCAN |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- HYBRID_HASH_JOIN [$$15][$$13] |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$15] |PARTITIONED|
+ -- UNNEST |UNPARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |UNPARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$13] |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- EMPTY_TUPLE_SOURCE |PARTITIONED|
\ No newline at end of file
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
diff --git a/asterix-app/src/test/resources/optimizerts/results/scan-delete-all.plan b/asterix-app/src/test/resources/optimizerts/results/scan-delete-all.plan
index 3e031e9..c2173fb 100644
--- a/asterix-app/src/test/resources/optimizerts/results/scan-delete-all.plan
+++ b/asterix-app/src/test/resources/optimizerts/results/scan-delete-all.plan
@@ -7,7 +7,8 @@
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- ASSIGN |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- DATASOURCE_SCAN |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- EMPTY_TUPLE_SOURCE |PARTITIONED|
\ No newline at end of file
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
diff --git a/asterix-app/src/test/resources/optimizerts/results/scan-delete-rtree-secondary-index.plan b/asterix-app/src/test/resources/optimizerts/results/scan-delete-rtree-secondary-index.plan
index ca4a6c2..691761d 100644
--- a/asterix-app/src/test/resources/optimizerts/results/scan-delete-rtree-secondary-index.plan
+++ b/asterix-app/src/test/resources/optimizerts/results/scan-delete-rtree-secondary-index.plan
@@ -25,8 +25,9 @@
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- ASSIGN |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- BTREE_SEARCH |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- ASSIGN |PARTITIONED|
- -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- BTREE_SEARCH |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
diff --git a/asterix-app/src/test/resources/optimizerts/results/scan-delete.plan b/asterix-app/src/test/resources/optimizerts/results/scan-delete.plan
index 790ac4f..cfe5d35 100644
--- a/asterix-app/src/test/resources/optimizerts/results/scan-delete.plan
+++ b/asterix-app/src/test/resources/optimizerts/results/scan-delete.plan
@@ -6,9 +6,11 @@
-- MATERIALIZE |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- ASSIGN |PARTITIONED|
- -- STREAM_SELECT |PARTITIONED|
- -- STREAM_PROJECT |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- DATASOURCE_SCAN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_SELECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
diff --git a/asterix-app/src/test/resources/optimizerts/results/skip-index/dont-skip-primary-index-search-in-delete.plan b/asterix-app/src/test/resources/optimizerts/results/skip-index/dont-skip-primary-index-search-in-delete.plan
index bc9f1d1..e2e6dff 100644
--- a/asterix-app/src/test/resources/optimizerts/results/skip-index/dont-skip-primary-index-search-in-delete.plan
+++ b/asterix-app/src/test/resources/optimizerts/results/skip-index/dont-skip-primary-index-search-in-delete.plan
@@ -7,8 +7,9 @@
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- ASSIGN |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- BTREE_SEARCH |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- ASSIGN |PARTITIONED|
- -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- BTREE_SEARCH |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
diff --git a/asterix-app/src/test/resources/optimizerts/results/skip-index/skip-ngram-index-search-in-delete.plan b/asterix-app/src/test/resources/optimizerts/results/skip-index/skip-ngram-index-search-in-delete.plan
index 31de832..c882d81 100644
--- a/asterix-app/src/test/resources/optimizerts/results/skip-index/skip-ngram-index-search-in-delete.plan
+++ b/asterix-app/src/test/resources/optimizerts/results/skip-index/skip-ngram-index-search-in-delete.plan
@@ -11,9 +11,11 @@
-- MATERIALIZE |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- ASSIGN |PARTITIONED|
- -- STREAM_SELECT |PARTITIONED|
- -- STREAM_PROJECT |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- DATASOURCE_SCAN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_SELECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
diff --git a/asterix-app/src/test/resources/optimizerts/results/skip-index/skip-rtree-index-search-in-delete.plan b/asterix-app/src/test/resources/optimizerts/results/skip-index/skip-rtree-index-search-in-delete.plan
index 40461a8..b69dfa3 100644
--- a/asterix-app/src/test/resources/optimizerts/results/skip-index/skip-rtree-index-search-in-delete.plan
+++ b/asterix-app/src/test/resources/optimizerts/results/skip-index/skip-rtree-index-search-in-delete.plan
@@ -13,9 +13,11 @@
-- MATERIALIZE |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- ASSIGN |PARTITIONED|
- -- STREAM_SELECT |PARTITIONED|
- -- STREAM_PROJECT |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- DATASOURCE_SCAN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_SELECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- EMPTY_TUPLE_SOURCE |PARTITIONED|
\ No newline at end of file
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
\ No newline at end of file
diff --git a/asterix-app/src/test/resources/optimizerts/results/skip-index/skip-secondary-btree-index-search-in-delete.plan b/asterix-app/src/test/resources/optimizerts/results/skip-index/skip-secondary-btree-index-search-in-delete.plan
index 31de832..c882d81 100644
--- a/asterix-app/src/test/resources/optimizerts/results/skip-index/skip-secondary-btree-index-search-in-delete.plan
+++ b/asterix-app/src/test/resources/optimizerts/results/skip-index/skip-secondary-btree-index-search-in-delete.plan
@@ -11,9 +11,11 @@
-- MATERIALIZE |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- ASSIGN |PARTITIONED|
- -- STREAM_SELECT |PARTITIONED|
- -- STREAM_PROJECT |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- DATASOURCE_SCAN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_SELECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
diff --git a/asterix-app/src/test/resources/optimizerts/results/skip-index/skip-word-index-search-in-delete.plan b/asterix-app/src/test/resources/optimizerts/results/skip-index/skip-word-index-search-in-delete.plan
index 31de832..c882d81 100644
--- a/asterix-app/src/test/resources/optimizerts/results/skip-index/skip-word-index-search-in-delete.plan
+++ b/asterix-app/src/test/resources/optimizerts/results/skip-index/skip-word-index-search-in-delete.plan
@@ -11,9 +11,11 @@
-- MATERIALIZE |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- ASSIGN |PARTITIONED|
- -- STREAM_SELECT |PARTITIONED|
- -- STREAM_PROJECT |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- DATASOURCE_SCAN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_SELECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
diff --git a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/error-checking/index-type-collision/index-type-collision.1.ddl.aql b/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/error-checking/enforced-field-name-collision/enforced-field-name-collision.1.ddl.aql
similarity index 87%
rename from asterix-app/src/test/resources/runtimets/queries/open-index-enforced/error-checking/index-type-collision/index-type-collision.1.ddl.aql
rename to asterix-app/src/test/resources/runtimets/queries/open-index-enforced/error-checking/enforced-field-name-collision/enforced-field-name-collision.1.ddl.aql
index 84d35b7..853386d 100644
--- a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/error-checking/index-type-collision/index-type-collision.1.ddl.aql
+++ b/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/error-checking/enforced-field-name-collision/enforced-field-name-collision.1.ddl.aql
@@ -21,9 +21,9 @@
use dataverse test;
create type testType as open {
- "id": int32
+ "id": int32,
+ "value": string
}
create dataset testDS(testType) primary key id;
-create index testIdx1 on testDS(value: int32) enforced;
-create index testIdx2 on testDS(value: string) enforced;
+create index testIdx on testDS(value: string) enforced;
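The renamed test now exercises a field-name collision rather than a type collision: the open type already declares "value": string, so an enforced index on value: string must be rejected (the testsuite entry later in this diff expects an AlgebricksException). A hedged sketch of the kind of validation involved, with stand-in types:

    import java.util.Map;

    final class EnforcedIndexCheckSketch {
        // declaredFields maps each field name declared by the record type to its type name
        static void validate(Map<String, String> declaredFields, String enforcedKeyField) {
            if (declaredFields.containsKey(enforcedKeyField)) {
                throw new IllegalArgumentException("Cannot enforce field \"" + enforcedKeyField
                        + "\": the record type already declares it");
            }
        }
    }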
diff --git a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/error-checking/record-type-collision/record-collision.1.ddl.aql b/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/error-checking/enforced-field-type-collision/enforced-field-name-collision.1.ddl.aql
similarity index 100%
rename from asterix-app/src/test/resources/runtimets/queries/open-index-enforced/error-checking/record-type-collision/record-collision.1.ddl.aql
rename to asterix-app/src/test/resources/runtimets/queries/open-index-enforced/error-checking/enforced-field-type-collision/enforced-field-name-collision.1.ddl.aql
diff --git a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/error-checking/index-type-promotion-collision/index-type-promotion-collision.1.ddl.aql b/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/error-checking/index-type-promotion-collision/index-type-promotion-collision.1.ddl.aql
deleted file mode 100644
index 6c47032..0000000
--- a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/error-checking/index-type-promotion-collision/index-type-promotion-collision.1.ddl.aql
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-drop dataverse test if exists;
-create dataverse test;
-use dataverse test;
-
-create type testType as open {
- "id": int32
-}
-
-create dataset testDS(testType) primary key id;
-create index testIdx1 on testDS(value: int64) enforced;
-create index testIdx2 on testDS(value: int32) enforced;
diff --git a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.1.ddl.aql b/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.1.ddl.aql
deleted file mode 100644
index 10a3530..0000000
--- a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.1.ddl.aql
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/*
- * Description : Test that BTree open index is used in query plan
- * : define the BTree open index on a composite key (fname,lanme)
- * : predicate => where $l.fname="Julio" and $l.lname="Isa"
- * Expected Result : Success
- * Issue : Issue 162
- * Date : 27th March 2014
- */
-
-drop dataverse test if exists;
-create dataverse test;
-use dataverse test;
-
-create type Emp as closed {
-id:int64,
-fname:string,
-lname:string,
-age:int64,
-dept:string
-}
-
-create type EmpOpen as open {
-id:int64,
-fname:string,
-age:int64,
-dept:string
-}
-
-create dataset employee(Emp) primary key id;
-
-create dataset employeeOpen(EmpOpen) primary key id;
-
diff --git a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.2.update.aql b/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.2.update.aql
deleted file mode 100644
index d28adff..0000000
--- a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.2.update.aql
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/*
- * Description : Test that BTree enforced open index is used in query plan
- * : define the BTree enforced open index on a composite key (fname,lanme)
- * : predicate => where $l.fname="Julio" and $l.lname="Isa"
- * Expected Result : Success
- * Issue : Issue 162
- * Date : 27th March 2014
- */
-
-use dataverse test;
-
-load dataset employee
-using "org.apache.asterix.external.dataset.adapter.NCFileSystemAdapter"
-(("path"="nc1://data/names.adm"),("format"="delimited-text"),("delimiter"="|"));
-
-insert into dataset employeeOpen (
- for $x in dataset employee return $x
-);
diff --git a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.3.ddl.aql b/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.3.ddl.aql
deleted file mode 100644
index 11b0baa..0000000
--- a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.3.ddl.aql
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/*
- * Description : Test that BTree enforced open index is used in query plan
- * : define the BTree enforced open index on a composite key (fname,lanme)
- * : predicate => where $l.fname="Julio" and $l.lname="Isa"
- * Expected Result : Success
- * Issue : Issue 162
- * Date : 27th March 2014
- */
-
-use dataverse test;
-
-// create secondary index
-
-create index idx_employee_f_l_name on employeeOpen(fname,lname:string) enforced;
-create index idx_employee_l_f_name on employeeOpen(lname:string, fname) enforced;
diff --git a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.4.query.aql b/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.4.query.aql
deleted file mode 100644
index b1f2adf..0000000
--- a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.4.query.aql
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/*
- * Description : Test that BTree enforced open index is used in query plan
- * : define the BTree enforced open index on a composite key (fname,lanme)
- * : predicate => where $l.fname="Julio" and $l.lname="Isa"
- * Expected Result : Success
- * Issue : Issue 162
- * Date : 27th March 2014
- */
-
-use dataverse test;
-
-for $l in dataset('employeeOpen')
-where $l.fname="Julio" and $l.lname="Isa"
-return {
- "id": $l.id,
- "fname": $l.fname,
- "lname": $l.lname,
- "age": $l.age,
- "dept": $l.dept
-}
diff --git a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.5.ddl.aql b/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.5.ddl.aql
deleted file mode 100644
index 4eaf8e8..0000000
--- a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.5.ddl.aql
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/*
- * Description : Test that BTree enforced open index is used in query plan
- * : define the BTree enforced open index on a composite key (fname,lanme)
- * : predicate => where $l.fname="Julio" and $l.lname="Isa"
- * Expected Result : Success
- * Issue : Issue 162
- * Date : 27th March 2014
- */
-
-use dataverse test;
-
-// create secondary index
-
-drop index employeeOpen.idx_employee_f_l_name;
diff --git a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.6.query.aql b/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.6.query.aql
deleted file mode 100644
index b1f2adf..0000000
--- a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.6.query.aql
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/*
- * Description : Test that BTree enforced open index is used in query plan
- * : define the BTree enforced open index on a composite key (fname,lanme)
- * : predicate => where $l.fname="Julio" and $l.lname="Isa"
- * Expected Result : Success
- * Issue : Issue 162
- * Date : 27th March 2014
- */
-
-use dataverse test;
-
-for $l in dataset('employeeOpen')
-where $l.fname="Julio" and $l.lname="Isa"
-return {
- "id": $l.id,
- "fname": $l.fname,
- "lname": $l.lname,
- "age": $l.age,
- "dept": $l.dept
-}
diff --git a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index/multi-index.1.ddl.aql b/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index/multi-index.1.ddl.aql
deleted file mode 100644
index 0efffb2..0000000
--- a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index/multi-index.1.ddl.aql
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-drop dataverse test if exists;
-create dataverse test;
-use dataverse test;
-
-create type DBLPType as closed {
- id: int64,
- dblpid: string,
- title: string,
- authors: string,
- misc: string
-}
-
-create type DBLPOpenType as open {
- id: int64,
- dblpid: string,
- authors: string,
- misc: string
-}
-
-create nodegroup group1 if not exists on nc1, nc2;
-
-create dataset DBLP(DBLPType)
- primary key id on group1;
-create dataset DBLPOpen(DBLPOpenType)
- primary key id on group1;
diff --git a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index/multi-index.2.update.aql b/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index/multi-index.2.update.aql
deleted file mode 100644
index 3ee5d4e..0000000
--- a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index/multi-index.2.update.aql
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-use dataverse test;
-
-load dataset DBLP
-using "org.apache.asterix.external.dataset.adapter.NCFileSystemAdapter"
-(("path"="nc1://data/dblp-small/dblp-small-id.txt"),("format"="delimited-text"),("delimiter"=":")) pre-sorted;
-
-insert into dataset test.DBLPOpen (
- for $x in dataset test.DBLP
- where $x.id <= 50
- return $x
-);
-
-insert into dataset test.DBLPOpen (
- for $c in dataset test.DBLP
- where $c.id > 50
- return {
- "id": $c.id,
- "dblpid": $c.dblpid,
- "authors": $c.authors,
- "misc": $c.misc
- }
-);
diff --git a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index/multi-index.3.ddl.aql b/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index/multi-index.3.ddl.aql
deleted file mode 100644
index 98338b2..0000000
--- a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index/multi-index.3.ddl.aql
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-use dataverse test;
-
-create index ngram_index on DBLPOpen(title:string) type ngram(3) enforced;
-create index keyword_index on DBLPOpen(title:string) type keyword enforced;
-create index btree_index on DBLPOpen(title:string) enforced;
\ No newline at end of file
diff --git a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index/multi-index.4.query.aql b/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index/multi-index.4.query.aql
deleted file mode 100644
index 775e97d..0000000
--- a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index/multi-index.4.query.aql
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-use dataverse test;
-
-for $o in dataset('DBLPOpen')
-where contains($o.title, "Multimedia")
-order by $o.id
-return {
- "id": $o.id,
- "dblpid": $o.dblpid,
- "title": $o.title,
- "authors": $o.authors,
- "misc": $o.misc
-}
-
diff --git a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index/multi-index.5.query.aql b/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index/multi-index.5.query.aql
deleted file mode 100644
index b7423e2..0000000
--- a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index/multi-index.5.query.aql
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-use dataverse test;
-
-for $o in dataset('DBLPOpen')
-let $jacc := similarity-jaccard-check(word-tokens($o.title), word-tokens("Transactions for Cooperative Environments"), 0.5f)
-where $jacc[0]
-return {
- "id": $o.id,
- "dblpid": $o.dblpid,
- "title": $o.title,
- "authors": $o.authors,
- "misc": $o.misc
-}
diff --git a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index/multi-index.6.query.aql b/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index/multi-index.6.query.aql
deleted file mode 100644
index 0e1ef29..0000000
--- a/asterix-app/src/test/resources/runtimets/queries/open-index-enforced/index-selection/multi-index/multi-index.6.query.aql
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-use dataverse test;
-
-for $o in dataset('DBLPOpen')
-where $o.title = "Multimedia Information Systems Issues and Approaches."
-order by $o.title
-return {
- "id": $o.id,
- "dblpid": $o.dblpid,
- "title": $o.title,
- "authors": $o.authors,
- "misc": $o.misc
-}
diff --git a/asterix-app/src/test/resources/runtimets/results/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.1.adm b/asterix-app/src/test/resources/runtimets/results/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.1.adm
deleted file mode 100644
index 1e19d0d..0000000
--- a/asterix-app/src/test/resources/runtimets/results/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.1.adm
+++ /dev/null
@@ -1,2 +0,0 @@
-[ { "id": 881, "fname": "Julio", "age": 38, "dept": "Sales", "lname": "Isa" }
- ]
diff --git a/asterix-app/src/test/resources/runtimets/results/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.2.adm b/asterix-app/src/test/resources/runtimets/results/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.2.adm
deleted file mode 100644
index 1e19d0d..0000000
--- a/asterix-app/src/test/resources/runtimets/results/open-index-enforced/index-selection/multi-index-composite-key/multi-index-composite-key.2.adm
+++ /dev/null
@@ -1,2 +0,0 @@
-[ { "id": 881, "fname": "Julio", "age": 38, "dept": "Sales", "lname": "Isa" }
- ]
diff --git a/asterix-app/src/test/resources/runtimets/results/open-index-enforced/index-selection/multi-index/multi-index.1.adm b/asterix-app/src/test/resources/runtimets/results/open-index-enforced/index-selection/multi-index/multi-index.1.adm
deleted file mode 100644
index 61ee356..0000000
--- a/asterix-app/src/test/resources/runtimets/results/open-index-enforced/index-selection/multi-index/multi-index.1.adm
+++ /dev/null
@@ -1,2 +0,0 @@
-[ { "id": 4, "dblpid": "books/acm/kim95/ChristodoulakisK95", "authors": "Stavros Christodoulakis Leonidas Koveos", "misc": "2002-01-03 318-337 1995 Modern Database Systems db/books/collections/kim95.html#ChristodoulakisK95", "title": "Multimedia Information Systems Issues and Approaches." }
- ]
\ No newline at end of file
diff --git a/asterix-app/src/test/resources/runtimets/results/open-index-enforced/index-selection/multi-index/multi-index.2.adm b/asterix-app/src/test/resources/runtimets/results/open-index-enforced/index-selection/multi-index/multi-index.2.adm
deleted file mode 100644
index d7647ff..0000000
--- a/asterix-app/src/test/resources/runtimets/results/open-index-enforced/index-selection/multi-index/multi-index.2.adm
+++ /dev/null
@@ -1,2 +0,0 @@
-[ { "id": 9, "dblpid": "books/acm/kim95/Kaiser95", "authors": "Gail E. Kaiser", "misc": "2002-01-03 409-433 1995 Modern Database Systems db/books/collections/kim95.html#Kaiser95", "title": "Cooperative Transactions for Multiuser Environments." }
- ]
\ No newline at end of file
diff --git a/asterix-app/src/test/resources/runtimets/results/open-index-enforced/index-selection/multi-index/multi-index.3.adm b/asterix-app/src/test/resources/runtimets/results/open-index-enforced/index-selection/multi-index/multi-index.3.adm
deleted file mode 100644
index 61ee356..0000000
--- a/asterix-app/src/test/resources/runtimets/results/open-index-enforced/index-selection/multi-index/multi-index.3.adm
+++ /dev/null
@@ -1,2 +0,0 @@
-[ { "id": 4, "dblpid": "books/acm/kim95/ChristodoulakisK95", "authors": "Stavros Christodoulakis Leonidas Koveos", "misc": "2002-01-03 318-337 1995 Modern Database Systems db/books/collections/kim95.html#ChristodoulakisK95", "title": "Multimedia Information Systems Issues and Approaches." }
- ]
\ No newline at end of file
diff --git a/asterix-app/src/test/resources/runtimets/testsuite.xml b/asterix-app/src/test/resources/runtimets/testsuite.xml
index ce62d38..d706feb 100644
--- a/asterix-app/src/test/resources/runtimets/testsuite.xml
+++ b/asterix-app/src/test/resources/runtimets/testsuite.xml
@@ -2928,22 +2928,15 @@
<test-group name="open-index-enforced">
<test-group FilePath="open-index-enforced/error-checking">
<test-case FilePath="open-index-enforced/error-checking">
- <compilation-unit name="index-on-closed-type">
- <output-dir compare="Text">index-on-closed-type</output-dir>
+ <compilation-unit name="enforced-field-name-collision">
+ <output-dir compare="Text">enforced-field-name-collision</output-dir>
<expected-error>org.apache.hyracks.algebricks.common.exceptions.AlgebricksException
</expected-error>
</compilation-unit>
</test-case>
<test-case FilePath="open-index-enforced/error-checking">
- <compilation-unit name="index-type-collision">
- <output-dir compare="Text">index-type-collision</output-dir>
- <expected-error>org.apache.asterix.common.exceptions.AsterixException
- </expected-error>
- </compilation-unit>
- </test-case>
- <test-case FilePath="open-index-enforced/error-checking">
- <compilation-unit name="index-type-promotion-collision">
- <output-dir compare="Text">index-type-promotion-collision</output-dir>
+ <compilation-unit name="enforced-field-type-collision">
+ <output-dir compare="Text">enforced-field-type-collision</output-dir>
<expected-error>org.apache.asterix.common.exceptions.AsterixException</expected-error>
</compilation-unit>
</test-case>
@@ -2955,9 +2948,10 @@
</compilation-unit>
</test-case>
<test-case FilePath="open-index-enforced/error-checking">
- <compilation-unit name="record-type-collision">
- <output-dir compare="Text">record-type-collision</output-dir>
- <expected-error>org.apache.asterix.common.exceptions.AsterixException</expected-error>
+ <compilation-unit name="index-on-closed-type">
+ <output-dir compare="Text">index-on-closed-type</output-dir>
+ <expected-error>org.apache.hyracks.algebricks.common.exceptions.AlgebricksException
+ </expected-error>
</compilation-unit>
</test-case>
</test-group>
@@ -3092,16 +3086,6 @@
</compilation-unit>
</test-case>
<test-case FilePath="open-index-enforced/index-selection">
- <compilation-unit name="multi-index">
- <output-dir compare="Text">multi-index</output-dir>
- </compilation-unit>
- </test-case>
- <test-case FilePath="open-index-enforced/index-selection">
- <compilation-unit name="multi-index-composite-key">
- <output-dir compare="Text">multi-index-composite-key</output-dir>
- </compilation-unit>
- </test-case>
- <test-case FilePath="open-index-enforced/index-selection">
<compilation-unit name="orders-index-custkey">
<output-dir compare="Text">orders-index-custkey</output-dir>
</compilation-unit>