ASTERIXDB-1581: fix subquery decorrelation.
- fix a concurrent modification exception (see the copy-before-iterate sketch below);
- fix variable substitution ordering;
- fix required partitioning property for DataSourceScan;
- fix partitioning property enforcer for nested operators;
- fix recursive subplan decorrelation;
- fix CardinalityInferenceVisitor;
- add a rule to switch inner join branches;
- fix SimpleUnnestToProductRule;
- add test cases that are variants of the ASTERIXDB-1581 query.
Change-Id: Ia2fa4b5b836eafee1975bd1164ae7c22199a4af0
Reviewed-on: https://asterix-gerrit.ics.uci.edu/1125
Tested-by: Jenkins <jenkins@fulliautomatix.ics.uci.edu>
Integration-Tests: Jenkins <jenkins@fulliautomatix.ics.uci.edu>
Reviewed-by: Till Westmann <tillw@apache.org>
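The concurrent-modification fix is the copy-before-iterate change in InlineAllNtsInSubplanVisitor below: the loop over correlatedKeyVars now iterates a snapshot instead of the live set. A minimal, self-contained sketch of the failure mode and the fix; the variable names are illustrative, not taken from the patch.

import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class CopyBeforeIterate {
    public static void main(String[] args) {
        Set<String> correlatedKeyVars = new LinkedHashSet<>();
        correlatedKeyVars.add("$$1");
        correlatedKeyVars.add("$$2");

        // Iterating the live set while the loop body adds to it throws
        // ConcurrentModificationException (the ASTERIXDB-1581 symptom):
        // for (String keyVar : correlatedKeyVars) {
        //     correlatedKeyVars.add(keyVar + "_copy"); // would throw
        // }

        // Fix: iterate over a snapshot; mutations go to the original set.
        List<String> copyOfCorrelatedKeyVars = new ArrayList<>(correlatedKeyVars);
        for (String keyVar : copyOfCorrelatedKeyVars) {
            correlatedKeyVars.add(keyVar + "_copy");
        }
        System.out.println(correlatedKeyVars); // [$$1, $$2, $$1_copy, $$2_copy]
    }
}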
diff --git a/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/optimizer/base/RuleCollections.java b/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/optimizer/base/RuleCollections.java
index cf36c05..582cac9 100644
--- a/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/optimizer/base/RuleCollections.java
+++ b/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/optimizer/base/RuleCollections.java
@@ -122,6 +122,7 @@
import org.apache.hyracks.algebricks.rewriter.rules.SetAlgebricksPhysicalOperatorsRule;
import org.apache.hyracks.algebricks.rewriter.rules.SetExecutionModeRule;
import org.apache.hyracks.algebricks.rewriter.rules.SimpleUnnestToProductRule;
+import org.apache.hyracks.algebricks.rewriter.rules.SwitchInnerJoinBranchRule;
import org.apache.hyracks.algebricks.rewriter.rules.subplan.EliminateSubplanRule;
import org.apache.hyracks.algebricks.rewriter.rules.subplan.EliminateSubplanWithInputCardinalityOneRule;
import org.apache.hyracks.algebricks.rewriter.rules.subplan.NestedSubplanToJoinRule;
@@ -282,6 +283,7 @@
public static final List<IAlgebraicRewriteRule> buildPlanCleanupRuleCollection() {
List<IAlgebraicRewriteRule> planCleanupRules = new LinkedList<>();
+ planCleanupRules.add(new SwitchInnerJoinBranchRule());
planCleanupRules.add(new PushAssignBelowUnionAllRule());
planCleanupRules.add(new ExtractCommonExpressionsRule());
planCleanupRules.add(new RemoveRedundantVariablesRule());
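SwitchInnerJoinBranchRule is registered at the head of the plan-cleanup collection, and the join-singletonbranch tests below exercise it. As the new plan files suggest, the intent is to swap the branches of an inner join when one side is known to produce a single unpartitioned tuple (e.g. a global aggregate), so that the singleton side ends up as the broadcast side. A rough standalone sketch of that decision; PlanNode, InnerJoin, and the cardinality field are illustrative stand-ins, not the Algebricks API.

// Illustrative model only: a join whose inputs report an estimated cardinality.
final class PlanNode {
    final String name;
    final long cardinality; // estimated number of tuples produced
    PlanNode(String name, long cardinality) { this.name = name; this.cardinality = cardinality; }
}

final class InnerJoin {
    PlanNode left;
    PlanNode right;
    InnerJoin(PlanNode left, PlanNode right) { this.left = left; this.right = right; }

    // Swap the branches when the left input is known to produce exactly one tuple,
    // so the singleton side becomes the cheap broadcast side.
    boolean switchBranchesIfLeftIsSingleton() {
        if (left.cardinality != 1) {
            return false; // rule does not fire
        }
        PlanNode tmp = left;
        left = right;
        right = tmp;
        return true;
    }
}

public class SwitchBranchDemo {
    public static void main(String[] args) {
        InnerJoin join = new InnerJoin(new PlanNode("global-aggregate", 1), new PlanNode("scan Bar", 1_000_000));
        join.switchBranchesIfLeftIsSingleton();
        System.out.println(join.left.name + " JOIN " + join.right.name); // scan Bar JOIN global-aggregate
    }
}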
diff --git a/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/am/InvertedIndexAccessMethod.java b/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/am/InvertedIndexAccessMethod.java
index 1052a72..0bc5e78 100644
--- a/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/am/InvertedIndexAccessMethod.java
+++ b/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/am/InvertedIndexAccessMethod.java
@@ -21,6 +21,7 @@
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
+import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
@@ -64,7 +65,6 @@
import org.apache.hyracks.algebricks.core.algebra.functions.FunctionIdentifier;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.AbstractBinaryJoinOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.AbstractLogicalOperator;
-import org.apache.hyracks.algebricks.core.algebra.operators.logical.AbstractLogicalOperator.ExecutionMode;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.AbstractUnnestMapOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.AssignOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.DataSourceScanOperator;
@@ -73,6 +73,7 @@
import org.apache.hyracks.algebricks.core.algebra.operators.logical.SelectOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.UnionAllOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.UnnestOperator;
+import org.apache.hyracks.algebricks.core.algebra.operators.logical.AbstractLogicalOperator.ExecutionMode;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.visitors.LogicalOperatorDeepCopyWithNewVariablesVisitor;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.visitors.VariableUtilities;
import org.apache.hyracks.algebricks.core.algebra.util.OperatorManipulationUtil;
@@ -589,9 +590,9 @@
// will have all primary-key and secondary-search keys replaced, but retains all other original variables.
// Variable replacement map for the first copy.
- Map<LogicalVariable, LogicalVariable> newProbeSubTreeVarMap = new HashMap<LogicalVariable, LogicalVariable>();
+ LinkedHashMap<LogicalVariable, LogicalVariable> newProbeSubTreeVarMap = new LinkedHashMap<>();
// Variable replacement map for the second copy.
- Map<LogicalVariable, LogicalVariable> joinInputSubTreeVarMap = new HashMap<LogicalVariable, LogicalVariable>();
+ LinkedHashMap<LogicalVariable, LogicalVariable> joinInputSubTreeVarMap = new LinkedHashMap<>();
// Init with all live vars.
List<LogicalVariable> liveVars = new ArrayList<LogicalVariable>();
VariableUtilities.getLiveVariables(probeSubTree.getRoot(), liveVars);
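The HashMap-to-LinkedHashMap switches here and in the decorrelation rules below back the "variable substitution ordering" item in the commit message: when recorded mappings chain (say $$1 -> $$2 followed by $$2 -> $$3), the result of applying them depends on iteration order, which HashMap leaves unspecified. A small standalone sketch of the effect, using made-up variable names.

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

public class SubstitutionOrder {
    // Apply each mapping in the map's iteration order to a single variable name.
    static String substitute(String var, Map<String, String> mappings) {
        for (Map.Entry<String, String> e : mappings.entrySet()) {
            if (var.equals(e.getKey())) {
                var = e.getValue();
            }
        }
        return var;
    }

    public static void main(String[] args) {
        // Chained substitutions: $$1 -> $$2 was recorded before $$2 -> $$3.
        Map<String, String> ordered = new LinkedHashMap<>();
        ordered.put("$$1", "$$2");
        ordered.put("$$2", "$$3");
        System.out.println(substitute("$$1", ordered)); // $$3: both steps applied, in insertion order

        // With a HashMap the iteration order is unspecified, so the same input
        // may yield $$2 or $$3 depending on which entry is visited first.
        Map<String, String> unordered = new HashMap<>(ordered);
        System.out.println(substitute("$$1", unordered)); // order-dependent
    }
}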
diff --git a/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/subplan/InlineAllNtsInSubplanVisitor.java b/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/subplan/InlineAllNtsInSubplanVisitor.java
index b061066..8e5fa79 100644
--- a/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/subplan/InlineAllNtsInSubplanVisitor.java
+++ b/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/subplan/InlineAllNtsInSubplanVisitor.java
@@ -23,10 +23,11 @@
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
+import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
-import java.util.Map.Entry;
import java.util.Set;
+import java.util.Map.Entry;
import org.apache.asterix.lang.common.util.FunctionUtil;
import org.apache.asterix.om.base.AString;
@@ -66,7 +67,6 @@
import org.apache.hyracks.algebricks.core.algebra.operators.logical.MaterializeOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.NestedTupleSourceOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.OrderOperator;
-import org.apache.hyracks.algebricks.core.algebra.operators.logical.OrderOperator.IOrder;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.PartitioningSplitOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.ProjectOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.ReplicateOperator;
@@ -78,6 +78,7 @@
import org.apache.hyracks.algebricks.core.algebra.operators.logical.UnionAllOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.UnnestMapOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.UnnestOperator;
+import org.apache.hyracks.algebricks.core.algebra.operators.logical.OrderOperator.IOrder;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.visitors.LogicalOperatorDeepCopyWithNewVariablesVisitor;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.visitors.VariableUtilities;
import org.apache.hyracks.algebricks.core.algebra.plan.ALogicalPlanImpl;
@@ -135,7 +136,8 @@
// Maps live variables at <code>subplanInputOperator</code> to variables in
// the flattened nested plan.
- private final Map<LogicalVariable, LogicalVariable> subplanInputVarToCurrentVarMap = new HashMap<>();
+ private final LinkedHashMap<LogicalVariable, LogicalVariable> subplanInputVarToCurrentVarMap = new
+ LinkedHashMap<>();
// Maps variables in the flattened nested plan to live variables at
// <code>subplanInputOperator</code>.
@@ -154,7 +156,7 @@
/**
* @param context
* the optimization context
- * @param subplanInputOperator
+ * @param subplanOperator
* the target subplan operator, whose input is to
* be inlined.
* @throws AlgebricksException
@@ -625,8 +627,6 @@
*
* @param op
* the logical operator for aggregate or running aggregate.
- * @param keyVarsToEnforce
- * the set of variables that needs to preserve.
* @return the wrapped group-by operator if {@code correlatedKeyVars} is not
* empty, and {@code op} otherwise.
* @throws AlgebricksException
@@ -637,7 +637,9 @@
return op;
}
GroupByOperator gbyOp = new GroupByOperator();
- for (LogicalVariable keyVar : correlatedKeyVars) {
+ // Creates a copy of correlatedKeyVars to fix the ConcurrentModificationException in ASTERIXDB-1581.
+ List<LogicalVariable> copyOfCorrelatedKeyVars = new ArrayList<>(correlatedKeyVars);
+ for (LogicalVariable keyVar : copyOfCorrelatedKeyVars) {
// This limits the visitor to only being applied to a nested logical
// plan inside a Subplan operator,
// where the keyVarsToEnforce forms a candidate key which can
diff --git a/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/subplan/InlineSubplanInputForNestedTupleSourceRule.java b/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/subplan/InlineSubplanInputForNestedTupleSourceRule.java
index 1881461..2e368c0 100644
--- a/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/subplan/InlineSubplanInputForNestedTupleSourceRule.java
+++ b/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/subplan/InlineSubplanInputForNestedTupleSourceRule.java
@@ -19,8 +19,8 @@
package org.apache.asterix.optimizer.rules.subplan;
import java.util.ArrayList;
-import java.util.HashMap;
import java.util.HashSet;
+import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -48,9 +48,9 @@
import org.apache.hyracks.algebricks.core.algebra.operators.logical.LeftOuterJoinOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.NestedTupleSourceOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.OrderOperator;
-import org.apache.hyracks.algebricks.core.algebra.operators.logical.OrderOperator.IOrder;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.SelectOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.SubplanOperator;
+import org.apache.hyracks.algebricks.core.algebra.operators.logical.OrderOperator.IOrder;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.visitors.VariableUtilities;
import org.apache.hyracks.algebricks.core.algebra.plan.ALogicalPlanImpl;
import org.apache.hyracks.algebricks.core.algebra.util.OperatorManipulationUtil;
@@ -267,22 +267,26 @@
if (context.checkIfInDontApplySet(this, opRef.getValue())) {
return false;
}
- Pair<Boolean, Map<LogicalVariable, LogicalVariable>> result = rewriteSubplanOperator(opRef, context);
+ Pair<Boolean, LinkedHashMap<LogicalVariable, LogicalVariable>> result = rewriteSubplanOperator(opRef, context);
hasRun = true;
return result.first;
}
- private Pair<Boolean, Map<LogicalVariable, LogicalVariable>> rewriteSubplanOperator(Mutable<ILogicalOperator> opRef,
+ private Pair<Boolean, LinkedHashMap<LogicalVariable, LogicalVariable>> rewriteSubplanOperator(
+ Mutable<ILogicalOperator> opRef,
IOptimizationContext context) throws AlgebricksException {
AbstractLogicalOperator op = (AbstractLogicalOperator) opRef.getValue();
+ // Recursively traverses input operators before rewriting the current operator.
+ Pair<Boolean, LinkedHashMap<LogicalVariable, LogicalVariable>> changedAndVarMap = traverseNonSubplanOperator(op,
+ context);
if (op.getOperatorTag() != LogicalOperatorTag.SUBPLAN) {
- // Traverses non-subplan operators.
- return traverseNonSubplanOperator(op, context);
+ return changedAndVarMap;
}
+
/**
* Apply the special join-based rewriting.
*/
- Pair<Boolean, Map<LogicalVariable, LogicalVariable>> result = applySpecialFlattening(opRef, context);
+ Pair<Boolean, LinkedHashMap<LogicalVariable, LogicalVariable>> result = applySpecialFlattening(opRef, context);
if (!result.first) {
/**
* If the special join-based rewriting does not apply, apply the general
@@ -290,7 +294,12 @@
*/
result = applyGeneralFlattening(opRef, context);
}
- return result;
+ LinkedHashMap<LogicalVariable, LogicalVariable> returnedMap = new LinkedHashMap<>();
+ // Adds variable mappings from input operators.
+ returnedMap.putAll(changedAndVarMap.second);
+ // Adds variable mappings resulting from the rewriting of the current operator.
+ returnedMap.putAll(result.second);
+ return new Pair<>(result.first || changedAndVarMap.first, returnedMap);
}
/***
@@ -302,15 +311,17 @@
* @return
* @throws AlgebricksException
*/
- private Pair<Boolean, Map<LogicalVariable, LogicalVariable>> traverseNonSubplanOperator(ILogicalOperator op,
+ private Pair<Boolean, LinkedHashMap<LogicalVariable, LogicalVariable>> traverseNonSubplanOperator(
+ ILogicalOperator op,
IOptimizationContext context) throws AlgebricksException {
Set<LogicalVariable> liveVars = new HashSet<>();
VariableUtilities.getLiveVariables(op, liveVars);
- Map<LogicalVariable, LogicalVariable> replacedVarMap = new HashMap<LogicalVariable, LogicalVariable>();
- Map<LogicalVariable, LogicalVariable> replacedVarMapForAncestor = new HashMap<LogicalVariable, LogicalVariable>();
+ LinkedHashMap<LogicalVariable, LogicalVariable> replacedVarMap = new LinkedHashMap<>();
+ LinkedHashMap<LogicalVariable, LogicalVariable> replacedVarMapForAncestor = new LinkedHashMap<>();
boolean changed = false;
for (Mutable<ILogicalOperator> childrenRef : op.getInputs()) {
- Pair<Boolean, Map<LogicalVariable, LogicalVariable>> resultFromChild = rewriteSubplanOperator(childrenRef,
+ Pair<Boolean, LinkedHashMap<LogicalVariable, LogicalVariable>> resultFromChild = rewriteSubplanOperator(
+ childrenRef,
context);
changed = changed || resultFromChild.first;
for (Map.Entry<LogicalVariable, LogicalVariable> entry : resultFromChild.second.entrySet()) {
@@ -323,18 +334,18 @@
}
VariableUtilities.substituteVariables(op, replacedVarMap, context);
context.computeAndSetTypeEnvironmentForOperator(op);
- return new Pair<Boolean, Map<LogicalVariable, LogicalVariable>>(changed, replacedVarMapForAncestor);
+ return new Pair<>(changed, replacedVarMapForAncestor);
}
- private Pair<Boolean, Map<LogicalVariable, LogicalVariable>> applyGeneralFlattening(Mutable<ILogicalOperator> opRef,
+ private Pair<Boolean, LinkedHashMap<LogicalVariable, LogicalVariable>> applyGeneralFlattening(
+ Mutable<ILogicalOperator> opRef,
IOptimizationContext context) throws AlgebricksException {
SubplanOperator subplanOp = (SubplanOperator) opRef.getValue();
if (!SubplanFlatteningUtil.containsOperators(subplanOp,
ImmutableSet.of(LogicalOperatorTag.DATASOURCESCAN, LogicalOperatorTag.INNERJOIN,
// We don't have nested runtime for union-all and distinct hence we have to include them here.
LogicalOperatorTag.LEFTOUTERJOIN, LogicalOperatorTag.UNIONALL, LogicalOperatorTag.DISTINCT))) {
- // Traverses the operator as if it is not a subplan.
- return traverseNonSubplanOperator(subplanOp, context);
+ return new Pair<>(false, new LinkedHashMap<>());
}
Mutable<ILogicalOperator> inputOpRef = subplanOp.getInputs().get(0);
ILogicalOperator inputOpBackup = inputOpRef.getValue();
@@ -344,7 +355,7 @@
ILogicalOperator inputOp = primaryOpAndVars.first;
Set<LogicalVariable> primaryKeyVars = primaryOpAndVars.second;
inputOpRef.setValue(inputOp);
- Set<LogicalVariable> inputLiveVars = new HashSet<LogicalVariable>();
+ Set<LogicalVariable> inputLiveVars = new HashSet<>();
VariableUtilities.getLiveVariables(inputOp, inputLiveVars);
Pair<Map<LogicalVariable, LogicalVariable>, List<Pair<IOrder, Mutable<ILogicalExpression>>>> varMapAndOrderExprs = SubplanFlatteningUtil
@@ -352,32 +363,31 @@
Map<LogicalVariable, LogicalVariable> varMap = varMapAndOrderExprs.first;
if (varMap == null) {
inputOpRef.setValue(inputOpBackup);
- // Traverses the operator as if it is not a subplan.
- return traverseNonSubplanOperator(subplanOp, context);
+ return new Pair<>(false, new LinkedHashMap<>());
}
- Mutable<ILogicalOperator> rightInputOpRef = subplanOp.getNestedPlans().get(0).getRoots().get(0).getValue()
- .getInputs().get(0);
+ Mutable<ILogicalOperator> lowestAggregateRefInSubplan = SubplanFlatteningUtil
+ .findLowestAggregate(subplanOp.getNestedPlans().get(0).getRoots().get(0));
+ Mutable<ILogicalOperator> rightInputOpRef = lowestAggregateRefInSubplan.getValue().getInputs().get(0);
ILogicalOperator rightInputOp = rightInputOpRef.getValue();
// Creates a variable to indicate whether a left input tuple is killed in the plan rooted at rightInputOp.
LogicalVariable assignVar = context.newVar();
- ILogicalOperator assignOp = new AssignOperator(assignVar,
- new MutableObject<ILogicalExpression>(ConstantExpression.TRUE));
+ ILogicalOperator assignOp = new AssignOperator(assignVar, new MutableObject<>(ConstantExpression.TRUE));
assignOp.getInputs().add(rightInputOpRef);
context.computeAndSetTypeEnvironmentForOperator(assignOp);
- rightInputOpRef = new MutableObject<ILogicalOperator>(assignOp);
+ rightInputOpRef = new MutableObject<>(assignOp);
// Constructs the join predicate for the leftOuter join.
- List<Mutable<ILogicalExpression>> joinPredicates = new ArrayList<Mutable<ILogicalExpression>>();
+ List<Mutable<ILogicalExpression>> joinPredicates = new ArrayList<>();
for (LogicalVariable liveVar : primaryKeyVars) {
- List<Mutable<ILogicalExpression>> arguments = new ArrayList<Mutable<ILogicalExpression>>();
- arguments.add(new MutableObject<ILogicalExpression>(new VariableReferenceExpression(liveVar)));
+ List<Mutable<ILogicalExpression>> arguments = new ArrayList<>();
+ arguments.add(new MutableObject<>(new VariableReferenceExpression(liveVar)));
LogicalVariable rightVar = varMap.get(liveVar);
- arguments.add(new MutableObject<ILogicalExpression>(new VariableReferenceExpression(rightVar)));
+ arguments.add(new MutableObject<>(new VariableReferenceExpression(rightVar)));
ILogicalExpression expr = new ScalarFunctionCallExpression(
FunctionUtil.getFunctionInfo(AlgebricksBuiltinFunctions.EQ), arguments);
- joinPredicates.add(new MutableObject<ILogicalExpression>(expr));
+ joinPredicates.add(new MutableObject<>(expr));
}
ILogicalExpression joinExpr = joinPredicates.size() > 1
@@ -385,21 +395,20 @@
joinPredicates)
: joinPredicates.size() > 0 ? joinPredicates.get(0).getValue() : ConstantExpression.TRUE;
LeftOuterJoinOperator leftOuterJoinOp = new LeftOuterJoinOperator(
- new MutableObject<ILogicalExpression>(joinExpr), inputOpRef, rightInputOpRef);
+ new MutableObject<>(joinExpr), inputOpRef, rightInputOpRef);
OperatorManipulationUtil.computeTypeEnvironmentBottomUp(rightInputOp, context);
context.computeAndSetTypeEnvironmentForOperator(leftOuterJoinOp);
// Creates group-by operator.
List<Pair<LogicalVariable, Mutable<ILogicalExpression>>> groupByList = new ArrayList<Pair<LogicalVariable, Mutable<ILogicalExpression>>>();
List<Pair<LogicalVariable, Mutable<ILogicalExpression>>> groupByDecorList = new ArrayList<Pair<LogicalVariable, Mutable<ILogicalExpression>>>();
- List<ILogicalPlan> nestedPlans = new ArrayList<ILogicalPlan>();
+ List<ILogicalPlan> nestedPlans = new ArrayList<>();
GroupByOperator groupbyOp = new GroupByOperator(groupByList, groupByDecorList, nestedPlans);
- Map<LogicalVariable, LogicalVariable> replacedVarMap = new HashMap<>();
+ LinkedHashMap<LogicalVariable, LogicalVariable> replacedVarMap = new LinkedHashMap<>();
for (LogicalVariable liveVar : primaryKeyVars) {
LogicalVariable newVar = context.newVar();
- groupByList.add(new Pair<LogicalVariable, Mutable<ILogicalExpression>>(newVar,
- new MutableObject<ILogicalExpression>(new VariableReferenceExpression(liveVar))));
+ groupByList.add(new Pair<>(newVar, new MutableObject<>(new VariableReferenceExpression(liveVar))));
// Adds variables for replacements in ancestors.
replacedVarMap.put(liveVar, newVar);
}
@@ -407,44 +416,41 @@
if (primaryKeyVars.contains(liveVar)) {
continue;
}
- groupByDecorList.add(new Pair<LogicalVariable, Mutable<ILogicalExpression>>(null,
- new MutableObject<ILogicalExpression>(new VariableReferenceExpression(liveVar))));
+ groupByDecorList.add(new Pair<>(null, new MutableObject<>(new VariableReferenceExpression(liveVar))));
}
// Sets up the nested plan for the groupby operator.
Mutable<ILogicalOperator> aggOpRef = subplanOp.getNestedPlans().get(0).getRoots().get(0);
- aggOpRef.getValue().getInputs().clear();
-
- Mutable<ILogicalOperator> currentOpRef = aggOpRef;
+ lowestAggregateRefInSubplan.getValue().getInputs().clear(); // Clears the input of the lowest aggregate.
+ Mutable<ILogicalOperator> currentOpRef = lowestAggregateRefInSubplan;
// Adds an optional order operator.
List<Pair<IOrder, Mutable<ILogicalExpression>>> orderExprs = varMapAndOrderExprs.second;
if (!orderExprs.isEmpty()) {
OrderOperator orderOp = new OrderOperator(orderExprs);
- currentOpRef = new MutableObject<ILogicalOperator>(orderOp);
- aggOpRef.getValue().getInputs().add(currentOpRef);
+ currentOpRef = new MutableObject<>(orderOp);
+ lowestAggregateRefInSubplan.getValue().getInputs().add(currentOpRef);
}
// Adds a select operator into the nested plan for group-by to remove tuples with NULL on {@code assignVar}, i.e.,
// subplan input tuples that are filtered out within a subplan.
- Mutable<ILogicalExpression> filterVarExpr = new MutableObject<ILogicalExpression>(
+ Mutable<ILogicalExpression> filterVarExpr = new MutableObject<>(
new VariableReferenceExpression(assignVar));
- List<Mutable<ILogicalExpression>> args = new ArrayList<Mutable<ILogicalExpression>>();
+ List<Mutable<ILogicalExpression>> args = new ArrayList<>();
args.add(filterVarExpr);
- List<Mutable<ILogicalExpression>> argsForNotFunction = new ArrayList<Mutable<ILogicalExpression>>();
- argsForNotFunction.add(new MutableObject<ILogicalExpression>(new ScalarFunctionCallExpression(
+ List<Mutable<ILogicalExpression>> argsForNotFunction = new ArrayList<>();
+ argsForNotFunction.add(new MutableObject<>(new ScalarFunctionCallExpression(
FunctionUtil.getFunctionInfo(AsterixBuiltinFunctions.IS_MISSING), args)));
SelectOperator selectOp = new SelectOperator(
- new MutableObject<ILogicalExpression>(new ScalarFunctionCallExpression(
+ new MutableObject<>(new ScalarFunctionCallExpression(
FunctionUtil.getFunctionInfo(AsterixBuiltinFunctions.NOT), argsForNotFunction)),
false, null);
- currentOpRef.getValue().getInputs().add(new MutableObject<ILogicalOperator>(selectOp));
+ currentOpRef.getValue().getInputs().add(new MutableObject<>(selectOp));
- selectOp.getInputs().add(new MutableObject<ILogicalOperator>(
- new NestedTupleSourceOperator(new MutableObject<ILogicalOperator>(groupbyOp))));
- List<Mutable<ILogicalOperator>> nestedRoots = new ArrayList<Mutable<ILogicalOperator>>();
+ selectOp.getInputs().add(new MutableObject<>(new NestedTupleSourceOperator(new MutableObject<>(groupbyOp))));
+ List<Mutable<ILogicalOperator>> nestedRoots = new ArrayList<>();
nestedRoots.add(aggOpRef);
nestedPlans.add(new ALogicalPlanImpl(nestedRoots));
- groupbyOp.getInputs().add(new MutableObject<ILogicalOperator>(leftOuterJoinOp));
+ groupbyOp.getInputs().add(new MutableObject<>(leftOuterJoinOp));
// Replaces subplan with the group-by operator.
opRef.setValue(groupbyOp);
@@ -452,23 +458,25 @@
// Recursively applies this rule to the nested plan of the subplan operator,
// for the case where there are nested subplan operators within {@code subplanOp}.
- Pair<Boolean, Map<LogicalVariable, LogicalVariable>> result = rewriteSubplanOperator(rightInputOpRef, context);
+ Pair<Boolean, LinkedHashMap<LogicalVariable, LogicalVariable>> result = rewriteSubplanOperator(rightInputOpRef,
+ context);
VariableUtilities.substituteVariables(leftOuterJoinOp, result.second, context);
VariableUtilities.substituteVariables(groupbyOp, result.second, context);
// No var mapping from the right input operator should be populated up.
- return new Pair<Boolean, Map<LogicalVariable, LogicalVariable>>(true, replacedVarMap);
+ return new Pair<>(true, replacedVarMap);
}
- private Pair<Boolean, Map<LogicalVariable, LogicalVariable>> applySpecialFlattening(Mutable<ILogicalOperator> opRef,
+ private Pair<Boolean, LinkedHashMap<LogicalVariable, LogicalVariable>> applySpecialFlattening(
+ Mutable<ILogicalOperator> opRef,
IOptimizationContext context) throws AlgebricksException {
SubplanOperator subplanOp = (SubplanOperator) opRef.getValue();
Mutable<ILogicalOperator> inputOpRef = subplanOp.getInputs().get(0);
- Map<LogicalVariable, LogicalVariable> replacedVarMap = new HashMap<>();
+ LinkedHashMap<LogicalVariable, LogicalVariable> replacedVarMap = new LinkedHashMap<>();
// Recursively applies this rule to the nested plan of the subplan operator,
// for the case where there are nested subplan operators within {@code subplanOp}.
- Pair<Boolean, Map<LogicalVariable, LogicalVariable>> result = rewriteSubplanOperator(
+ Pair<Boolean, LinkedHashMap<LogicalVariable, LogicalVariable>> result = rewriteSubplanOperator(
subplanOp.getNestedPlans().get(0).getRoots().get(0), context);
ILogicalOperator inputOpBackup = inputOpRef.getValue();
@@ -485,7 +493,7 @@
.inlineLeftNtsInSubplanJoin(subplanOp, context);
if (notNullVarsAndTopJoinRef.first == null) {
inputOpRef.setValue(inputOpBackup);
- return new Pair<Boolean, Map<LogicalVariable, LogicalVariable>>(false, replacedVarMap);
+ return new Pair<>(false, replacedVarMap);
}
Set<LogicalVariable> notNullVars = notNullVarsAndTopJoinRef.first;
@@ -496,12 +504,9 @@
List<Pair<LogicalVariable, Mutable<ILogicalExpression>>> groupByDecorList = new ArrayList<Pair<LogicalVariable, Mutable<ILogicalExpression>>>();
GroupByOperator groupbyOp = new GroupByOperator(groupByList, groupByDecorList, subplanOp.getNestedPlans());
- Map<LogicalVariable, LogicalVariable> gbyVarMap = new HashMap<LogicalVariable, LogicalVariable>();
for (LogicalVariable coverVar : primaryKeyVars) {
LogicalVariable newVar = context.newVar();
- gbyVarMap.put(coverVar, newVar);
- groupByList.add(new Pair<LogicalVariable, Mutable<ILogicalExpression>>(newVar,
- new MutableObject<ILogicalExpression>(new VariableReferenceExpression(coverVar))));
+ groupByList.add(new Pair<>(newVar, new MutableObject<>(new VariableReferenceExpression(coverVar))));
// Adds variables for replacements in ancestors.
replacedVarMap.put(coverVar, newVar);
}
@@ -509,44 +514,43 @@
if (primaryKeyVars.contains(liveVar)) {
continue;
}
- groupByDecorList.add(new Pair<LogicalVariable, Mutable<ILogicalExpression>>(null,
- new MutableObject<ILogicalExpression>(new VariableReferenceExpression(liveVar))));
+ groupByDecorList.add(new Pair<>(null, new MutableObject<>(new VariableReferenceExpression(liveVar))));
}
- groupbyOp.getInputs().add(new MutableObject<ILogicalOperator>(topJoinRef.getValue()));
+ groupbyOp.getInputs().add(new MutableObject<>(topJoinRef.getValue()));
- if (notNullVars.size() > 0) {
+ if (!notNullVars.isEmpty()) {
// Adds a select operator into the nested plan for group-by to remove tuples with NULL on {@code assignVar}, i.e.,
// subplan input tuples that are filtered out within a subplan.
List<Mutable<ILogicalExpression>> nullCheckExprRefs = new ArrayList<>();
for (LogicalVariable notNullVar : notNullVars) {
- Mutable<ILogicalExpression> filterVarExpr = new MutableObject<ILogicalExpression>(
+ Mutable<ILogicalExpression> filterVarExpr = new MutableObject<>(
new VariableReferenceExpression(notNullVar));
- List<Mutable<ILogicalExpression>> args = new ArrayList<Mutable<ILogicalExpression>>();
+ List<Mutable<ILogicalExpression>> args = new ArrayList<>();
args.add(filterVarExpr);
- List<Mutable<ILogicalExpression>> argsForNotFunction = new ArrayList<Mutable<ILogicalExpression>>();
- argsForNotFunction.add(new MutableObject<ILogicalExpression>(new ScalarFunctionCallExpression(
+ List<Mutable<ILogicalExpression>> argsForNotFunction = new ArrayList<>();
+ argsForNotFunction.add(new MutableObject<>(new ScalarFunctionCallExpression(
FunctionUtil.getFunctionInfo(AsterixBuiltinFunctions.IS_MISSING), args)));
- nullCheckExprRefs.add(new MutableObject<ILogicalExpression>(new ScalarFunctionCallExpression(
+ nullCheckExprRefs.add(new MutableObject<>(new ScalarFunctionCallExpression(
FunctionUtil.getFunctionInfo(AsterixBuiltinFunctions.NOT), argsForNotFunction)));
}
Mutable<ILogicalExpression> selectExprRef = nullCheckExprRefs.size() > 1
- ? new MutableObject<ILogicalExpression>(new ScalarFunctionCallExpression(
+ ? new MutableObject<>(new ScalarFunctionCallExpression(
FunctionUtil.getFunctionInfo(AsterixBuiltinFunctions.AND), nullCheckExprRefs))
: nullCheckExprRefs.get(0);
SelectOperator selectOp = new SelectOperator(selectExprRef, false, null);
topJoinRef.setValue(selectOp);
- selectOp.getInputs().add(new MutableObject<ILogicalOperator>(
- new NestedTupleSourceOperator(new MutableObject<ILogicalOperator>(groupbyOp))));
+ selectOp.getInputs()
+ .add(new MutableObject<>(new NestedTupleSourceOperator(new MutableObject<>(groupbyOp))));
} else {
// The original join operator in the Subplan is a left-outer join.
// Therefore, no null-check variable is injected and no SelectOperator needs to be added.
- topJoinRef.setValue(new NestedTupleSourceOperator(new MutableObject<ILogicalOperator>(groupbyOp)));
+ topJoinRef.setValue(new NestedTupleSourceOperator(new MutableObject<>(groupbyOp)));
}
opRef.setValue(groupbyOp);
OperatorManipulationUtil.computeTypeEnvironmentBottomUp(groupbyOp, context);
VariableUtilities.substituteVariables(groupbyOp, result.second, context);
replacedVarMap.putAll(result.second);
- return new Pair<Boolean, Map<LogicalVariable, LogicalVariable>>(true, replacedVarMap);
+ return new Pair<>(true, replacedVarMap);
}
}
diff --git a/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/subplan/SubplanFlatteningUtil.java b/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/subplan/SubplanFlatteningUtil.java
index 6d91923..377e96d 100644
--- a/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/subplan/SubplanFlatteningUtil.java
+++ b/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/optimizer/rules/subplan/SubplanFlatteningUtil.java
@@ -31,8 +31,8 @@
import org.apache.hyracks.algebricks.core.algebra.base.IOptimizationContext;
import org.apache.hyracks.algebricks.core.algebra.base.LogicalOperatorTag;
import org.apache.hyracks.algebricks.core.algebra.base.LogicalVariable;
-import org.apache.hyracks.algebricks.core.algebra.operators.logical.OrderOperator.IOrder;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.SubplanOperator;
+import org.apache.hyracks.algebricks.core.algebra.operators.logical.OrderOperator.IOrder;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.visitors.VariableUtilities;
import org.apache.hyracks.algebricks.core.algebra.util.OperatorManipulationUtil;
@@ -62,7 +62,7 @@
InlineAllNtsInSubplanVisitor visitor = new InlineAllNtsInSubplanVisitor(context, subplanOp);
// Rewrites the query plan.
- ILogicalOperator topOp = subplanOp.getNestedPlans().get(0).getRoots().get(0).getValue();
+ ILogicalOperator topOp = findLowestAggregate(subplanOp.getNestedPlans().get(0).getRoots().get(0)).getValue();
ILogicalOperator opToVisit = topOp.getInputs().get(0).getValue();
ILogicalOperator result = opToVisit.accept(visitor, null);
topOp.getInputs().get(0).setValue(result);
@@ -143,6 +143,54 @@
}
/**
+ * Checks whether the query plan rooted at {@code currentOp} contains any of the given operators,
+ * taking nested subplans into account.
+ *
+ * @param currentOp
+ * the current operator
+ * @param interestedOperatorTags
+ * the tags of the operators to look for
+ * @return true if {@code currentOp} contains any of the interested operators; false otherwise.
+ */
+ public static boolean containsOperatorsInternal(ILogicalOperator currentOp,
+ Set<LogicalOperatorTag> interestedOperatorTags) {
+ if (interestedOperatorTags.contains(currentOp.getOperatorTag())) {
+ return true;
+ }
+ if (currentOp.getOperatorTag() == LogicalOperatorTag.SUBPLAN
+ && containsOperators((SubplanOperator) currentOp, interestedOperatorTags)) {
+ return true;
+ }
+ for (Mutable<ILogicalOperator> childRef : currentOp.getInputs()) {
+ if (containsOperatorsInternal(childRef.getValue(), interestedOperatorTags)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Finds the lowest aggregate operator that should be put into a nested aggregation pipeline within a group-by
+ * operator.
+ * <p/>
+ * Note that neither binary input operators nor data scan can be put into a group by operator.
+ *
+ * @param currentOpRef,
+ * the current root operator reference to look at.
+ * @return the operator reference of the lowest qualified aggregate operator.
+ */
+ public static Mutable<ILogicalOperator> findLowestAggregate(Mutable<ILogicalOperator> currentOpRef) {
+ ILogicalOperator currentOp = currentOpRef.getValue();
+ // Neither binary input operators nor data scan can be put into a group by operator.
+ if (currentOp.getInputs().size() != 1 || currentOp.getOperatorTag() == LogicalOperatorTag.DATASOURCESCAN) {
+ return null;
+ }
+ Mutable<ILogicalOperator> childReturn = findLowestAggregate(currentOp.getInputs().get(0));
+ if (childReturn == null) {
+ return currentOp.getOperatorTag() == LogicalOperatorTag.AGGREGATE ? currentOpRef : null;
+ }
+ return childReturn;
+ }
+
+ /**
* Determine whether a subplan could be rewritten as a join-related special case.
* The conditions include:
* a. there is a join (let's call it J1.) in the nested plan,
@@ -174,30 +222,4 @@
return new Pair<Boolean, ILogicalOperator>(true, visitor.getQualifiedNts());
}
- /**
- * Whether the query plan rooted {@code currentOp} contains a data source scan operator,
- * with considering nested subplans.
- *
- * @param currentOp
- * the current operator
- * @return true if {@code currentOp} contains a data source scan operator; false otherwise.
- */
- private static boolean containsOperatorsInternal(ILogicalOperator currentOp,
- Set<LogicalOperatorTag> interestedOperatorTags) {
- if (interestedOperatorTags.contains(currentOp.getOperatorTag())) {
- return true;
- }
- if (currentOp.getOperatorTag() == LogicalOperatorTag.SUBPLAN) {
- if (containsOperators((SubplanOperator) currentOp, interestedOperatorTags)) {
- return true;
- }
- }
- for (Mutable<ILogicalOperator> childRef : currentOp.getInputs()) {
- if (containsOperatorsInternal(childRef.getValue(), interestedOperatorTags)) {
- return true;
- }
- }
- return false;
- }
-
}
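The new findLowestAggregate above walks the single-input chain under the nested-plan root and returns the deepest AGGREGATE it reaches before a multi-input operator or a data source scan; applyGeneralFlattening then attaches the group-by pipeline under that aggregate rather than under the root, which is what the recursive (nested-subplan) case needs. A toy re-implementation of the same recursion over a minimal node type, for illustration only.

import java.util.List;

public class LowestAggregateDemo {
    enum Tag { AGGREGATE, ASSIGN, SELECT, INNERJOIN, DATASOURCESCAN, NESTEDTUPLESOURCE }

    static final class Node {
        final Tag tag;
        final List<Node> inputs;
        Node(Tag tag, Node... inputs) { this.tag = tag; this.inputs = List.of(inputs); }
    }

    // Mirrors the recursion in SubplanFlatteningUtil.findLowestAggregate:
    // stop at operators without exactly one input and at data source scans;
    // otherwise prefer an aggregate found deeper in the chain.
    static Node findLowestAggregate(Node current) {
        if (current.inputs.size() != 1 || current.tag == Tag.DATASOURCESCAN) {
            return null;
        }
        Node childReturn = findLowestAggregate(current.inputs.get(0));
        if (childReturn == null) {
            return current.tag == Tag.AGGREGATE ? current : null;
        }
        return childReturn;
    }

    public static void main(String[] args) {
        // agg(outer) <- assign <- agg(inner) <- select <- nested-tuple-source
        Node nts = new Node(Tag.NESTEDTUPLESOURCE);
        Node select = new Node(Tag.SELECT, nts);
        Node innerAgg = new Node(Tag.AGGREGATE, select);
        Node assign = new Node(Tag.ASSIGN, innerAgg);
        Node outerAgg = new Node(Tag.AGGREGATE, assign);
        System.out.println(findLowestAggregate(outerAgg) == innerAgg); // true
    }
}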
diff --git a/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/translator/LangExpressionToPlanTranslator.java b/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/translator/LangExpressionToPlanTranslator.java
index 56a5a57..b73ef82 100644
--- a/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/translator/LangExpressionToPlanTranslator.java
+++ b/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/translator/LangExpressionToPlanTranslator.java
@@ -22,13 +22,13 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
-import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
+import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
-import java.util.Map.Entry;
import java.util.Set;
+import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.asterix.algebra.base.ILangExpressionToPlanTranslator;
@@ -39,15 +39,15 @@
import org.apache.asterix.common.functions.FunctionSignature;
import org.apache.asterix.lang.aql.util.RangeMapBuilder;
import org.apache.asterix.lang.common.base.Expression;
-import org.apache.asterix.lang.common.base.Expression.Kind;
import org.apache.asterix.lang.common.base.ILangExpression;
import org.apache.asterix.lang.common.base.Statement;
+import org.apache.asterix.lang.common.base.Expression.Kind;
import org.apache.asterix.lang.common.clause.GroupbyClause;
import org.apache.asterix.lang.common.clause.LetClause;
import org.apache.asterix.lang.common.clause.LimitClause;
import org.apache.asterix.lang.common.clause.OrderbyClause;
-import org.apache.asterix.lang.common.clause.OrderbyClause.OrderModifier;
import org.apache.asterix.lang.common.clause.WhereClause;
+import org.apache.asterix.lang.common.clause.OrderbyClause.OrderModifier;
import org.apache.asterix.lang.common.expression.CallExpr;
import org.apache.asterix.lang.common.expression.FieldAccessor;
import org.apache.asterix.lang.common.expression.FieldBinding;
@@ -55,14 +55,14 @@
import org.apache.asterix.lang.common.expression.IfExpr;
import org.apache.asterix.lang.common.expression.IndexAccessor;
import org.apache.asterix.lang.common.expression.ListConstructor;
-import org.apache.asterix.lang.common.expression.ListConstructor.Type;
import org.apache.asterix.lang.common.expression.LiteralExpr;
import org.apache.asterix.lang.common.expression.OperatorExpr;
import org.apache.asterix.lang.common.expression.QuantifiedExpression;
-import org.apache.asterix.lang.common.expression.QuantifiedExpression.Quantifier;
import org.apache.asterix.lang.common.expression.RecordConstructor;
import org.apache.asterix.lang.common.expression.UnaryExpr;
import org.apache.asterix.lang.common.expression.VariableExpr;
+import org.apache.asterix.lang.common.expression.ListConstructor.Type;
+import org.apache.asterix.lang.common.expression.QuantifiedExpression.Quantifier;
import org.apache.asterix.lang.common.literal.StringLiteral;
import org.apache.asterix.lang.common.statement.FunctionDecl;
import org.apache.asterix.lang.common.statement.Query;
@@ -73,13 +73,13 @@
import org.apache.asterix.lang.common.visitor.base.AbstractQueryExpressionVisitor;
import org.apache.asterix.metadata.MetadataException;
import org.apache.asterix.metadata.MetadataManager;
-import org.apache.asterix.metadata.declared.AqlDataSource.AqlDataSourceType;
import org.apache.asterix.metadata.declared.AqlMetadataProvider;
import org.apache.asterix.metadata.declared.AqlSourceId;
import org.apache.asterix.metadata.declared.DatasetDataSource;
import org.apache.asterix.metadata.declared.LoadableDataSource;
import org.apache.asterix.metadata.declared.ResultSetDataSink;
import org.apache.asterix.metadata.declared.ResultSetSinkId;
+import org.apache.asterix.metadata.declared.AqlDataSource.AqlDataSourceType;
import org.apache.asterix.metadata.entities.Dataset;
import org.apache.asterix.metadata.entities.Function;
import org.apache.asterix.metadata.entities.InternalDatasetDetails;
@@ -115,15 +115,15 @@
import org.apache.hyracks.algebricks.core.algebra.base.LogicalVariable;
import org.apache.hyracks.algebricks.core.algebra.base.OperatorAnnotations;
import org.apache.hyracks.algebricks.core.algebra.expressions.AbstractFunctionCallExpression;
-import org.apache.hyracks.algebricks.core.algebra.expressions.AbstractFunctionCallExpression.FunctionKind;
import org.apache.hyracks.algebricks.core.algebra.expressions.AggregateFunctionCallExpression;
import org.apache.hyracks.algebricks.core.algebra.expressions.BroadcastExpressionAnnotation;
-import org.apache.hyracks.algebricks.core.algebra.expressions.BroadcastExpressionAnnotation.BroadcastSide;
import org.apache.hyracks.algebricks.core.algebra.expressions.ConstantExpression;
import org.apache.hyracks.algebricks.core.algebra.expressions.IExpressionAnnotation;
import org.apache.hyracks.algebricks.core.algebra.expressions.ScalarFunctionCallExpression;
import org.apache.hyracks.algebricks.core.algebra.expressions.UnnestingFunctionCallExpression;
import org.apache.hyracks.algebricks.core.algebra.expressions.VariableReferenceExpression;
+import org.apache.hyracks.algebricks.core.algebra.expressions.AbstractFunctionCallExpression.FunctionKind;
+import org.apache.hyracks.algebricks.core.algebra.expressions.BroadcastExpressionAnnotation.BroadcastSide;
import org.apache.hyracks.algebricks.core.algebra.functions.AlgebricksBuiltinFunctions;
import org.apache.hyracks.algebricks.core.algebra.functions.FunctionIdentifier;
import org.apache.hyracks.algebricks.core.algebra.functions.IFunctionInfo;
@@ -139,13 +139,13 @@
import org.apache.hyracks.algebricks.core.algebra.operators.logical.LimitOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.NestedTupleSourceOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.OrderOperator;
-import org.apache.hyracks.algebricks.core.algebra.operators.logical.OrderOperator.IOrder.OrderKind;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.ProjectOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.SelectOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.SinkOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.SubplanOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.UnionAllOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.UnnestOperator;
+import org.apache.hyracks.algebricks.core.algebra.operators.logical.OrderOperator.IOrder.OrderKind;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.visitors.LogicalOperatorDeepCopyWithNewVariablesVisitor;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.visitors.VariableUtilities;
import org.apache.hyracks.algebricks.core.algebra.plan.ALogicalPlanImpl;
@@ -1526,7 +1526,7 @@
* <code>currentOpRef</code> to replace variables properly.
* @throws AsterixException
*/
- private Map<LogicalVariable, LogicalVariable> eliminateSharedOperatorReference(
+ private LinkedHashMap<LogicalVariable, LogicalVariable> eliminateSharedOperatorReference(
Mutable<ILogicalOperator> currentOpRef, Set<Mutable<ILogicalOperator>> opRefSet) throws AsterixException {
try {
opRefSet.add(currentOpRef);
@@ -1547,7 +1547,7 @@
}
int childIndex = 0;
- Map<LogicalVariable, LogicalVariable> varMap = new HashMap<>();
+ LinkedHashMap<LogicalVariable, LogicalVariable> varMap = new LinkedHashMap<>();
for (Mutable<ILogicalOperator> childRef : currentOperator.getInputs()) {
if (opRefSet.contains(childRef)) {
// There is a shared operator reference in the query plan.
@@ -1555,7 +1555,8 @@
LogicalOperatorDeepCopyWithNewVariablesVisitor visitor =
new LogicalOperatorDeepCopyWithNewVariablesVisitor(context, null);
ILogicalOperator newChild = childRef.getValue().accept(visitor, null);
- Map<LogicalVariable, LogicalVariable> cloneVarMap = visitor.getInputToOutputVariableMapping();
+ LinkedHashMap<LogicalVariable, LogicalVariable> cloneVarMap = visitor
+ .getInputToOutputVariableMapping();
// Substitute variables according to the deep copy which generates new variables.
VariableUtilities.substituteVariables(currentOperator, cloneVarMap, null);
@@ -1568,7 +1569,7 @@
// Recursively eliminate shared operator reference for the operator subtree,
// even if it is a deep copy of some other one.
- Map<LogicalVariable, LogicalVariable> childVarMap = eliminateSharedOperatorReference(childRef,
+ LinkedHashMap<LogicalVariable, LogicalVariable> childVarMap = eliminateSharedOperatorReference(childRef,
opRefSet);
// Substitute variables according to the new subtree.
VariableUtilities.substituteVariables(currentOperator, childVarMap, null);
diff --git a/asterixdb/asterix-app/src/test/resources/optimizerts/queries/join-singletonbranch-2.sqlpp b/asterixdb/asterix-app/src/test/resources/optimizerts/queries/join-singletonbranch-2.sqlpp
new file mode 100644
index 0000000..6a85b6a
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/optimizerts/queries/join-singletonbranch-2.sqlpp
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+drop dataverse test if exists;
+
+create dataverse test;
+
+use test;
+
+create type FooType as {
+ fid: int32
+}
+
+create type BarType as {
+ bid: int32
+}
+
+create dataset Foo(FooType) primary key fid;
+
+create dataset Bar(BarType) primary key bid;
+
+SELECT *
+FROM (SELECT AVG(f.fee) foo_avg FROM Foo f) f
+JOIN Bar b
+ON b.fee < f.avg;
+
+drop dataverse test;
diff --git a/asterixdb/asterix-app/src/test/resources/optimizerts/queries/join-singletonbranch-3.sqlpp b/asterixdb/asterix-app/src/test/resources/optimizerts/queries/join-singletonbranch-3.sqlpp
new file mode 100644
index 0000000..e555352
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/optimizerts/queries/join-singletonbranch-3.sqlpp
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+drop dataverse test if exists;
+
+create dataverse test;
+
+use test;
+
+create type FooType as {
+ fid: int32
+}
+
+create type BarType as {
+ bid: int32
+}
+
+create dataset Foo(FooType) primary key fid;
+
+create dataset Bar(BarType) primary key bid;
+
+SELECT *
+FROM (SELECT AVG(f.fee) foo_avg FROM Foo f) f, Bar b
+WHERE b.fee < f.avg;
+
+drop dataverse test;
diff --git a/asterixdb/asterix-app/src/test/resources/optimizerts/queries/join-singletonbranch.sqlpp b/asterixdb/asterix-app/src/test/resources/optimizerts/queries/join-singletonbranch.sqlpp
new file mode 100644
index 0000000..2b92d97
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/optimizerts/queries/join-singletonbranch.sqlpp
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+drop dataverse test if exists;
+
+create dataverse test;
+
+use test;
+
+create type FooType as {
+ fid: int32
+}
+
+create type BarType as {
+ bid: int32
+}
+
+create dataset Foo(FooType) primary key fid;
+
+create dataset Bar(BarType) primary key bid;
+
+SELECT * FROM Bar b
+WHERE b.fee < (SELECT VALUE AVG(f.fee) FROM Foo f)[0];
+
+drop dataverse test;
diff --git a/asterixdb/asterix-app/src/test/resources/optimizerts/queries/tpcds/query-ASTERIXDB-1581-correlated.sqlpp b/asterixdb/asterix-app/src/test/resources/optimizerts/queries/tpcds/query-ASTERIXDB-1581-correlated.sqlpp
new file mode 100644
index 0000000..9f96f9b
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/optimizerts/queries/tpcds/query-ASTERIXDB-1581-correlated.sqlpp
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+drop dataverse tpcds if exists;
+create dataverse tpcds;
+
+use tpcds;
+
+create type tpcds.store_sales_type as closed {
+ ss_sold_date_sk: int64?,
+ ss_sold_time_sk: int64?,
+ ss_item_sk: int64,
+ ss_customer_sk: int64?,
+ ss_cdemo_sk: int64?,
+ ss_hdemo_sk: int64?,
+ ss_addr_sk: int64?,
+ ss_store_sk: int64?,
+ ss_promo_sk: int64?,
+ ss_ticket_number: int64,
+ ss_quantity: int64?,
+ ss_wholesale_cost: double?,
+ ss_list_price: double?,
+ ss_sales_price: double?,
+ ss_ext_discount_amt: double?,
+ ss_ext_sales_price: double?,
+ ss_ext_wholesale_cost: double?,
+ ss_ext_list_price: double?,
+ ss_ext_tax: double?,
+ ss_coupon_amt: double?,
+ ss_net_paid: double?,
+ ss_net_paid_inc_tax: double?,
+ ss_net_profit: double?
+}
+
+
+create type tpcds.item_type as closed {
+ i_item_sk: int64,
+ i_item_id: string,
+ i_rec_start_date: string?,
+ i_rec_end_date: string?,
+ i_item_desc: string?,
+ i_current_price: double?,
+ i_wholesale_cost: double?,
+ i_brand_id: int64? ,
+ i_brand: string?,
+ i_class_id: int64? ,
+ i_class: string?,
+ i_category_id: int64? ,
+ i_category: string?,
+ i_manufact_id: int64? ,
+ i_manufact: string?,
+ i_size: string?,
+ i_formulation: string?,
+ i_color: string?,
+ i_units: string?,
+ i_container: string?,
+ i_manager_id: int64?,
+ i_product_name: string?
+}
+
+create dataset store_sales (store_sales_type)
+primary key ss_item_sk, ss_ticket_number;
+
+create dataset item (item_type)
+primary key i_item_sk;
+
+select case when (select value count(ss)
+ from store_sales ss
+ where ss_quantity = item.i_item_sk)[0] < 25437
+ then (select avg(ss_ext_discount_amt)
+ from store_sales
+ where ss_quantity = item.i_item_sk)
+ else (select avg(ss_net_profit)
+ from store_sales
+ where ss_quantity = item.i_item_sk)
+ end bucket1
+from item
+where i_item_sk = 1;
+
+drop dataverse tpcds;
\ No newline at end of file
diff --git a/asterixdb/asterix-app/src/test/resources/optimizerts/queries/tpcds/query-ASTERIXDB-1581.sqlpp b/asterixdb/asterix-app/src/test/resources/optimizerts/queries/tpcds/query-ASTERIXDB-1581.sqlpp
new file mode 100644
index 0000000..1fecc6c
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/optimizerts/queries/tpcds/query-ASTERIXDB-1581.sqlpp
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+drop dataverse tpcds if exists;
+create dataverse tpcds;
+
+use tpcds;
+
+create type tpcds.store_sales_type as closed {
+ ss_sold_date_sk: int64?,
+ ss_sold_time_sk: int64?,
+ ss_item_sk: int64,
+ ss_customer_sk: int64?,
+ ss_cdemo_sk: int64?,
+ ss_hdemo_sk: int64?,
+ ss_addr_sk: int64?,
+ ss_store_sk: int64?,
+ ss_promo_sk: int64?,
+ ss_ticket_number: int64,
+ ss_quantity: int64?,
+ ss_wholesale_cost: double?,
+ ss_list_price: double?,
+ ss_sales_price: double?,
+ ss_ext_discount_amt: double?,
+ ss_ext_sales_price: double?,
+ ss_ext_wholesale_cost: double?,
+ ss_ext_list_price: double?,
+ ss_ext_tax: double?,
+ ss_coupon_amt: double?,
+ ss_net_paid: double?,
+ ss_net_paid_inc_tax: double?,
+ ss_net_profit: double?
+}
+
+
+create type tpcds.item_type as closed {
+ i_item_sk: int64,
+ i_item_id: string,
+ i_rec_start_date: string?,
+ i_rec_end_date: string?,
+ i_item_desc: string?,
+ i_current_price: double?,
+ i_wholesale_cost: double?,
+ i_brand_id: int64? ,
+ i_brand: string?,
+ i_class_id: int64? ,
+ i_class: string?,
+ i_category_id: int64? ,
+ i_category: string?,
+ i_manufact_id: int64? ,
+ i_manufact: string?,
+ i_size: string?,
+ i_formulation: string?,
+ i_color: string?,
+ i_units: string?,
+ i_container: string?,
+ i_manager_id: int64?,
+ i_product_name: string?
+}
+
+create dataset store_sales (store_sales_type)
+primary key ss_item_sk, ss_ticket_number;
+
+create dataset item (item_type)
+primary key i_item_sk;
+
+select case when (select value count(ss)
+ from store_sales ss
+ where ss_quantity >= 1 and ss_quantity <= 20)[0] < 25437
+ then (select avg(ss_ext_discount_amt)
+ from store_sales
+ where ss_quantity >= 1 and ss_quantity <= 20)
+ else (select avg(ss_net_profit)
+ from store_sales
+ where ss_quantity >= 1 and ss_quantity <= 20)
+ end bucket1
+from item
+where i_item_sk = 1;
+
+drop dataverse tpcds;
\ No newline at end of file
diff --git a/asterixdb/asterix-app/src/test/resources/optimizerts/results/filter-nested.plan b/asterixdb/asterix-app/src/test/resources/optimizerts/results/filter-nested.plan
index 7448380..3ed2b6f 100644
--- a/asterixdb/asterix-app/src/test/resources/optimizerts/results/filter-nested.plan
+++ b/asterixdb/asterix-app/src/test/resources/optimizerts/results/filter-nested.plan
@@ -6,6 +6,6 @@
-- STREAM_PROJECT |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- DATASOURCE_SCAN |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- BROADCAST_EXCHANGE |PARTITIONED|
-- ASSIGN |PARTITIONED|
- -- EMPTY_TUPLE_SOURCE |PARTITIONED|
\ No newline at end of file
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
diff --git a/asterixdb/asterix-app/src/test/resources/optimizerts/results/join-singletonbranch-2.plan b/asterixdb/asterix-app/src/test/resources/optimizerts/results/join-singletonbranch-2.plan
new file mode 100644
index 0000000..3bb60f3
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/optimizerts/results/join-singletonbranch-2.plan
@@ -0,0 +1,28 @@
+-- DISTRIBUTE_RESULT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- NESTED_LOOP |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- BROADCAST_EXCHANGE |PARTITIONED|
+ -- ASSIGN |UNPARTITIONED|
+ -- STREAM_PROJECT |UNPARTITIONED|
+ -- ASSIGN |UNPARTITIONED|
+ -- AGGREGATE |UNPARTITIONED|
+ -- RANDOM_MERGE_EXCHANGE |PARTITIONED|
+ -- AGGREGATE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
diff --git a/asterixdb/asterix-app/src/test/resources/optimizerts/results/join-singletonbranch-3.plan b/asterixdb/asterix-app/src/test/resources/optimizerts/results/join-singletonbranch-3.plan
new file mode 100644
index 0000000..7b5a4ee
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/optimizerts/results/join-singletonbranch-3.plan
@@ -0,0 +1,23 @@
+-- DISTRIBUTE_RESULT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- STREAM_SELECT |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- BROADCAST_EXCHANGE |PARTITIONED|
+ -- ASSIGN |UNPARTITIONED|
+ -- STREAM_PROJECT |UNPARTITIONED|
+ -- ASSIGN |UNPARTITIONED|
+ -- AGGREGATE |UNPARTITIONED|
+ -- RANDOM_MERGE_EXCHANGE |PARTITIONED|
+ -- AGGREGATE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
diff --git a/asterixdb/asterix-app/src/test/resources/optimizerts/results/join-singletonbranch.plan b/asterixdb/asterix-app/src/test/resources/optimizerts/results/join-singletonbranch.plan
new file mode 100644
index 0000000..c072812
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/optimizerts/results/join-singletonbranch.plan
@@ -0,0 +1,28 @@
+-- DISTRIBUTE_RESULT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- NESTED_LOOP |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- BROADCAST_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |UNPARTITIONED|
+ -- ASSIGN |UNPARTITIONED|
+ -- AGGREGATE |UNPARTITIONED|
+ -- AGGREGATE |UNPARTITIONED|
+ -- RANDOM_MERGE_EXCHANGE |PARTITIONED|
+ -- AGGREGATE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
diff --git a/asterixdb/asterix-app/src/test/resources/optimizerts/results/nest_aggregate.plan b/asterixdb/asterix-app/src/test/resources/optimizerts/results/nest_aggregate.plan
index 41b07d9..d4cdff4 100644
--- a/asterixdb/asterix-app/src/test/resources/optimizerts/results/nest_aggregate.plan
+++ b/asterixdb/asterix-app/src/test/resources/optimizerts/results/nest_aggregate.plan
@@ -8,7 +8,7 @@
{
-- AGGREGATE |LOCAL|
-- STREAM_LIMIT |LOCAL|
- -- IN_MEMORY_STABLE_SORT [$$41(ASC)] |LOCAL|
+ -- IN_MEMORY_STABLE_SORT [$$39(ASC)] |LOCAL|
-- MICRO_PRE_CLUSTERED_GROUP_BY[$$51] |LOCAL|
{
-- AGGREGATE |LOCAL|
@@ -29,7 +29,7 @@
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- HYBRID_HASH_JOIN [$$34][$$39] |PARTITIONED|
+ -- HYBRID_HASH_JOIN [$$34][$$40] |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- HYBRID_HASH_JOIN [$$34][$$35] |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
@@ -45,7 +45,7 @@
-- DATASOURCE_SCAN |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- EMPTY_TUPLE_SOURCE |PARTITIONED|
- -- HASH_PARTITION_EXCHANGE [$$39] |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$40] |PARTITIONED|
-- ASSIGN |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
diff --git a/asterixdb/asterix-app/src/test/resources/optimizerts/results/tpcds/query-ASTERIXDB-1581-correlated.plan b/asterixdb/asterix-app/src/test/resources/optimizerts/results/tpcds/query-ASTERIXDB-1581-correlated.plan
new file mode 100644
index 0000000..234c91b
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/optimizerts/results/tpcds/query-ASTERIXDB-1581-correlated.plan
@@ -0,0 +1,281 @@
+-- DISTRIBUTE_RESULT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- UNNEST |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- PRE_CLUSTERED_GROUP_BY[$$117] |PARTITIONED|
+ {
+ -- AGGREGATE |LOCAL|
+ -- AGGREGATE |LOCAL|
+ -- ASSIGN |LOCAL|
+ -- AGGREGATE |LOCAL|
+ -- STREAM_SELECT |LOCAL|
+ -- NESTED_TUPLE_SOURCE |LOCAL|
+ }
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STABLE_SORT [$$117(ASC)] |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- HYBRID_HASH_JOIN [$$117][$$118] |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$117] |PARTITIONED|
+ -- PRE_CLUSTERED_GROUP_BY[$$104] |PARTITIONED|
+ {
+ -- AGGREGATE |LOCAL|
+ -- AGGREGATE |LOCAL|
+ -- ASSIGN |LOCAL|
+ -- AGGREGATE |LOCAL|
+ -- STREAM_SELECT |LOCAL|
+ -- NESTED_TUPLE_SOURCE |LOCAL|
+ }
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STABLE_SORT [$$104(ASC)] |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- HYBRID_HASH_JOIN [$$104][$$105] |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$104] |PARTITIONED|
+ -- PRE_CLUSTERED_GROUP_BY[$$87] |PARTITIONED|
+ {
+ -- AGGREGATE |LOCAL|
+ -- AGGREGATE |LOCAL|
+ -- STREAM_SELECT |LOCAL|
+ -- NESTED_TUPLE_SOURCE |LOCAL|
+ }
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- HYBRID_HASH_JOIN [$$87][$$94] |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- BTREE_SEARCH |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$143] |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$105] |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- HYBRID_HASH_JOIN [$$98][$$97] |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$137] |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- STREAM_SELECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- PRE_CLUSTERED_GROUP_BY[$$142] |PARTITIONED|
+ {
+ -- AGGREGATE |LOCAL|
+ -- AGGREGATE |LOCAL|
+ -- STREAM_SELECT |LOCAL|
+ -- NESTED_TUPLE_SOURCE |LOCAL|
+ }
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- HYBRID_HASH_JOIN [$$142][$$143] |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- BTREE_SEARCH |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$143] |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$97] |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$118] |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- HYBRID_HASH_JOIN [$$100][$$99] |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$100] |PARTITIONED|
+ -- PRE_CLUSTERED_GROUP_BY[$$119] |PARTITIONED|
+ {
+ -- AGGREGATE |LOCAL|
+ -- AGGREGATE |LOCAL|
+ -- AGGREGATE |LOCAL|
+ -- STREAM_SELECT |LOCAL|
+ -- NESTED_TUPLE_SOURCE |LOCAL|
+ }
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STABLE_SORT [$$119(ASC)] |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- HYBRID_HASH_JOIN [$$119][$$122] |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$119] |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- STREAM_SELECT |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- PRE_CLUSTERED_GROUP_BY[$$123] |PARTITIONED|
+ {
+ -- AGGREGATE |LOCAL|
+ -- AGGREGATE |LOCAL|
+ -- STREAM_SELECT |LOCAL|
+ -- NESTED_TUPLE_SOURCE |LOCAL|
+ }
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- HYBRID_HASH_JOIN [$$123][$$124] |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- BTREE_SEARCH |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$143] |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$122] |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- HYBRID_HASH_JOIN [$$137][$$136] |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$137] |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- STREAM_SELECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- PRE_CLUSTERED_GROUP_BY[$$142] |PARTITIONED|
+ {
+ -- AGGREGATE |LOCAL|
+ -- AGGREGATE |LOCAL|
+ -- STREAM_SELECT |LOCAL|
+ -- NESTED_TUPLE_SOURCE |LOCAL|
+ }
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- HYBRID_HASH_JOIN [$$142][$$143] |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- BTREE_SEARCH |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$143] |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$136] |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$99] |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
diff --git a/asterixdb/asterix-app/src/test/resources/optimizerts/results/tpcds/query-ASTERIXDB-1581.plan b/asterixdb/asterix-app/src/test/resources/optimizerts/results/tpcds/query-ASTERIXDB-1581.plan
new file mode 100644
index 0000000..c09dedb
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/optimizerts/results/tpcds/query-ASTERIXDB-1581.plan
@@ -0,0 +1,180 @@
+-- DISTRIBUTE_RESULT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- NESTED_LOOP |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- BTREE_SEARCH |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- BROADCAST_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- UNNEST |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- PRE_CLUSTERED_GROUP_BY[$$114] |PARTITIONED|
+ {
+ -- AGGREGATE |LOCAL|
+ -- AGGREGATE |LOCAL|
+ -- ASSIGN |LOCAL|
+ -- AGGREGATE |LOCAL|
+ -- STREAM_SELECT |LOCAL|
+ -- NESTED_TUPLE_SOURCE |LOCAL|
+ }
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STABLE_SORT [$$114(ASC)] |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- HYBRID_HASH_JOIN [$$114][$$115] |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$114] |PARTITIONED|
+ -- PRE_CLUSTERED_GROUP_BY[$$54] |PARTITIONED|
+ {
+ -- AGGREGATE |LOCAL|
+ -- AGGREGATE |LOCAL|
+ -- ASSIGN |LOCAL|
+ -- AGGREGATE |LOCAL|
+ -- STREAM_SELECT |LOCAL|
+ -- NESTED_TUPLE_SOURCE |LOCAL|
+ }
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STABLE_SORT [$$54(ASC)] |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- HYBRID_HASH_JOIN [$$54][$$106] |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$116] |PARTITIONED|
+ -- STREAM_PROJECT |UNPARTITIONED|
+ -- ASSIGN |UNPARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |UNPARTITIONED|
+ -- SPLIT |UNPARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |UNPARTITIONED|
+ -- AGGREGATE |UNPARTITIONED|
+ -- AGGREGATE |UNPARTITIONED|
+ -- RANDOM_MERGE_EXCHANGE |PARTITIONED|
+ -- AGGREGATE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- STREAM_SELECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$106] |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- STREAM_SELECT |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- BROADCAST_EXCHANGE |PARTITIONED|
+ -- STREAM_SELECT |UNPARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |UNPARTITIONED|
+ -- SPLIT |UNPARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |UNPARTITIONED|
+ -- AGGREGATE |UNPARTITIONED|
+ -- AGGREGATE |UNPARTITIONED|
+ -- RANDOM_MERGE_EXCHANGE |PARTITIONED|
+ -- AGGREGATE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- STREAM_SELECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$115] |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- STREAM_SELECT |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- BROADCAST_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- STREAM_SELECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- PRE_CLUSTERED_GROUP_BY[$$116] |PARTITIONED|
+ {
+ -- AGGREGATE |LOCAL|
+ -- AGGREGATE |LOCAL|
+ -- AGGREGATE |LOCAL|
+ -- STREAM_SELECT |LOCAL|
+ -- NESTED_TUPLE_SOURCE |LOCAL|
+ }
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STABLE_SORT [$$116(ASC)] |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- HYBRID_HASH_JOIN [$$116][$$117] |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$116] |PARTITIONED|
+ -- STREAM_PROJECT |UNPARTITIONED|
+ -- ASSIGN |UNPARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |UNPARTITIONED|
+ -- SPLIT |UNPARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |UNPARTITIONED|
+ -- AGGREGATE |UNPARTITIONED|
+ -- AGGREGATE |UNPARTITIONED|
+ -- RANDOM_MERGE_EXCHANGE |PARTITIONED|
+ -- AGGREGATE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- STREAM_SELECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$117] |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- STREAM_SELECT |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- SPLIT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- BROADCAST_EXCHANGE |PARTITIONED|
+ -- STREAM_SELECT |UNPARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |UNPARTITIONED|
+ -- SPLIT |UNPARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |UNPARTITIONED|
+ -- AGGREGATE |UNPARTITIONED|
+ -- AGGREGATE |UNPARTITIONED|
+ -- RANDOM_MERGE_EXCHANGE |PARTITIONED|
+ -- AGGREGATE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- STREAM_SELECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
diff --git a/asterixdb/asterix-app/src/test/resources/optimizerts/results/udfs/query-ASTERIXDB-1018.plan b/asterixdb/asterix-app/src/test/resources/optimizerts/results/udfs/query-ASTERIXDB-1018.plan
index 3eb81d9..2d37e1f 100644
--- a/asterixdb/asterix-app/src/test/resources/optimizerts/results/udfs/query-ASTERIXDB-1018.plan
+++ b/asterixdb/asterix-app/src/test/resources/optimizerts/results/udfs/query-ASTERIXDB-1018.plan
@@ -4,69 +4,70 @@
-- ASSIGN |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- HYBRID_HASH_JOIN [$$35][$$48] |PARTITIONED|
+ -- HYBRID_HASH_JOIN [$$35][$$50] |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- DATASOURCE_SCAN |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- EMPTY_TUPLE_SOURCE |PARTITIONED|
- -- HASH_PARTITION_EXCHANGE [$$48] |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$50] |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
-- ASSIGN |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- PRE_CLUSTERED_GROUP_BY[$$52, $$24, $$53] |PARTITIONED|
+ -- PRE_CLUSTERED_GROUP_BY[$$43, $$49] |PARTITIONED|
{
-- AGGREGATE |LOCAL|
-- STREAM_SELECT |LOCAL|
-- NESTED_TUPLE_SOURCE |LOCAL|
}
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- STABLE_SORT [$$52(ASC), $$24(ASC), $$53(ASC)] |PARTITIONED|
- -- HASH_PARTITION_EXCHANGE [$$52, $$24, $$53] |PARTITIONED|
+ -- STABLE_SORT [$$43(ASC), $$49(ASC)] |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$43, $$49] |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- NESTED_LOOP |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- STREAM_SELECT |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- PRE_CLUSTERED_GROUP_BY[$$36, $$47] |PARTITIONED|
- {
- -- AGGREGATE |LOCAL|
- -- STREAM_SELECT |LOCAL|
- -- NESTED_TUPLE_SOURCE |LOCAL|
- }
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- STABLE_SORT [$$36(ASC), $$47(ASC)] |PARTITIONED|
- -- HASH_PARTITION_EXCHANGE [$$36, $$47] |PARTITIONED|
- -- STREAM_PROJECT |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- HYBRID_HASH_JOIN [$$45][$$39] |PARTITIONED|
- -- HASH_PARTITION_EXCHANGE [$$45] |PARTITIONED|
- -- NESTED_LOOP |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- STREAM_PROJECT |PARTITIONED|
- -- ASSIGN |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- DATASOURCE_SCAN |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- EMPTY_TUPLE_SOURCE |PARTITIONED|
- -- BROADCAST_EXCHANGE |PARTITIONED|
- -- STREAM_PROJECT |PARTITIONED|
- -- ASSIGN |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- DATASOURCE_SCAN |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- EMPTY_TUPLE_SOURCE |PARTITIONED|
- -- HASH_PARTITION_EXCHANGE [$$39] |PARTITIONED|
- -- STREAM_PROJECT |PARTITIONED|
- -- ASSIGN |PARTITIONED|
- -- STREAM_PROJECT |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- DATASOURCE_SCAN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- STREAM_SELECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- PRE_CLUSTERED_GROUP_BY[$$36, $$48] |PARTITIONED|
+ {
+ -- AGGREGATE |LOCAL|
+ -- STREAM_SELECT |LOCAL|
+ -- NESTED_TUPLE_SOURCE |LOCAL|
+ }
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STABLE_SORT [$$36(ASC), $$48(ASC)] |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$36, $$48] |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- HYBRID_HASH_JOIN [$$46][$$39] |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$46] |PARTITIONED|
+ -- NESTED_LOOP |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- BROADCAST_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$39] |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
-- BROADCAST_EXCHANGE |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
-- ASSIGN |PARTITIONED|
diff --git a/asterixdb/asterix-app/src/test/resources/optimizerts/results/udfs/query-ASTERIXDB-1019.plan b/asterixdb/asterix-app/src/test/resources/optimizerts/results/udfs/query-ASTERIXDB-1019.plan
index b910acf..ed40740 100644
--- a/asterixdb/asterix-app/src/test/resources/optimizerts/results/udfs/query-ASTERIXDB-1019.plan
+++ b/asterixdb/asterix-app/src/test/resources/optimizerts/results/udfs/query-ASTERIXDB-1019.plan
@@ -12,69 +12,70 @@
-- ASSIGN |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- HYBRID_HASH_JOIN [$$39][$$52] |PARTITIONED|
+ -- HYBRID_HASH_JOIN [$$39][$$54] |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- DATASOURCE_SCAN |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- EMPTY_TUPLE_SOURCE |PARTITIONED|
- -- HASH_PARTITION_EXCHANGE [$$52] |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$54] |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
-- ASSIGN |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- PRE_CLUSTERED_GROUP_BY[$$57, $$24, $$56] |PARTITIONED|
+ -- PRE_CLUSTERED_GROUP_BY[$$47, $$53] |PARTITIONED|
{
-- AGGREGATE |LOCAL|
-- STREAM_SELECT |LOCAL|
-- NESTED_TUPLE_SOURCE |LOCAL|
}
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- STABLE_SORT [$$57(ASC), $$24(ASC), $$56(ASC)] |PARTITIONED|
- -- HASH_PARTITION_EXCHANGE [$$57, $$24, $$56] |PARTITIONED|
+ -- STABLE_SORT [$$47(ASC), $$53(ASC)] |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$47, $$53] |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
-- NESTED_LOOP |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- STREAM_SELECT |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- PRE_CLUSTERED_GROUP_BY[$$51, $$40] |PARTITIONED|
- {
- -- AGGREGATE |LOCAL|
- -- STREAM_SELECT |LOCAL|
- -- NESTED_TUPLE_SOURCE |LOCAL|
- }
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- STABLE_SORT [$$51(ASC), $$40(ASC)] |PARTITIONED|
- -- HASH_PARTITION_EXCHANGE [$$51, $$40] |PARTITIONED|
- -- STREAM_PROJECT |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- HYBRID_HASH_JOIN [$$49][$$43] |PARTITIONED|
- -- HASH_PARTITION_EXCHANGE [$$49] |PARTITIONED|
- -- NESTED_LOOP |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- STREAM_PROJECT |PARTITIONED|
- -- ASSIGN |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- DATASOURCE_SCAN |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- EMPTY_TUPLE_SOURCE |PARTITIONED|
- -- BROADCAST_EXCHANGE |PARTITIONED|
- -- STREAM_PROJECT |PARTITIONED|
- -- ASSIGN |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- DATASOURCE_SCAN |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- EMPTY_TUPLE_SOURCE |PARTITIONED|
- -- HASH_PARTITION_EXCHANGE [$$43] |PARTITIONED|
- -- STREAM_PROJECT |PARTITIONED|
- -- ASSIGN |PARTITIONED|
- -- STREAM_PROJECT |PARTITIONED|
- -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- DATASOURCE_SCAN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- STREAM_SELECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- PRE_CLUSTERED_GROUP_BY[$$40, $$52] |PARTITIONED|
+ {
+ -- AGGREGATE |LOCAL|
+ -- STREAM_SELECT |LOCAL|
+ -- NESTED_TUPLE_SOURCE |LOCAL|
+ }
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STABLE_SORT [$$40(ASC), $$52(ASC)] |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$40, $$52] |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- HYBRID_HASH_JOIN [$$50][$$43] |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$50] |PARTITIONED|
+ -- NESTED_LOOP |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
-- ONE_TO_ONE_EXCHANGE |PARTITIONED|
- -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- BROADCAST_EXCHANGE |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
+ -- HASH_PARTITION_EXCHANGE [$$43] |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ASSIGN |PARTITIONED|
+ -- STREAM_PROJECT |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- DATASOURCE_SCAN |PARTITIONED|
+ -- ONE_TO_ONE_EXCHANGE |PARTITIONED|
+ -- EMPTY_TUPLE_SOURCE |PARTITIONED|
-- BROADCAST_EXCHANGE |PARTITIONED|
-- STREAM_PROJECT |PARTITIONED|
-- ASSIGN |PARTITIONED|
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/subquery/aggregate_join/aggregate_join.1.ddl.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/subquery/aggregate_join/aggregate_join.1.ddl.sqlpp
new file mode 100644
index 0000000..07c39e3
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/subquery/aggregate_join/aggregate_join.1.ddl.sqlpp
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+drop dataverse test if exists;
+create dataverse test;
+use test;
+
+drop dataset samptable if exists;
+drop type samptabletype if exists;
+
+create type samptabletype as closed {
+ id: int8
+};
+
+create dataset samptable(samptabletype) primary key id;
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/subquery/aggregate_join/aggregate_join.2.update.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/subquery/aggregate_join/aggregate_join.2.update.sqlpp
new file mode 100644
index 0000000..d1ad8b5
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/subquery/aggregate_join/aggregate_join.2.update.sqlpp
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+use test;
+
+insert into samptable ({'id' : 0});
+insert into samptable ({'id' : 1});
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/subquery/aggregate_join/aggregate_join.3.query.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/subquery/aggregate_join/aggregate_join.3.query.sqlpp
new file mode 100644
index 0000000..3d0bf24
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/subquery/aggregate_join/aggregate_join.3.query.sqlpp
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+USE test;
+
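+// Joins the singleton result of a scalar aggregate subquery with the dataset.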
+SELECT *
+FROM (SELECT VALUE MIN(s.id) FROM samptable s) min,
+ samptable s
+WHERE s.id > min
+ORDER BY s.id;
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/subquery/aggregate_join_external/aggregate_join_external.1.ddl.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/subquery/aggregate_join_external/aggregate_join_external.1.ddl.sqlpp
new file mode 100644
index 0000000..e77519e
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/subquery/aggregate_join_external/aggregate_join_external.1.ddl.sqlpp
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+DROP DATAVERSE test IF EXISTS;
+CREATE DATAVERSE test;
+USE test;
+
+
+CREATE TYPE OrderType AS CLOSED {
+ o_orderkey: int32,
+ o_custkey: int32,
+ o_orderstatus: string,
+ o_totalprice: double,
+ o_orderdate: string,
+ o_orderpriority: string,
+ o_clerk: string,
+ o_shippriority: int32,
+ o_comment: string
+}
+
+CREATE TYPE CustomerType AS CLOSED {
+ c_custkey: int32,
+ c_name: string,
+ c_address: string,
+ c_nationkey: int32,
+ c_phone: string,
+ c_acctbal: double,
+ c_mktsegment: string,
+ c_comment: string
+}
+
+
+CREATE EXTERNAL DATASET Customers(CustomerType) USING `localfs`
+((`path`=`asterix_nc1://data/tpch0.001/customer.tbl`),
+(`input-format`=`text-input-format`),(`format`=`delimited-text`),(`delimiter`=`|`));
+
+CREATE EXTERNAL DATASET Orders(OrderType) USING `localfs`
+((`path`=`asterix_nc1://data/tpch0.001/orders.tbl`),
+(`input-format`=`text-input-format`),(`format`=`delimited-text`),(`delimiter`=`|`));
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/subquery/aggregate_join_external/aggregate_join_external.3.query.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/subquery/aggregate_join_external/aggregate_join_external.3.query.sqlpp
new file mode 100644
index 0000000..eb60441
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/subquery/aggregate_join_external/aggregate_join_external.3.query.sqlpp
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+USE test;
+
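+// Same aggregate-join pattern as the aggregate_join test, but over external datasets.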
+SELECT *
+FROM (SELECT VALUE MIN(o.o_custkey) FROM Orders o) min,
+ Customers c
+WHERE c.c_custkey > min
+ORDER BY c.c_custkey
+LIMIT 5;
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-2/query-ASTERIXDB-1581-2.1.ddl.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-2/query-ASTERIXDB-1581-2.1.ddl.sqlpp
new file mode 100644
index 0000000..2e4844e
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-2/query-ASTERIXDB-1581-2.1.ddl.sqlpp
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+drop dataverse tpcds if exists;
+create dataverse tpcds;
+
+use tpcds;
+
+create type tpcds.store_sales_type as closed {
+ ss_sold_date_sk: int64?,
+ ss_sold_time_sk: int64?,
+ ss_item_sk: int64,
+ ss_customer_sk: int64?,
+ ss_cdemo_sk: int64?,
+ ss_hdemo_sk: int64?,
+ ss_addr_sk: int64?,
+ ss_store_sk: int64?,
+ ss_promo_sk: int64?,
+ ss_ticket_number: int64,
+ ss_quantity: int64?,
+ ss_wholesale_cost: double?,
+ ss_list_price: double?,
+ ss_sales_price: double?,
+ ss_ext_discount_amt: double?,
+ ss_ext_sales_price: double?,
+ ss_ext_wholesale_cost: double?,
+ ss_ext_list_price: double?,
+ ss_ext_tax: double?,
+ ss_coupon_amt: double?,
+ ss_net_paid: double?,
+ ss_net_paid_inc_tax: double?,
+ ss_net_profit: double?
+}
+
+
+create type tpcds.item_type as closed {
+ i_item_sk: int64,
+ i_item_id: string,
+ i_rec_start_date: string?,
+ i_rec_end_date: string?,
+ i_item_desc: string?,
+ i_current_price: double?,
+ i_wholesale_cost: double?,
+ i_brand_id: int64? ,
+ i_brand: string?,
+ i_class_id: int64? ,
+ i_class: string?,
+ i_category_id: int64? ,
+ i_category: string?,
+ i_manufact_id: int64? ,
+ i_manufact: string?,
+ i_size: string?,
+ i_formulation: string?,
+ i_color: string?,
+ i_units: string?,
+ i_container: string?,
+ i_manager_id: int64?,
+ i_product_name: string?
+}
+
+create dataset store_sales (store_sales_type)
+primary key ss_item_sk, ss_ticket_number;
+
+create dataset item (item_type)
+primary key i_item_sk;
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-2/query-ASTERIXDB-1581-2.2.update.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-2/query-ASTERIXDB-1581-2.2.update.sqlpp
new file mode 100644
index 0000000..bf0a6f7
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-2/query-ASTERIXDB-1581-2.2.update.sqlpp
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+use tpcds;
+
+load dataset store_sales using localfs ((`path`=`asterix_nc1://data/tpcds/store_sales.csv`),
+(`format`=`delimited-text`), (`delimiter`=`|`));
+
+load dataset item using localfs ((`path`=`asterix_nc1://data/tpcds/item.csv`),
+(`format`=`delimited-text`), (`delimiter`=`|`));
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-2/query-ASTERIXDB-1581-2.3.query.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-2/query-ASTERIXDB-1581-2.3.query.sqlpp
new file mode 100644
index 0000000..329ac5e
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-2/query-ASTERIXDB-1581-2.3.query.sqlpp
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+use tpcds;
+
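+// Compares the singleton result of an aggregate subquery against a constant in the when-condition;
+// both branches return singleton lists of records.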
+select case when (select value count(ss)
+ from store_sales ss
+ where ss_quantity >= 1 and ss_quantity <= 20)[0] < 25437
+ then (select avg(ss_ext_discount_amt)
+ from store_sales
+ where ss_quantity >= 1 and ss_quantity <= 20)
+ else (select avg(ss_net_profit)
+ from store_sales
+ where ss_quantity >= 1 and ss_quantity <= 20)
+ end bucket1
+from item
+where i_item_sk = 1;
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-3/query-ASTERIXDB-1581-3.1.ddl.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-3/query-ASTERIXDB-1581-3.1.ddl.sqlpp
new file mode 100644
index 0000000..2e4844e
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-3/query-ASTERIXDB-1581-3.1.ddl.sqlpp
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+drop dataverse tpcds if exists;
+create dataverse tpcds;
+
+use tpcds;
+
+create type tpcds.store_sales_type as closed {
+ ss_sold_date_sk: int64?,
+ ss_sold_time_sk: int64?,
+ ss_item_sk: int64,
+ ss_customer_sk: int64?,
+ ss_cdemo_sk: int64?,
+ ss_hdemo_sk: int64?,
+ ss_addr_sk: int64?,
+ ss_store_sk: int64?,
+ ss_promo_sk: int64?,
+ ss_ticket_number: int64,
+ ss_quantity: int64?,
+ ss_wholesale_cost: double?,
+ ss_list_price: double?,
+ ss_sales_price: double?,
+ ss_ext_discount_amt: double?,
+ ss_ext_sales_price: double?,
+ ss_ext_wholesale_cost: double?,
+ ss_ext_list_price: double?,
+ ss_ext_tax: double?,
+ ss_coupon_amt: double?,
+ ss_net_paid: double?,
+ ss_net_paid_inc_tax: double?,
+ ss_net_profit: double?
+}
+
+
+create type tpcds.item_type as closed {
+ i_item_sk: int64,
+ i_item_id: string,
+ i_rec_start_date: string?,
+ i_rec_end_date: string?,
+ i_item_desc: string?,
+ i_current_price: double?,
+ i_wholesale_cost: double?,
+ i_brand_id: int64? ,
+ i_brand: string?,
+ i_class_id: int64? ,
+ i_class: string?,
+ i_category_id: int64? ,
+ i_category: string?,
+ i_manufact_id: int64? ,
+ i_manufact: string?,
+ i_size: string?,
+ i_formulation: string?,
+ i_color: string?,
+ i_units: string?,
+ i_container: string?,
+ i_manager_id: int64?,
+ i_product_name: string?
+}
+
+create dataset store_sales (store_sales_type)
+primary key ss_item_sk, ss_ticket_number;
+
+create dataset item (item_type)
+primary key i_item_sk;
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-3/query-ASTERIXDB-1581-3.2.update.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-3/query-ASTERIXDB-1581-3.2.update.sqlpp
new file mode 100644
index 0000000..bf0a6f7
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-3/query-ASTERIXDB-1581-3.2.update.sqlpp
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+use tpcds;
+
+load dataset store_sales using localfs ((`path`=`asterix_nc1://data/tpcds/store_sales.csv`),
+(`format`=`delimited-text`), (`delimiter`=`|`));
+
+load dataset item using localfs ((`path`=`asterix_nc1://data/tpcds/item.csv`),
+(`format`=`delimited-text`), (`delimiter`=`|`));
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-3/query-ASTERIXDB-1581-3.3.query.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-3/query-ASTERIXDB-1581-3.3.query.sqlpp
new file mode 100644
index 0000000..cee2a7c
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-3/query-ASTERIXDB-1581-3.3.query.sqlpp
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+use tpcds;
+
+// Each branch of the case expression returns a singleton list of records.
+select case when i_brand_id > 1
+ then (select avg(ss_ext_discount_amt)
+ from store_sales
+ where ss_quantity >= 1 and ss_quantity <= 20)
+ else (select avg(ss_net_profit)
+ from store_sales
+ where ss_quantity >= 1 and ss_quantity <= 20)
+ end bucket1
+from item
+where i_item_sk = 1;
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-4/query-ASTERIXDB-1581-4.1.ddl.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-4/query-ASTERIXDB-1581-4.1.ddl.sqlpp
new file mode 100644
index 0000000..2e4844e
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-4/query-ASTERIXDB-1581-4.1.ddl.sqlpp
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+drop dataverse tpcds if exists;
+create dataverse tpcds;
+
+use tpcds;
+
+create type tpcds.store_sales_type as closed {
+ ss_sold_date_sk: int64?,
+ ss_sold_time_sk: int64?,
+ ss_item_sk: int64,
+ ss_customer_sk: int64?,
+ ss_cdemo_sk: int64?,
+ ss_hdemo_sk: int64?,
+ ss_addr_sk: int64?,
+ ss_store_sk: int64?,
+ ss_promo_sk: int64?,
+ ss_ticket_number: int64,
+ ss_quantity: int64?,
+ ss_wholesale_cost: double?,
+ ss_list_price: double?,
+ ss_sales_price: double?,
+ ss_ext_discount_amt: double?,
+ ss_ext_sales_price: double?,
+ ss_ext_wholesale_cost: double?,
+ ss_ext_list_price: double?,
+ ss_ext_tax: double?,
+ ss_coupon_amt: double?,
+ ss_net_paid: double?,
+ ss_net_paid_inc_tax: double?,
+ ss_net_profit: double?
+}
+
+
+create type tpcds.item_type as closed {
+ i_item_sk: int64,
+ i_item_id: string,
+ i_rec_start_date: string?,
+ i_rec_end_date: string?,
+ i_item_desc: string?,
+ i_current_price: double?,
+ i_wholesale_cost: double?,
+ i_brand_id: int64? ,
+ i_brand: string?,
+ i_class_id: int64? ,
+ i_class: string?,
+ i_category_id: int64? ,
+ i_category: string?,
+ i_manufact_id: int64? ,
+ i_manufact: string?,
+ i_size: string?,
+ i_formulation: string?,
+ i_color: string?,
+ i_units: string?,
+ i_container: string?,
+ i_manager_id: int64?,
+ i_product_name: string?
+}
+
+create dataset store_sales (store_sales_type)
+primary key ss_item_sk, ss_ticket_number;
+
+create dataset item (item_type)
+primary key i_item_sk;
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-4/query-ASTERIXDB-1581-4.2.update.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-4/query-ASTERIXDB-1581-4.2.update.sqlpp
new file mode 100644
index 0000000..bf0a6f7
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-4/query-ASTERIXDB-1581-4.2.update.sqlpp
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+use tpcds;
+
+load dataset store_sales using localfs ((`path`=`asterix_nc1://data/tpcds/store_sales.csv`),
+(`format`=`delimited-text`), (`delimiter`=`|`));
+
+load dataset item using localfs ((`path`=`asterix_nc1://data/tpcds/item.csv`),
+(`format`=`delimited-text`), (`delimiter`=`|`));
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-4/query-ASTERIXDB-1581-4.3.query.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-4/query-ASTERIXDB-1581-4.3.query.sqlpp
new file mode 100644
index 0000000..fd0682d
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-4/query-ASTERIXDB-1581-4.3.query.sqlpp
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+use tpcds;
+
+// The branches of the case expression return heterogeneous scalar values (a string and a double).
+select case when (select value count(ss)
+ from store_sales ss
+ where ss_quantity >= 1 and ss_quantity <= 20)[0] > 25437
+ then "1.0"
+ else (select value avg(ss_ext_discount_amt)
+ from store_sales
+ where ss_quantity >= 1 and ss_quantity <= 20)[0]
+ end bucket1
+from item
+where i_item_sk = 1;
\ No newline at end of file
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-5/query-ASTERIXDB-1581-5.1.ddl.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-5/query-ASTERIXDB-1581-5.1.ddl.sqlpp
new file mode 100644
index 0000000..2e4844e
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-5/query-ASTERIXDB-1581-5.1.ddl.sqlpp
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+drop dataverse tpcds if exists;
+create dataverse tpcds;
+
+use tpcds;
+
+create type tpcds.store_sales_type as closed {
+ ss_sold_date_sk: int64?,
+ ss_sold_time_sk: int64?,
+ ss_item_sk: int64,
+ ss_customer_sk: int64?,
+ ss_cdemo_sk: int64?,
+ ss_hdemo_sk: int64?,
+ ss_addr_sk: int64?,
+ ss_store_sk: int64?,
+ ss_promo_sk: int64?,
+ ss_ticket_number: int64,
+ ss_quantity: int64?,
+ ss_wholesale_cost: double?,
+ ss_list_price: double?,
+ ss_sales_price: double?,
+ ss_ext_discount_amt: double?,
+ ss_ext_sales_price: double?,
+ ss_ext_wholesale_cost: double?,
+ ss_ext_list_price: double?,
+ ss_ext_tax: double?,
+ ss_coupon_amt: double?,
+ ss_net_paid: double?,
+ ss_net_paid_inc_tax: double?,
+ ss_net_profit: double?
+}
+
+
+create type tpcds.item_type as closed {
+ i_item_sk: int64,
+ i_item_id: string,
+ i_rec_start_date: string?,
+ i_rec_end_date: string?,
+ i_item_desc: string?,
+ i_current_price: double?,
+ i_wholesale_cost: double?,
+ i_brand_id: int64? ,
+ i_brand: string?,
+ i_class_id: int64? ,
+ i_class: string?,
+ i_category_id: int64? ,
+ i_category: string?,
+ i_manufact_id: int64? ,
+ i_manufact: string?,
+ i_size: string?,
+ i_formulation: string?,
+ i_color: string?,
+ i_units: string?,
+ i_container: string?,
+ i_manager_id: int64?,
+ i_product_name: string?
+}
+
+create dataset store_sales (store_sales_type)
+primary key ss_item_sk, ss_ticket_number;
+
+create dataset item (item_type)
+primary key i_item_sk;
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-5/query-ASTERIXDB-1581-5.2.update.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-5/query-ASTERIXDB-1581-5.2.update.sqlpp
new file mode 100644
index 0000000..bf0a6f7
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-5/query-ASTERIXDB-1581-5.2.update.sqlpp
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+use tpcds;
+
+load dataset store_sales using localfs ((`path`=`asterix_nc1://data/tpcds/store_sales.csv`),
+(`format`=`delimited-text`), (`delimiter`=`|`));
+
+load dataset item using localfs ((`path`=`asterix_nc1://data/tpcds/item.csv`),
+(`format`=`delimited-text`), (`delimiter`=`|`));
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-5/query-ASTERIXDB-1581-5.3.query.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-5/query-ASTERIXDB-1581-5.3.query.sqlpp
new file mode 100644
index 0000000..c39f703
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-5/query-ASTERIXDB-1581-5.3.query.sqlpp
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+use tpcds;
+
+// The case expression returns heterogeneous values (a string and a list).
+select case when (select value count(ss)
+ from store_sales ss
+ where ss_quantity >= 1 and ss_quantity <= 20)[0] > 25437
+ then "1.0"
+ else (select avg(ss_ext_discount_amt)
+ from store_sales
+ where ss_quantity >= 1 and ss_quantity <= 20)
+ end bucket1
+from item
+where i_item_sk = 1;
\ No newline at end of file
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-6/query-ASTERIXDB-1581-6.1.ddl.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-6/query-ASTERIXDB-1581-6.1.ddl.sqlpp
new file mode 100644
index 0000000..2e4844e
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-6/query-ASTERIXDB-1581-6.1.ddl.sqlpp
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+drop dataverse tpcds if exists;
+create dataverse tpcds;
+
+use tpcds;
+
+create type tpcds.store_sales_type as closed {
+ ss_sold_date_sk: int64?,
+ ss_sold_time_sk: int64?,
+ ss_item_sk: int64,
+ ss_customer_sk: int64?,
+ ss_cdemo_sk: int64?,
+ ss_hdemo_sk: int64?,
+ ss_addr_sk: int64?,
+ ss_store_sk: int64?,
+ ss_promo_sk: int64?,
+ ss_ticket_number: int64,
+ ss_quantity: int64?,
+ ss_wholesale_cost: double?,
+ ss_list_price: double?,
+ ss_sales_price: double?,
+ ss_ext_discount_amt: double?,
+ ss_ext_sales_price: double?,
+ ss_ext_wholesale_cost: double?,
+ ss_ext_list_price: double?,
+ ss_ext_tax: double?,
+ ss_coupon_amt: double?,
+ ss_net_paid: double?,
+ ss_net_paid_inc_tax: double?,
+ ss_net_profit: double?
+}
+
+
+create type tpcds.item_type as closed {
+ i_item_sk: int64,
+ i_item_id: string,
+ i_rec_start_date: string?,
+ i_rec_end_date: string?,
+ i_item_desc: string?,
+ i_current_price: double?,
+ i_wholesale_cost: double?,
+ i_brand_id: int64? ,
+ i_brand: string?,
+ i_class_id: int64? ,
+ i_class: string?,
+ i_category_id: int64? ,
+ i_category: string?,
+ i_manufact_id: int64? ,
+ i_manufact: string?,
+ i_size: string?,
+ i_formulation: string?,
+ i_color: string?,
+ i_units: string?,
+ i_container: string?,
+ i_manager_id: int64?,
+ i_product_name: string?
+}
+
+create dataset store_sales (store_sales_type)
+primary key ss_item_sk, ss_ticket_number;
+
+create dataset item (item_type)
+primary key i_item_sk;
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-6/query-ASTERIXDB-1581-6.2.update.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-6/query-ASTERIXDB-1581-6.2.update.sqlpp
new file mode 100644
index 0000000..bf0a6f7
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-6/query-ASTERIXDB-1581-6.2.update.sqlpp
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+use tpcds;
+
+load dataset store_sales using localfs ((`path`=`asterix_nc1://data/tpcds/store_sales.csv`),
+(`format`=`delimited-text`), (`delimiter`=`|`));
+
+load dataset item using localfs ((`path`=`asterix_nc1://data/tpcds/item.csv`),
+(`format`=`delimited-text`), (`delimiter`=`|`));
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-6/query-ASTERIXDB-1581-6.3.query.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-6/query-ASTERIXDB-1581-6.3.query.sqlpp
new file mode 100644
index 0000000..62c1d49
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-6/query-ASTERIXDB-1581-6.3.query.sqlpp
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+use tpcds;
+
+// Note: the WHEN condition compares a list (the subquery result) with a number and therefore
+// always evaluates to null, hence the case expression always returns the ELSE expression.
+select case when (select value count(ss)
+ from store_sales ss
+ where ss_quantity >= 1 and ss_quantity <= 20) > 25437
+ then (select avg(ss_ext_discount_amt)
+ from store_sales
+ where ss_quantity >= 1 and ss_quantity <= 20)
+ else (select avg(ss_net_profit)
+ from store_sales
+ where ss_quantity >= 1 and ss_quantity <= 20)
+ end bucket1
+from item
+where i_item_sk = 1;
\ No newline at end of file
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-correlated-2/query-ASTERIXDB-1581-correlated-2.1.ddl.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-correlated-2/query-ASTERIXDB-1581-correlated-2.1.ddl.sqlpp
new file mode 100644
index 0000000..2e4844e
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-correlated-2/query-ASTERIXDB-1581-correlated-2.1.ddl.sqlpp
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+drop dataverse tpcds if exists;
+create dataverse tpcds;
+
+use tpcds;
+
+create type tpcds.store_sales_type as closed {
+ ss_sold_date_sk: int64?,
+ ss_sold_time_sk: int64?,
+ ss_item_sk: int64,
+ ss_customer_sk: int64?,
+ ss_cdemo_sk: int64?,
+ ss_hdemo_sk: int64?,
+ ss_addr_sk: int64?,
+ ss_store_sk: int64?,
+ ss_promo_sk: int64?,
+ ss_ticket_number: int64,
+ ss_quantity: int64?,
+ ss_wholesale_cost: double?,
+ ss_list_price: double?,
+ ss_sales_price: double?,
+ ss_ext_discount_amt: double?,
+ ss_ext_sales_price: double?,
+ ss_ext_wholesale_cost: double?,
+ ss_ext_list_price: double?,
+ ss_ext_tax: double?,
+ ss_coupon_amt: double?,
+ ss_net_paid: double?,
+ ss_net_paid_inc_tax: double?,
+ ss_net_profit: double?
+}
+
+
+create type tpcds.item_type as closed {
+ i_item_sk: int64,
+ i_item_id: string,
+ i_rec_start_date: string?,
+ i_rec_end_date: string?,
+ i_item_desc: string?,
+ i_current_price: double?,
+ i_wholesale_cost: double?,
+ i_brand_id: int64? ,
+ i_brand: string?,
+ i_class_id: int64? ,
+ i_class: string?,
+ i_category_id: int64? ,
+ i_category: string?,
+ i_manufact_id: int64? ,
+ i_manufact: string?,
+ i_size: string?,
+ i_formulation: string?,
+ i_color: string?,
+ i_units: string?,
+ i_container: string?,
+ i_manager_id: int64?,
+ i_product_name: string?
+}
+
+create dataset store_sales (store_sales_type)
+primary key ss_item_sk, ss_ticket_number;
+
+create dataset item (item_type)
+primary key i_item_sk;
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-correlated-2/query-ASTERIXDB-1581-correlated-2.2.update.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-correlated-2/query-ASTERIXDB-1581-correlated-2.2.update.sqlpp
new file mode 100644
index 0000000..bf0a6f7
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-correlated-2/query-ASTERIXDB-1581-correlated-2.2.update.sqlpp
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+use tpcds;
+
+load dataset store_sales using localfs ((`path`=`asterix_nc1://data/tpcds/store_sales.csv`),
+(`format`=`delimited-text`), (`delimiter`=`|`));
+
+load dataset item using localfs ((`path`=`asterix_nc1://data/tpcds/item.csv`),
+(`format`=`delimited-text`), (`delimiter`=`|`));
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-correlated-2/query-ASTERIXDB-1581-correlated-2.3.query.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-correlated-2/query-ASTERIXDB-1581-correlated-2.3.query.sqlpp
new file mode 100644
index 0000000..d2cc049
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-correlated-2/query-ASTERIXDB-1581-correlated-2.3.query.sqlpp
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+use tpcds;
+
+// The case expression contains correlated subqueries with non-equality conditions.
+select case when (select value count(ss)
+ from store_sales ss
+ where ss_quantity >= item.i_item_sk and ss_quantity <= 20)[0] < 25437
+ then (select avg(ss_ext_discount_amt)
+ from store_sales
+ where ss_quantity >= item.i_item_sk and ss_quantity <= 20)
+ else (select avg(ss_net_profit)
+ from store_sales
+ where ss_quantity >= item.i_item_sk and ss_quantity <= 20)
+ end bucket1
+from item
+where i_item_sk = 1;
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-correlated/query-ASTERIXDB-1581-correlated.1.ddl.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-correlated/query-ASTERIXDB-1581-correlated.1.ddl.sqlpp
new file mode 100644
index 0000000..2e4844e
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-correlated/query-ASTERIXDB-1581-correlated.1.ddl.sqlpp
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+drop dataverse tpcds if exists;
+create dataverse tpcds;
+
+use tpcds;
+
+create type tpcds.store_sales_type as closed {
+ ss_sold_date_sk: int64?,
+ ss_sold_time_sk: int64?,
+ ss_item_sk: int64,
+ ss_customer_sk: int64?,
+ ss_cdemo_sk: int64?,
+ ss_hdemo_sk: int64?,
+ ss_addr_sk: int64?,
+ ss_store_sk: int64?,
+ ss_promo_sk: int64?,
+ ss_ticket_number: int64,
+ ss_quantity: int64?,
+ ss_wholesale_cost: double?,
+ ss_list_price: double?,
+ ss_sales_price: double?,
+ ss_ext_discount_amt: double?,
+ ss_ext_sales_price: double?,
+ ss_ext_wholesale_cost: double?,
+ ss_ext_list_price: double?,
+ ss_ext_tax: double?,
+ ss_coupon_amt: double?,
+ ss_net_paid: double?,
+ ss_net_paid_inc_tax: double?,
+ ss_net_profit: double?
+}
+
+
+create type tpcds.item_type as closed {
+ i_item_sk: int64,
+ i_item_id: string,
+ i_rec_start_date: string?,
+ i_rec_end_date: string?,
+ i_item_desc: string?,
+ i_current_price: double?,
+ i_wholesale_cost: double?,
+ i_brand_id: int64? ,
+ i_brand: string?,
+ i_class_id: int64? ,
+ i_class: string?,
+ i_category_id: int64? ,
+ i_category: string?,
+ i_manufact_id: int64? ,
+ i_manufact: string?,
+ i_size: string?,
+ i_formulation: string?,
+ i_color: string?,
+ i_units: string?,
+ i_container: string?,
+ i_manager_id: int64?,
+ i_product_name: string?
+}
+
+create dataset store_sales (store_sales_type)
+primary key ss_item_sk, ss_ticket_number;
+
+create dataset item (item_type)
+primary key i_item_sk;
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-correlated/query-ASTERIXDB-1581-correlated.2.update.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-correlated/query-ASTERIXDB-1581-correlated.2.update.sqlpp
new file mode 100644
index 0000000..bf0a6f7
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-correlated/query-ASTERIXDB-1581-correlated.2.update.sqlpp
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+use tpcds;
+
+load dataset store_sales using localfs ((`path`=`asterix_nc1://data/tpcds/store_sales.csv`),
+(`format`=`delimited-text`), (`delimiter`=`|`));
+
+load dataset item using localfs ((`path`=`asterix_nc1://data/tpcds/item.csv`),
+(`format`=`delimited-text`), (`delimiter`=`|`));
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-correlated/query-ASTERIXDB-1581-correlated.3.query.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-correlated/query-ASTERIXDB-1581-correlated.3.query.sqlpp
new file mode 100644
index 0000000..a46ffe0
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581-correlated/query-ASTERIXDB-1581-correlated.3.query.sqlpp
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+use tpcds;
+
+// The case expression contains correlated subqueries.
+select case when (select value count(ss)
+ from store_sales ss
+ where ss_quantity = item.i_item_sk)[0] < 25437
+ then (select avg(ss_ext_discount_amt)
+ from store_sales
+ where ss_quantity = item.i_item_sk)
+ else (select avg(ss_net_profit)
+ from store_sales
+ where ss_quantity = item.i_item_sk)
+ end bucket1
+from item
+where i_item_sk = 2;
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581/query-ASTERIXDB-1581.1.ddl.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581/query-ASTERIXDB-1581.1.ddl.sqlpp
new file mode 100644
index 0000000..2e4844e
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581/query-ASTERIXDB-1581.1.ddl.sqlpp
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+drop dataverse tpcds if exists;
+create dataverse tpcds;
+
+use tpcds;
+
+create type tpcds.store_sales_type as closed {
+ ss_sold_date_sk: int64?,
+ ss_sold_time_sk: int64?,
+ ss_item_sk: int64,
+ ss_customer_sk: int64?,
+ ss_cdemo_sk: int64?,
+ ss_hdemo_sk: int64?,
+ ss_addr_sk: int64?,
+ ss_store_sk: int64?,
+ ss_promo_sk: int64?,
+ ss_ticket_number: int64,
+ ss_quantity: int64?,
+ ss_wholesale_cost: double?,
+ ss_list_price: double?,
+ ss_sales_price: double?,
+ ss_ext_discount_amt: double?,
+ ss_ext_sales_price: double?,
+ ss_ext_wholesale_cost: double?,
+ ss_ext_list_price: double?,
+ ss_ext_tax: double?,
+ ss_coupon_amt: double?,
+ ss_net_paid: double?,
+ ss_net_paid_inc_tax: double?,
+ ss_net_profit: double?
+}
+
+
+create type tpcds.item_type as closed {
+ i_item_sk: int64,
+ i_item_id: string,
+ i_rec_start_date: string?,
+ i_rec_end_date: string?,
+ i_item_desc: string?,
+ i_current_price: double?,
+ i_wholesale_cost: double?,
+ i_brand_id: int64? ,
+ i_brand: string?,
+ i_class_id: int64? ,
+ i_class: string?,
+ i_category_id: int64? ,
+ i_category: string?,
+ i_manufact_id: int64? ,
+ i_manufact: string?,
+ i_size: string?,
+ i_formulation: string?,
+ i_color: string?,
+ i_units: string?,
+ i_container: string?,
+ i_manager_id: int64?,
+ i_product_name: string?
+}
+
+create dataset store_sales (store_sales_type)
+primary key ss_item_sk, ss_ticket_number;
+
+create dataset item (item_type)
+primary key i_item_sk;
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581/query-ASTERIXDB-1581.2.update.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581/query-ASTERIXDB-1581.2.update.sqlpp
new file mode 100644
index 0000000..bf0a6f7
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581/query-ASTERIXDB-1581.2.update.sqlpp
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+use tpcds;
+
+load dataset store_sales using localfs ((`path`=`asterix_nc1://data/tpcds/store_sales.csv`),
+(`format`=`delimited-text`), (`delimiter`=`|`));
+
+load dataset item using localfs ((`path`=`asterix_nc1://data/tpcds/item.csv`),
+(`format`=`delimited-text`), (`delimiter`=`|`));
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581/query-ASTERIXDB-1581.3.query.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581/query-ASTERIXDB-1581.3.query.sqlpp
new file mode 100644
index 0000000..cb9a030
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/tpcds/query-ASTERIXDB-1581/query-ASTERIXDB-1581.3.query.sqlpp
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+use tpcds;
+
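+// Tests a case expression whose WHEN condition contains an (uncorrelated) aggregate subquery;
+// this is the base variant of ASTERIXDB-1581.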
+select case when (select value count(ss)
+ from store_sales ss
+ where ss_quantity >= 1 and ss_quantity <= 20)[0] > 25437
+ then 1.0
+ else 2.0
+ end bucket1
+from item
+where i_item_sk = 1;
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/results/subquery/aggregate_join/aggregate_join.1.adm b/asterixdb/asterix-app/src/test/resources/runtimets/results/subquery/aggregate_join/aggregate_join.1.adm
new file mode 100644
index 0000000..f03669f
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/results/subquery/aggregate_join/aggregate_join.1.adm
@@ -0,0 +1 @@
+{ "min": 0, "s": { "id": 1 } }
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/results/subquery/aggregate_join_external/aggregate_join_external.1.adm b/asterixdb/asterix-app/src/test/resources/runtimets/results/subquery/aggregate_join_external/aggregate_join_external.1.adm
new file mode 100644
index 0000000..9653dbe
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/results/subquery/aggregate_join_external/aggregate_join_external.1.adm
@@ -0,0 +1,5 @@
+{ "min": 1, "c": { "c_custkey": 2, "c_name": "Customer#000000002", "c_address": "XSTf4,NCwDVaWNe6tEgvwfmRchLXak", "c_nationkey": 13, "c_phone": "23-768-687-3665", "c_acctbal": 121.65, "c_mktsegment": "AUTOMOBILE", "c_comment": "l accounts. blithely ironic theodolites integrate boldly: caref" } }
+{ "min": 1, "c": { "c_custkey": 3, "c_name": "Customer#000000003", "c_address": "MG9kdTD2WBHm", "c_nationkey": 1, "c_phone": "11-719-748-3364", "c_acctbal": 7498.12, "c_mktsegment": "AUTOMOBILE", "c_comment": " deposits eat slyly ironic, even instructions. express foxes detect slyly. blithely even accounts abov" } }
+{ "min": 1, "c": { "c_custkey": 4, "c_name": "Customer#000000004", "c_address": "XxVSJsLAGtn", "c_nationkey": 4, "c_phone": "14-128-190-5944", "c_acctbal": 2866.83, "c_mktsegment": "MACHINERY", "c_comment": " requests. final, regular ideas sleep final accou" } }
+{ "min": 1, "c": { "c_custkey": 5, "c_name": "Customer#000000005", "c_address": "KvpyuHCplrB84WgAiGV6sYpZq7Tj", "c_nationkey": 3, "c_phone": "13-750-942-6364", "c_acctbal": 794.47, "c_mktsegment": "HOUSEHOLD", "c_comment": "n accounts will have to unwind. foxes cajole accor" } }
+{ "min": 1, "c": { "c_custkey": 6, "c_name": "Customer#000000006", "c_address": "sKZz0CsnMD7mp4Xd0YrBvx,LREYKUWAh yVn", "c_nationkey": 20, "c_phone": "30-114-968-4951", "c_acctbal": 7638.57, "c_mktsegment": "AUTOMOBILE", "c_comment": "tions. even deposits boost according to the slyly bold packages. final accounts cajole requests. furious" } }
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/results/tpcds/query-ASTERIXDB-1581-2/query-ASTERIXDB-1581-2.1.adm b/asterixdb/asterix-app/src/test/resources/runtimets/results/tpcds/query-ASTERIXDB-1581-2/query-ASTERIXDB-1581-2.1.adm
new file mode 100644
index 0000000..c7a4707
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/results/tpcds/query-ASTERIXDB-1581-2/query-ASTERIXDB-1581-2.1.adm
@@ -0,0 +1 @@
+{ "bucket1": [ { "$1": 2.16 } ] }
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/results/tpcds/query-ASTERIXDB-1581-4/query-ASTERIXDB-1581-4.1.adm b/asterixdb/asterix-app/src/test/resources/runtimets/results/tpcds/query-ASTERIXDB-1581-4/query-ASTERIXDB-1581-4.1.adm
new file mode 100644
index 0000000..92de03a
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/results/tpcds/query-ASTERIXDB-1581-4/query-ASTERIXDB-1581-4.1.adm
@@ -0,0 +1 @@
+{ "bucket1": 2.16 }
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/results/tpcds/query-ASTERIXDB-1581-6/query-ASTERIXDB-1581-6.1.adm b/asterixdb/asterix-app/src/test/resources/runtimets/results/tpcds/query-ASTERIXDB-1581-6/query-ASTERIXDB-1581-6.1.adm
new file mode 100644
index 0000000..0db3ede
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/results/tpcds/query-ASTERIXDB-1581-6/query-ASTERIXDB-1581-6.1.adm
@@ -0,0 +1 @@
+{ "bucket1": [ { "$2": -377.0216666666667 } ] }
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/results/tpcds/query-ASTERIXDB-1581-correlated/query-ASTERIXDB-1581-correlated.1.adm b/asterixdb/asterix-app/src/test/resources/runtimets/results/tpcds/query-ASTERIXDB-1581-correlated/query-ASTERIXDB-1581-correlated.1.adm
new file mode 100644
index 0000000..aad62bb
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/results/tpcds/query-ASTERIXDB-1581-correlated/query-ASTERIXDB-1581-correlated.1.adm
@@ -0,0 +1 @@
+{ "bucket1": [ { "$1": 0.0 } ] }
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/results/tpcds/query-ASTERIXDB-1581/query-ASTERIXDB-1581.1.adm b/asterixdb/asterix-app/src/test/resources/runtimets/results/tpcds/query-ASTERIXDB-1581/query-ASTERIXDB-1581.1.adm
new file mode 100644
index 0000000..faed854
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/results/tpcds/query-ASTERIXDB-1581/query-ASTERIXDB-1581.1.adm
@@ -0,0 +1 @@
+{ "bucket1": 2.0 }
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/testsuite_sqlpp.xml b/asterixdb/asterix-app/src/test/resources/runtimets/testsuite_sqlpp.xml
index a89808e..7fe7c40 100644
--- a/asterixdb/asterix-app/src/test/resources/runtimets/testsuite_sqlpp.xml
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/testsuite_sqlpp.xml
@@ -5671,6 +5671,16 @@
</test-group>
<test-group name="subquery">
<test-case FilePath="subquery">
+ <compilation-unit name="aggregate_join">
+ <output-dir compare="Text">aggregate_join</output-dir>
+ </compilation-unit>
+ </test-case>
+ <test-case FilePath="subquery">
+ <compilation-unit name="aggregate_join_external">
+ <output-dir compare="Text">aggregate_join_external</output-dir>
+ </compilation-unit>
+ </test-case>
+ <test-case FilePath="subquery">
<compilation-unit name="gby_inline">
<output-dir compare="Text">gby_inline</output-dir>
</compilation-unit>
@@ -5871,6 +5881,46 @@
</compilation-unit>
</test-case>
<test-case FilePath="tpcds">
+ <compilation-unit name="query-ASTERIXDB-1581">
+ <output-dir compare="Text">query-ASTERIXDB-1581</output-dir>
+ </compilation-unit>
+ </test-case>
+ <test-case FilePath="tpcds">
+ <compilation-unit name="query-ASTERIXDB-1581-2">
+ <output-dir compare="Text">query-ASTERIXDB-1581-2</output-dir>
+ </compilation-unit>
+ </test-case>
+ <test-case FilePath="tpcds">
+ <compilation-unit name="query-ASTERIXDB-1581-3">
+ <output-dir compare="Text">query-ASTERIXDB-1581-2</output-dir>
+ </compilation-unit>
+ </test-case>
+ <test-case FilePath="tpcds">
+ <compilation-unit name="query-ASTERIXDB-1581-4">
+ <output-dir compare="Text">query-ASTERIXDB-1581-4</output-dir>
+ </compilation-unit>
+ </test-case>
+ <test-case FilePath="tpcds">
+ <compilation-unit name="query-ASTERIXDB-1581-5">
+ <output-dir compare="Text">query-ASTERIXDB-1581-2</output-dir>
+ </compilation-unit>
+ </test-case>
+ <test-case FilePath="tpcds">
+ <compilation-unit name="query-ASTERIXDB-1581-6">
+ <output-dir compare="Text">query-ASTERIXDB-1581-6</output-dir>
+ </compilation-unit>
+ </test-case>
+ <test-case FilePath="tpcds">
+ <compilation-unit name="query-ASTERIXDB-1581-correlated">
+ <output-dir compare="Text">query-ASTERIXDB-1581-correlated</output-dir>
+ </compilation-unit>
+ </test-case>
+ <test-case FilePath="tpcds">
+ <compilation-unit name="query-ASTERIXDB-1581-correlated-2">
+ <output-dir compare="Text">query-ASTERIXDB-1581-2</output-dir>
+ </compilation-unit>
+ </test-case>
+ <test-case FilePath="tpcds">
<compilation-unit name="query-ASTERIXDB-1596">
<output-dir compare="Text">query-ASTERIXDB-1596</output-dir>
</compilation-unit>
diff --git a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/declared/AqlDataSource.java b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/declared/AqlDataSource.java
index d406108..6db28c5 100644
--- a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/declared/AqlDataSource.java
+++ b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/declared/AqlDataSource.java
@@ -77,6 +77,7 @@
return schemaTypes;
}
+ @Override
public INodeDomain getDomain() {
return domain;
}
diff --git a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/metadata/IDataSource.java b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/metadata/IDataSource.java
index 63ce5fa..e15b699 100644
--- a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/metadata/IDataSource.java
+++ b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/metadata/IDataSource.java
@@ -22,6 +22,7 @@
import org.apache.hyracks.algebricks.core.algebra.base.LogicalVariable;
import org.apache.hyracks.algebricks.core.algebra.properties.FunctionalDependency;
+import org.apache.hyracks.algebricks.core.algebra.properties.INodeDomain;
public interface IDataSource<T> {
public T getId();
@@ -34,4 +35,6 @@
// https://issues.apache.org/jira/browse/ASTERIXDB-1619
public boolean isScanAccessPathALeaf();
+
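+    // Returns the node domain on which the data source resides (used when reasoning about
+    // required partitioning properties for data source scans).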
+ public INodeDomain getDomain();
}
diff --git a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/logical/visitors/CardinalityInferenceVisitor.java b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/logical/visitors/CardinalityInferenceVisitor.java
index 8d74de3..ae98d05 100644
--- a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/logical/visitors/CardinalityInferenceVisitor.java
+++ b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/logical/visitors/CardinalityInferenceVisitor.java
@@ -18,9 +18,16 @@
*/
package org.apache.hyracks.algebricks.core.algebra.operators.logical.visitors;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
import org.apache.commons.lang3.mutable.Mutable;
import org.apache.hyracks.algebricks.common.exceptions.AlgebricksException;
+import org.apache.hyracks.algebricks.core.algebra.base.ILogicalExpression;
import org.apache.hyracks.algebricks.core.algebra.base.ILogicalOperator;
+import org.apache.hyracks.algebricks.core.algebra.base.LogicalVariable;
+import org.apache.hyracks.algebricks.core.algebra.expressions.ConstantExpression;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.AggregateOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.AssignOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.DataSourceScanOperator;
@@ -36,11 +43,11 @@
import org.apache.hyracks.algebricks.core.algebra.operators.logical.IntersectOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.LeftOuterJoinOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.LeftOuterUnnestMapOperator;
+import org.apache.hyracks.algebricks.core.algebra.operators.logical.LeftOuterUnnestOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.LimitOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.MaterializeOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.NestedTupleSourceOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.OrderOperator;
-import org.apache.hyracks.algebricks.core.algebra.operators.logical.LeftOuterUnnestOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.PartitioningSplitOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.ProjectOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.ReplicateOperator;
@@ -58,13 +65,29 @@
import org.apache.hyracks.algebricks.core.algebra.visitors.ILogicalOperatorVisitor;
/**
- * A visitor that provides the basic inference of tuple cardinalities of an
- * operator's output. There are only two cases: 1. the cardinality is one in the
- * worst case; 2. the cardinality is some unknown value.
+ * A visitor that provides the basic, static inference of tuple cardinalities of an
+ * operator's output. There are only five cases:
+ * <p/>
+ * 1. ZERO_OR_ONE: the cardinality is either zero or one;
+ * <p/>
+ * 2. ONE: the cardinality is exactly one in any case;
+ * <p/>
+ * 3. ZERO_OR_ONE_GROUP: if we group output tuples of the operator by any variable in <code>keyVariables</code>, it will
+ * result in zero or one group;
+ * <p/>
+ * 4. ONE_GROUP: if we group output tuples of the operator by any variable in <code>keyVariables</code>, it will
+ * result in exactly one group;
+ * <p/>
+ * 5. UNKNOWN: the cardinality is some unknown value.
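+ * <p/>
+ * For example, as implemented below, a SELECT or LIMIT over an input with cardinality ONE yields
+ * ZERO_OR_ONE, and a left outer join whose left branch delivers exactly one tuple yields ONE_GROUP,
+ * keyed by the live variables of that left branch.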
*/
public class CardinalityInferenceVisitor implements ILogicalOperatorVisitor<Long, Void> {
- private static final Long ONE = 1L;
- private static final Long UNKNOWN = 1000L;
+ private static final long ZERO_OR_ONE = 0L;
+ private static final long ONE = 1L;
+ private static final long ZERO_OR_ONE_GROUP = 2L;
+ private static final long ONE_GROUP = 3L;
+ private static final long UNKNOWN = 1000L;
+
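+    // Live variables of a branch that delivers at most one tuple; grouping the output by any of
+    // these variables results in at most one group (see ZERO_OR_ONE_GROUP and ONE_GROUP above).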
+ private final Set<LogicalVariable> keyVariables = new HashSet<>();
@Override
public Long visitAggregateOperator(AggregateOperator op, Void arg) throws AlgebricksException {
@@ -78,29 +101,43 @@
@Override
public Long visitEmptyTupleSourceOperator(EmptyTupleSourceOperator op, Void arg) throws AlgebricksException {
- // Empty tuple source sends one empty tuple to kick off the pipeline.
+ // Empty tuple source operator sends an empty tuple to downstream operators.
return ONE;
}
@Override
public Long visitGroupByOperator(GroupByOperator op, Void arg) throws AlgebricksException {
- return op.getInputs().get(0).getValue().accept(this, arg);
+ ILogicalOperator inputOp = op.getInputs().get(0).getValue();
+ long inputCardinality = inputOp.accept(this, arg);
+ List<LogicalVariable> gbyVar = op.getGbyVarList();
+ if (inputCardinality == ONE_GROUP && keyVariables.containsAll(gbyVar)) {
+ keyVariables.clear();
+ return ONE;
+ }
+ if (inputCardinality == ZERO_OR_ONE_GROUP && keyVariables.containsAll(gbyVar)) {
+ keyVariables.clear();
+ return ZERO_OR_ONE;
+ }
+ // ZERO_OR_ONE, ONE, ZERO_OR_ONE_GROUP, ONE_GROUP, or UNKNOWN.
+ return inputCardinality;
}
@Override
public Long visitLimitOperator(LimitOperator op, Void arg) throws AlgebricksException {
- // This is only a worst-case estimate
- return op.getInputs().get(0).getValue().accept(this, arg);
+ return adjustCardinalityForTupleReductionOperator(op.getInputs().get(0).getValue().accept(this, arg));
}
@Override
public Long visitInnerJoinOperator(InnerJoinOperator op, Void arg) throws AlgebricksException {
- return visitJoin(op, arg);
+ // Inner join can have 0 to M * N tuples, where M is the cardinality of the left input branch
+ // and N is the cardinality of the right input branch.
+ // We only provide inference for the case where the join condition is TRUE.
+ return visitInnerJoin(op, arg);
}
@Override
public Long visitLeftOuterJoinOperator(LeftOuterJoinOperator op, Void arg) throws AlgebricksException {
- return visitJoin(op, arg);
+ return visitLeftOuterUnnest(op, arg);
}
@Override
@@ -120,8 +157,7 @@
@Override
public Long visitSelectOperator(SelectOperator op, Void arg) throws AlgebricksException {
- // This is only a worst-case inference.
- return op.getInputs().get(0).getValue().accept(this, arg);
+ return adjustCardinalityForTupleReductionOperator(op.getInputs().get(0).getValue().accept(this, arg));
}
@Override
@@ -131,6 +167,7 @@
@Override
public Long visitProjectOperator(ProjectOperator op, Void arg) throws AlgebricksException {
+ keyVariables.retainAll(op.getVariables()); // Only returns live variables.
return op.getInputs().get(0).getValue().accept(this, arg);
}
@@ -176,7 +213,7 @@
@Override
public Long visitLeftOuterUnnestOperator(LeftOuterUnnestOperator op, Void arg) throws AlgebricksException {
- return UNKNOWN;
+ return visitLeftOuterUnnest(op, arg);
}
@Override
@@ -186,7 +223,7 @@
@Override
public Long visitLeftOuterUnnestMapOperator(LeftOuterUnnestMapOperator op, Void arg) throws AlgebricksException {
- return UNKNOWN;
+ return visitLeftOuterUnnest(op, arg);
}
@Override
@@ -239,23 +276,96 @@
public Long visitIntersectOperator(IntersectOperator op, Void arg) throws AlgebricksException {
long cardinality = UNKNOWN;
for (Mutable<ILogicalOperator> inputOpRef : op.getInputs()) {
- Long branchCardinality = inputOpRef.getValue().accept(this, arg);
- if (branchCardinality < cardinality) {
- cardinality = branchCardinality;
+ long inputCardinality = inputOpRef.getValue().accept(this, arg);
+ if (inputCardinality <= ONE) {
+ return ZERO_OR_ONE;
+ }
+ if (inputCardinality == ZERO_OR_ONE_GROUP || inputCardinality == ONE_GROUP) {
+ cardinality = ZERO_OR_ONE_GROUP;
}
}
return cardinality;
}
- private long visitJoin(ILogicalOperator op, Void arg) throws AlgebricksException {
- long cardinality = 1L;
- for (Mutable<ILogicalOperator> inputOpRef : op.getInputs()) {
- cardinality *= inputOpRef.getValue().accept(this, arg);
+ // Visits an operator that has left-outer semantics, e.g.,
+ // left outer join, left outer unnest, left outer unnest map.
+ private long visitLeftOuterUnnest(ILogicalOperator operator, Void arg) throws AlgebricksException {
+ ILogicalOperator left = operator.getInputs().get(0).getValue();
+ long leftCardinality = left.accept(this, arg);
+ if (leftCardinality == ONE) {
+ keyVariables.clear();
+ VariableUtilities.getLiveVariables(left, keyVariables);
+ return ONE_GROUP;
}
- if (cardinality > ONE) {
- cardinality = UNKNOWN;
+ if (leftCardinality == ZERO_OR_ONE) {
+ keyVariables.clear();
+ VariableUtilities.getLiveVariables(left, keyVariables);
+ return ZERO_OR_ONE_GROUP;
}
- return cardinality;
+ // ZERO_OR_ONE_GROUP, ONE_GROUP (maintained from the left branch) or UNKNOWN.
+ return leftCardinality;
+ }
+
+ // Visits an inner join operator; in particular, handles the case where the join is a cartesian product.
+ private long visitInnerJoin(InnerJoinOperator joinOperator, Void arg) throws AlgebricksException {
+ ILogicalExpression conditionExpr = joinOperator.getCondition().getValue();
+ if (!conditionExpr.equals(ConstantExpression.TRUE)) {
+ // Currently we cannot provide an estimate for more general join conditions.
+ return UNKNOWN;
+ }
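+        // Key variables contributed by either input branch; they are installed into keyVariables only
+        // when the join result is still guaranteed to form at most one group.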
+ Set<LogicalVariable> newKeyVars = new HashSet<>();
+
+ // Visits the left branch and adds left key variables (if the left branch delivers one group).
+ ILogicalOperator left = joinOperator.getInputs().get(0).getValue();
+ long leftCardinality = left.accept(this, arg);
+ newKeyVars.addAll(keyVariables);
+
+ // Visits the right branch and adds right key variables (if the right branch delivers one group).
+ ILogicalOperator right = joinOperator.getInputs().get(1).getValue();
+ long rightCardinality = right.accept(this, arg);
+ newKeyVars.addAll(keyVariables);
+
+ // If both branches have cardinality zero or one, the result has cardinality zero or one.
+ if (leftCardinality == ZERO_OR_ONE && rightCardinality == ZERO_OR_ONE) {
+ return ZERO_OR_ONE;
+ }
+
+ // If both branches have cardinality one, the result for sure has cardinality one.
+ if (leftCardinality == ONE && rightCardinality == ONE) {
+ return ONE;
+ }
+
+ keyVariables.clear();
+ // If one branch has cardinality zero or one, the result has zero or one group.
+ if (leftCardinality == ZERO_OR_ONE || rightCardinality == ZERO_OR_ONE) {
+ VariableUtilities.getLiveVariables(leftCardinality == ZERO_OR_ONE ? left : right, keyVariables);
+ return ZERO_OR_ONE_GROUP;
+ }
+
+ // If one branch has cardinality one, the result has one group.
+ if (leftCardinality == ONE || rightCardinality == ONE) {
+ VariableUtilities.getLiveVariables(leftCardinality == ONE ? left : right, keyVariables);
+ return ONE_GROUP;
+ }
+
+ // If either branch delivers at most one group, the result also delivers at most one group.
+ if (leftCardinality == ONE_GROUP || rightCardinality == ONE_GROUP || leftCardinality == ZERO_OR_ONE_GROUP
+ || rightCardinality == ZERO_OR_ONE_GROUP) {
+ keyVariables.addAll(newKeyVars);
+ return Math.min(leftCardinality, rightCardinality);
+ }
+ return UNKNOWN;
+ }
+
+ // For tuple-reducing operators such as SELECT and LIMIT.
+ private long adjustCardinalityForTupleReductionOperator(long inputCardinality) {
+ if (inputCardinality == ONE) {
+ return ZERO_OR_ONE;
+ }
+ if (inputCardinality == ONE_GROUP) {
+ return ZERO_OR_ONE_GROUP;
+ }
+ return inputCardinality;
}
}
diff --git a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/logical/visitors/IsomorphismVariableMappingVisitor.java b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/logical/visitors/IsomorphismVariableMappingVisitor.java
index 7909499..965008c 100644
--- a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/logical/visitors/IsomorphismVariableMappingVisitor.java
+++ b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/logical/visitors/IsomorphismVariableMappingVisitor.java
@@ -22,8 +22,8 @@
import java.util.HashSet;
import java.util.List;
import java.util.Map;
-import java.util.Map.Entry;
import java.util.Set;
+import java.util.Map.Entry;
import org.apache.commons.lang3.mutable.Mutable;
import org.apache.commons.lang3.mutable.MutableObject;
@@ -293,10 +293,13 @@
}
private void mapChildren(ILogicalOperator op, ILogicalOperator opArg) throws AlgebricksException {
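+        // Operators of different kinds cannot be isomorphic, so stop mapping instead of failing.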
+ if (op.getOperatorTag() != opArg.getOperatorTag()) {
+ return;
+ }
List<Mutable<ILogicalOperator>> inputs = op.getInputs();
List<Mutable<ILogicalOperator>> inputsArg = opArg.getInputs();
if (inputs.size() != inputsArg.size()) {
- throw new AlgebricksException("children are not isomoprhic");
+ return;
}
for (int i = 0; i < inputs.size(); i++) {
ILogicalOperator input = inputs.get(i).getValue();
@@ -306,8 +309,11 @@
}
private void mapVariables(ILogicalOperator left, ILogicalOperator right) throws AlgebricksException {
- List<LogicalVariable> producedVarLeft = new ArrayList<LogicalVariable>();
- List<LogicalVariable> producedVarRight = new ArrayList<LogicalVariable>();
+ if (left.getOperatorTag() != right.getOperatorTag()) {
+ return;
+ }
+ List<LogicalVariable> producedVarLeft = new ArrayList<>();
+ List<LogicalVariable> producedVarRight = new ArrayList<>();
VariableUtilities.getProducedVariables(left, producedVarLeft);
VariableUtilities.getProducedVariables(right, producedVarRight);
mapVariables(producedVarLeft, producedVarRight);
@@ -327,6 +333,9 @@
private void mapVariablesForAbstractAssign(ILogicalOperator left, ILogicalOperator right)
throws AlgebricksException {
+ if (left.getOperatorTag() != right.getOperatorTag()) {
+ return;
+ }
AbstractAssignOperator leftOp = (AbstractAssignOperator) left;
AbstractAssignOperator rightOp = (AbstractAssignOperator) right;
List<LogicalVariable> producedVarLeft = new ArrayList<LogicalVariable>();
@@ -338,6 +347,9 @@
}
private void mapVariablesForGroupBy(ILogicalOperator left, ILogicalOperator right) throws AlgebricksException {
+ if (left.getOperatorTag() != right.getOperatorTag()) {
+ return;
+ }
GroupByOperator leftOp = (GroupByOperator) left;
GroupByOperator rightOp = (GroupByOperator) right;
List<Pair<LogicalVariable, Mutable<ILogicalExpression>>> leftPairs = leftOp.getGroupByList();
@@ -416,6 +428,9 @@
}
private void mapVariablesStandard(ILogicalOperator op, ILogicalOperator arg) throws AlgebricksException {
+ if (op.getOperatorTag() != arg.getOperatorTag()) {
+ return;
+ }
mapChildren(op, arg);
mapVariables(op, arg);
}
@@ -429,6 +444,9 @@
}
private void mapVariablesForUnion(ILogicalOperator op, ILogicalOperator arg) {
+ if (op.getOperatorTag() != arg.getOperatorTag()) {
+ return;
+ }
UnionAllOperator union = (UnionAllOperator) op;
UnionAllOperator unionArg = (UnionAllOperator) arg;
mapVarTripleList(union.getVariableMappings(), unionArg.getVariableMappings());
@@ -456,6 +474,9 @@
}
private void mapVariablesForIntersect(IntersectOperator op, ILogicalOperator arg) {
+ if (op.getOperatorTag() != arg.getOperatorTag()) {
+ return;
+ }
IntersectOperator opArg = (IntersectOperator) arg;
if (op.getNumInput() != opArg.getNumInput()) {
return;
diff --git a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/logical/visitors/LogicalOperatorDeepCopyWithNewVariablesVisitor.java b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/logical/visitors/LogicalOperatorDeepCopyWithNewVariablesVisitor.java
index 2c4dacf..54acf2f 100644
--- a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/logical/visitors/LogicalOperatorDeepCopyWithNewVariablesVisitor.java
+++ b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/logical/visitors/LogicalOperatorDeepCopyWithNewVariablesVisitor.java
@@ -19,7 +19,7 @@
package org.apache.hyracks.algebricks.core.algebra.operators.logical.visitors;
import java.util.ArrayList;
-import java.util.HashMap;
+import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
@@ -47,12 +47,11 @@
import org.apache.hyracks.algebricks.core.algebra.operators.logical.IntersectOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.LeftOuterJoinOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.LeftOuterUnnestMapOperator;
+import org.apache.hyracks.algebricks.core.algebra.operators.logical.LeftOuterUnnestOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.LimitOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.MaterializeOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.NestedTupleSourceOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.OrderOperator;
-import org.apache.hyracks.algebricks.core.algebra.operators.logical.OrderOperator.IOrder;
-import org.apache.hyracks.algebricks.core.algebra.operators.logical.LeftOuterUnnestOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.PartitioningSplitOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.ProjectOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.ReplicateOperator;
@@ -64,6 +63,7 @@
import org.apache.hyracks.algebricks.core.algebra.operators.logical.UnionAllOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.UnnestMapOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.UnnestOperator;
+import org.apache.hyracks.algebricks.core.algebra.operators.logical.OrderOperator.IOrder;
import org.apache.hyracks.algebricks.core.algebra.plan.ALogicalPlanImpl;
import org.apache.hyracks.algebricks.core.algebra.properties.FunctionalDependency;
import org.apache.hyracks.algebricks.core.algebra.typing.ITypingContext;
@@ -83,39 +83,43 @@
// Key: Variable in the original plan. Value: New variable replacing the
// original one in the copied plan.
- private final Map<LogicalVariable, LogicalVariable> inputVarToOutputVarMapping;
+ private final LinkedHashMap<LogicalVariable, LogicalVariable> inputVarToOutputVarMapping;
// Key: New variable in the new plan. Value: The old variable in the
// original plan.
- private final Map<LogicalVariable, LogicalVariable> outputVarToInputVarMapping;
+ private final LinkedHashMap<LogicalVariable, LogicalVariable> outputVarToInputVarMapping;
/**
- * @param IOptimizationContext
- * , the optimization context
+ * @param varContext
+ * the variable context.
+ * @param typeContext
+ * the type context.
*/
public LogicalOperatorDeepCopyWithNewVariablesVisitor(IVariableContext varContext, ITypingContext typeContext) {
this.varContext = varContext;
this.typeContext = typeContext;
- this.inputVarToOutputVarMapping = new HashMap<>();
- this.outputVarToInputVarMapping = new HashMap<>();
+ this.inputVarToOutputVarMapping = new LinkedHashMap<>();
+ this.outputVarToInputVarMapping = new LinkedHashMap<>();
this.exprDeepCopyVisitor = new LogicalExpressionDeepCopyWithNewVariablesVisitor(varContext,
outputVarToInputVarMapping, inputVarToOutputVarMapping);
}
/**
- * @param IOptimizationContext
- * the optimization context
+ * @param varContext
+ * the variable context.
+ * @param typeContext
+ * the type context.
* @param inVarMapping
* Variable mapping keyed by variables in the original plan.
* Those variables are replaced by their corresponding value in
* the map in the copied plan.
*/
public LogicalOperatorDeepCopyWithNewVariablesVisitor(IVariableContext varContext, ITypingContext typeContext,
- Map<LogicalVariable, LogicalVariable> inVarMapping) {
+ LinkedHashMap<LogicalVariable, LogicalVariable> inVarMapping) {
this.varContext = varContext;
this.typeContext = typeContext;
this.inputVarToOutputVarMapping = inVarMapping;
- this.outputVarToInputVarMapping = new HashMap<>();
+ this.outputVarToInputVarMapping = new LinkedHashMap<>();
exprDeepCopyVisitor = new LogicalExpressionDeepCopyWithNewVariablesVisitor(varContext, inVarMapping,
inputVarToOutputVarMapping);
}
@@ -560,11 +564,11 @@
return opCopy;
}
- public Map<LogicalVariable, LogicalVariable> getOutputToInputVariableMapping() {
+ public LinkedHashMap<LogicalVariable, LogicalVariable> getOutputToInputVariableMapping() {
return outputVarToInputVarMapping;
}
- public Map<LogicalVariable, LogicalVariable> getInputToOutputVariableMapping() {
+ public LinkedHashMap<LogicalVariable, LogicalVariable> getInputToOutputVariableMapping() {
return inputVarToOutputVarMapping;
}
diff --git a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/logical/visitors/VariableUtilities.java b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/logical/visitors/VariableUtilities.java
index 8859c03..7221e81 100644
--- a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/logical/visitors/VariableUtilities.java
+++ b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/logical/visitors/VariableUtilities.java
@@ -20,10 +20,11 @@
import java.util.Collection;
import java.util.HashSet;
+import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
-import java.util.Map.Entry;
import java.util.Set;
+import java.util.Map.Entry;
import org.apache.commons.lang3.mutable.Mutable;
import org.apache.hyracks.algebricks.common.exceptions.AlgebricksException;
@@ -111,7 +112,25 @@
substituteVariables(op, v1, v2, true, ctx);
}
- public static void substituteVariables(ILogicalOperator op, Map<LogicalVariable, LogicalVariable> varMap,
+ /**
+ * Substitute variable references inside an operator according to a variable map.
+ *
+ * @param op
+ * the operator in which variable references are substituted.
+ * @param varMap
+ * a map from old variables to new variables. The map is required to be a
+ * LinkedHashMap so that recursive variable substitution can be performed in a
+ * single pass: substitutions are applied in the entry insertion order of the map.
+ * Typically, the map is populated by a recursive, bottom-up traversal of the
+ * query plan. For example, if $1->$2 and $2->$3 are inserted into the map in that
+ * order, $1 will have been replaced by $3 when this method returns.
+ *
+ * @param ctx
+ * a typing context that keeps track of the type of each variable.
+ * @throws AlgebricksException
+ */
+ public static void substituteVariables(ILogicalOperator op, LinkedHashMap<LogicalVariable, LogicalVariable> varMap,
ITypingContext ctx) throws AlgebricksException {
for (Map.Entry<LogicalVariable, LogicalVariable> entry : varMap.entrySet()) {
VariableUtilities.substituteVariables(op, entry.getKey(), entry.getValue(), ctx);
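The ordering contract above can be illustrated with a minimal, self-contained sketch; it uses plain Strings and a toy textual plan in place of LogicalVariable and ILogicalOperator, so none of the Algebricks classes are required:

import java.util.LinkedHashMap;
import java.util.Map;

public class OrderedSubstitutionSketch {
    public static void main(String[] args) {
        // Insertion order matters: $1 -> $2 is recorded before $2 -> $3.
        LinkedHashMap<String, String> varMap = new LinkedHashMap<>();
        varMap.put("$1", "$2");
        varMap.put("$2", "$3");

        // A toy textual "plan" that refers to $1 and $2.
        String plan = "select($1 > 0) project($1, $2)";

        // Apply one (old, new) pair at a time, in entry insertion order,
        // mirroring how substituteVariables iterates over the map.
        for (Map.Entry<String, String> e : varMap.entrySet()) {
            plan = plan.replace(e.getKey(), e.getValue());
        }

        // Prints "select($3 > 0) project($3, $3)": $1 was first rewritten to $2
        // and then, by the second entry, to $3.
        System.out.println(plan);
    }
}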
diff --git a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/physical/DataSourceScanPOperator.java b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/physical/DataSourceScanPOperator.java
index a087305..4a5ac5a 100644
--- a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/physical/DataSourceScanPOperator.java
+++ b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/physical/DataSourceScanPOperator.java
@@ -26,6 +26,7 @@
import org.apache.hyracks.algebricks.core.algebra.base.IHyracksJobBuilder;
import org.apache.hyracks.algebricks.core.algebra.base.ILogicalOperator;
import org.apache.hyracks.algebricks.core.algebra.base.IOptimizationContext;
+import org.apache.hyracks.algebricks.core.algebra.base.LogicalOperatorTag;
import org.apache.hyracks.algebricks.core.algebra.base.LogicalVariable;
import org.apache.hyracks.algebricks.core.algebra.base.PhysicalOperatorTag;
import org.apache.hyracks.algebricks.core.algebra.expressions.IVariableTypeEnvironment;
@@ -34,6 +35,12 @@
import org.apache.hyracks.algebricks.core.algebra.metadata.IMetadataProvider;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.DataSourceScanOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.IOperatorSchema;
+import org.apache.hyracks.algebricks.core.algebra.properties.BroadcastPartitioningProperty;
+import org.apache.hyracks.algebricks.core.algebra.properties.INodeDomain;
+import org.apache.hyracks.algebricks.core.algebra.properties.IPartitioningRequirementsCoordinator;
+import org.apache.hyracks.algebricks.core.algebra.properties.IPhysicalPropertiesVector;
+import org.apache.hyracks.algebricks.core.algebra.properties.PhysicalRequirements;
+import org.apache.hyracks.algebricks.core.algebra.properties.StructuralPropertiesVector;
import org.apache.hyracks.algebricks.core.jobgen.impl.JobGenContext;
import org.apache.hyracks.api.dataflow.IOperatorDescriptor;
@@ -73,6 +80,24 @@
deliveredProperties = dspp.computePropertiesVector(dssOp.getVariables());
}
+ @Override
+ public PhysicalRequirements getRequiredPropertiesForChildren(ILogicalOperator op,
+ IPhysicalPropertiesVector reqdByParent, IOptimizationContext context) {
+ if (op.getInputs().isEmpty()) {
+ return emptyUnaryRequirements();
+ }
+ ILogicalOperator childOp = op.getInputs().get(0).getValue();
+ // Empty tuple source is a special case that can be partitioned in the same way as the data scan.
+ if (childOp.getOperatorTag() == LogicalOperatorTag.EMPTYTUPLESOURCE) {
+ return emptyUnaryRequirements();
+ }
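+ // Otherwise, the scan's input must be broadcast over the data source's node domain, so
+ // that every partition of the data scan receives the complete input.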
+ INodeDomain domain = dataSource.getDomain();
+ return new PhysicalRequirements(
+ new StructuralPropertiesVector[] {
+ new StructuralPropertiesVector(new BroadcastPartitioningProperty(domain), null) },
+ IPartitioningRequirementsCoordinator.NO_COORDINATION);
+ }
+
@SuppressWarnings("unchecked")
@Override
public void contributeRuntimeOperator(IHyracksJobBuilder builder, JobGenContext context, ILogicalOperator op,
diff --git a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/physical/EmptyTupleSourcePOperator.java b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/physical/EmptyTupleSourcePOperator.java
index dcb7e15..0718d13 100644
--- a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/physical/EmptyTupleSourcePOperator.java
+++ b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/physical/EmptyTupleSourcePOperator.java
@@ -26,7 +26,6 @@
import org.apache.hyracks.algebricks.core.algebra.base.IOptimizationContext;
import org.apache.hyracks.algebricks.core.algebra.base.PhysicalOperatorTag;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.IOperatorSchema;
-import org.apache.hyracks.algebricks.core.algebra.properties.ILocalStructuralProperty;
import org.apache.hyracks.algebricks.core.algebra.properties.IPartitioningProperty;
import org.apache.hyracks.algebricks.core.algebra.properties.IPhysicalPropertiesVector;
import org.apache.hyracks.algebricks.core.algebra.properties.PhysicalRequirements;
@@ -50,8 +49,7 @@
@Override
public void computeDeliveredProperties(ILogicalOperator op, IOptimizationContext context) {
- deliveredProperties = new StructuralPropertiesVector(IPartitioningProperty.UNPARTITIONED,
- new LinkedList<ILocalStructuralProperty>());
+ deliveredProperties = new StructuralPropertiesVector(IPartitioningProperty.UNPARTITIONED, new LinkedList<>());
}
@Override
diff --git a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/util/OperatorManipulationUtil.java b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/util/OperatorManipulationUtil.java
index 97aa853..4437822 100644
--- a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/util/OperatorManipulationUtil.java
+++ b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/util/OperatorManipulationUtil.java
@@ -86,7 +86,8 @@
AbstractLogicalOperator currentOp = op;
while (currentOp.getInputs().size() == 1) {
AbstractLogicalOperator child = (AbstractLogicalOperator) currentOp.getInputs().get(0).getValue();
- if (child.getOperatorTag() == LogicalOperatorTag.EXCHANGE) {
+ // Empty tuple source is a special case that can be partitioned in the same way as the data scan.
+ if (child.getOperatorTag() != LogicalOperatorTag.EMPTYTUPLESOURCE) {
break;
}
child.setExecutionMode(AbstractLogicalOperator.ExecutionMode.PARTITIONED);
diff --git a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/util/OperatorPropertiesUtil.java b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/util/OperatorPropertiesUtil.java
index 353a782..87ea627 100644
--- a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/util/OperatorPropertiesUtil.java
+++ b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/util/OperatorPropertiesUtil.java
@@ -263,11 +263,16 @@
return false;
}
- public static boolean isCardinalityOne(ILogicalOperator operator) throws AlgebricksException {
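+ /**
+ * Returns true if the inferred output cardinality of the operator is exactly one tuple.
+ */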
+ public static boolean isCardinalityExactOne(ILogicalOperator operator) throws AlgebricksException {
CardinalityInferenceVisitor visitor = new CardinalityInferenceVisitor();
return operator.accept(visitor, null) == 1L;
}
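+ /**
+ * Returns true if the inferred output cardinality of the operator is at most one tuple (zero or one).
+ */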
+ public static boolean isCardinalityZeroOrOne(ILogicalOperator operator) throws AlgebricksException {
+ CardinalityInferenceVisitor visitor = new CardinalityInferenceVisitor();
+ return operator.accept(visitor, null) <= 1;
+ }
+
/**
* Whether an operator can be moved around in the query plan.
*
diff --git a/hyracks-fullstack/algebricks/algebricks-examples/piglet-example/src/main/java/org/apache/hyracks/algebricks/examples/piglet/metadata/PigletFileDataSource.java b/hyracks-fullstack/algebricks/algebricks-examples/piglet-example/src/main/java/org/apache/hyracks/algebricks/examples/piglet/metadata/PigletFileDataSource.java
index 5b60022..cb62a34 100644
--- a/hyracks-fullstack/algebricks/algebricks-examples/piglet-example/src/main/java/org/apache/hyracks/algebricks/examples/piglet/metadata/PigletFileDataSource.java
+++ b/hyracks-fullstack/algebricks/algebricks-examples/piglet-example/src/main/java/org/apache/hyracks/algebricks/examples/piglet/metadata/PigletFileDataSource.java
@@ -27,6 +27,7 @@
import org.apache.hyracks.algebricks.core.algebra.properties.FileSplitDomain;
import org.apache.hyracks.algebricks.core.algebra.properties.FunctionalDependency;
import org.apache.hyracks.algebricks.core.algebra.properties.ILocalStructuralProperty;
+import org.apache.hyracks.algebricks.core.algebra.properties.INodeDomain;
import org.apache.hyracks.algebricks.core.algebra.properties.IPhysicalPropertiesVector;
import org.apache.hyracks.algebricks.core.algebra.properties.RandomPartitioningProperty;
import org.apache.hyracks.algebricks.core.algebra.properties.StructuralPropertiesVector;
@@ -53,6 +54,12 @@
return vec;
}
};
+
+ }
+
+ @Override
+ public INodeDomain getDomain() {
+ return new FileSplitDomain(fileSplits);
}
@Override
diff --git a/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/EnforceStructuralPropertiesRule.java b/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/EnforceStructuralPropertiesRule.java
index d07544b..93c9b77 100644
--- a/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/EnforceStructuralPropertiesRule.java
+++ b/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/EnforceStructuralPropertiesRule.java
@@ -68,10 +68,8 @@
import org.apache.hyracks.algebricks.core.algebra.prettyprint.PlanPrettyPrinter;
import org.apache.hyracks.algebricks.core.algebra.properties.FunctionalDependency;
import org.apache.hyracks.algebricks.core.algebra.properties.ILocalStructuralProperty;
-import org.apache.hyracks.algebricks.core.algebra.properties.ILocalStructuralProperty.PropertyType;
import org.apache.hyracks.algebricks.core.algebra.properties.INodeDomain;
import org.apache.hyracks.algebricks.core.algebra.properties.IPartitioningProperty;
-import org.apache.hyracks.algebricks.core.algebra.properties.IPartitioningProperty.PartitioningType;
import org.apache.hyracks.algebricks.core.algebra.properties.IPartitioningRequirementsCoordinator;
import org.apache.hyracks.algebricks.core.algebra.properties.IPhysicalPropertiesVector;
import org.apache.hyracks.algebricks.core.algebra.properties.LocalGroupingProperty;
@@ -83,6 +81,8 @@
import org.apache.hyracks.algebricks.core.algebra.properties.RandomPartitioningProperty;
import org.apache.hyracks.algebricks.core.algebra.properties.StructuralPropertiesVector;
import org.apache.hyracks.algebricks.core.algebra.properties.UnorderedPartitionedProperty;
+import org.apache.hyracks.algebricks.core.algebra.properties.ILocalStructuralProperty.PropertyType;
+import org.apache.hyracks.algebricks.core.algebra.properties.IPartitioningProperty.PartitioningType;
import org.apache.hyracks.algebricks.core.algebra.util.OperatorPropertiesUtil;
import org.apache.hyracks.algebricks.core.config.AlgebricksConfig;
import org.apache.hyracks.algebricks.core.rewriter.base.IAlgebraicRewriteRule;
@@ -428,15 +428,18 @@
IPhysicalPropertiesVector diffPropertiesVector, IPhysicalPropertiesVector required,
IPhysicalPropertiesVector deliveredByChild, INodeDomain domain, boolean nestedPlan,
IOptimizationContext context) throws AlgebricksException {
-
IPartitioningProperty pp = diffPropertiesVector.getPartitioningProperty();
if (pp == null || pp.getPartitioningType() == PartitioningType.UNPARTITIONED) {
addLocalEnforcers(op, childIndex, diffPropertiesVector.getLocalProperties(), nestedPlan, context);
IPhysicalPropertiesVector deliveredByNewChild =
((AbstractLogicalOperator) op.getInputs().get(0).getValue()).getDeliveredPhysicalProperties();
- addPartitioningEnforcers(op, childIndex, pp, required, deliveredByNewChild, domain, context);
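+ // Partitioning enforcers (exchange operators) are only added for top-level plans;
+ // a nested plan is evaluated within a single partition, so no repartitioning is
+ // inserted inside it.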
+ if (!nestedPlan) {
+ addPartitioningEnforcers(op, childIndex, pp, required, deliveredByNewChild, domain, context);
+ }
} else {
- addPartitioningEnforcers(op, childIndex, pp, required, deliveredByChild, pp.getNodeDomain(), context);
+ if (!nestedPlan) {
+ addPartitioningEnforcers(op, childIndex, pp, required, deliveredByChild, pp.getNodeDomain(), context);
+ }
AbstractLogicalOperator newChild = (AbstractLogicalOperator) op.getInputs().get(childIndex).getValue();
IPhysicalPropertiesVector newDiff = newPropertiesDiff(newChild, required, true, context);
AlgebricksConfig.ALGEBRICKS_LOGGER.finest(">>>> New properties diff: " + newDiff + "\n");
diff --git a/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/RemoveCartesianProductWithEmptyBranchRule.java b/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/RemoveCartesianProductWithEmptyBranchRule.java
index bd5d646..bd3656e 100644
--- a/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/RemoveCartesianProductWithEmptyBranchRule.java
+++ b/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/RemoveCartesianProductWithEmptyBranchRule.java
@@ -30,6 +30,7 @@
import org.apache.hyracks.algebricks.core.algebra.expressions.ConstantExpression;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.AbstractBinaryJoinOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.visitors.VariableUtilities;
+import org.apache.hyracks.algebricks.core.algebra.util.OperatorPropertiesUtil;
import org.apache.hyracks.algebricks.core.rewriter.base.IAlgebraicRewriteRule;
/**
@@ -69,6 +70,9 @@
}
Set<LogicalVariable> liveVariables = new HashSet<>();
VariableUtilities.getLiveVariables(op, liveVariables);
- return liveVariables.isEmpty();
+ // The branch can be removed only if it produces no live variables and is guaranteed to
+ // emit exactly one tuple: joining with a single-tuple, variable-free branch changes neither
+ // the schema nor the output cardinality of the cartesian product.
+ return liveVariables.isEmpty() && OperatorPropertiesUtil.isCardinalityExactOne(op);
}
}
diff --git a/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/SimpleUnnestToProductRule.java b/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/SimpleUnnestToProductRule.java
index 06fb360c..58a15e2 100644
--- a/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/SimpleUnnestToProductRule.java
+++ b/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/SimpleUnnestToProductRule.java
@@ -26,14 +26,12 @@
import org.apache.commons.lang3.mutable.Mutable;
import org.apache.commons.lang3.mutable.MutableObject;
import org.apache.hyracks.algebricks.common.exceptions.AlgebricksException;
-import org.apache.hyracks.algebricks.core.algebra.base.ILogicalExpression;
import org.apache.hyracks.algebricks.core.algebra.base.ILogicalOperator;
import org.apache.hyracks.algebricks.core.algebra.base.IOptimizationContext;
import org.apache.hyracks.algebricks.core.algebra.base.LogicalOperatorTag;
import org.apache.hyracks.algebricks.core.algebra.base.LogicalVariable;
import org.apache.hyracks.algebricks.core.algebra.expressions.ConstantExpression;
-import org.apache.hyracks.algebricks.core.algebra.operators.logical.AbstractLogicalOperator;
-import org.apache.hyracks.algebricks.core.algebra.operators.logical.AbstractScanOperator;
+import org.apache.hyracks.algebricks.core.algebra.operators.logical.DataSourceScanOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.EmptyTupleSourceOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.InnerJoinOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.visitors.VariableUtilities;
@@ -50,16 +48,14 @@
@Override
public boolean rewritePost(Mutable<ILogicalOperator> opRef, IOptimizationContext context)
throws AlgebricksException {
- AbstractLogicalOperator op = (AbstractLogicalOperator) opRef.getValue();
- if (op.getOperatorTag() != LogicalOperatorTag.DATASOURCESCAN
- && op.getOperatorTag() != LogicalOperatorTag.UNNEST) {
+ ILogicalOperator op = opRef.getValue();
+ if (!isScanOrUnnest(op)) {
return false;
}
Mutable<ILogicalOperator> opRef2 = op.getInputs().get(0);
- AbstractLogicalOperator op2 = (AbstractLogicalOperator) opRef2.getValue();
-
- if (!(op2 instanceof AbstractScanOperator) && !descOrSelfIsSourceScan(op2)) {
+ ILogicalOperator op2 = opRef2.getValue();
+ if (!isScanOrUnnest(op2) && !descOrSelfIsSourceScan(op2)) {
return false;
}
// Make sure that op does not use any variables produced by op2.
@@ -88,7 +84,7 @@
* to check if currentOpRef holds a DataSourceScanOperator.
*/
&& currentOpRef.getValue().getOperatorTag() == LogicalOperatorTag.DATASOURCESCAN
- && descOrSelfIsSourceScan((AbstractLogicalOperator) currentOpRef.getValue())) {
+ && descOrSelfIsSourceScan(currentOpRef.getValue())) {
if (opsAreIndependent(currentOpRef.getValue(), tupleSourceOpRef.getValue())) {
/** move down the boundary if the operator is independent of the tuple source */
boundaryOpRef = currentOpRef.getValue().getInputs().get(0);
@@ -120,32 +116,57 @@
}
}
+ // If the left (boundary) branch produces at most one tuple, there is no need to rewrite
+ // the unary pipeline into a cartesian product.
+ ILogicalOperator innerBranchOperator = opRef.getValue();
+ ILogicalOperator boundaryOperator = boundaryOpRef.getValue();
+ if (OperatorPropertiesUtil.isCardinalityZeroOrOne(boundaryOperator)
+ // Note that for an external data source, the following check returns false.
+ // Thus, the rewriting does not produce an absolutely correct plan for a CASE expression
+ // that contains subqueries over external datasets in its THEN/ELSE branches, in the
+ // sense that if the condition is not satisfied, the corresponding THEN/ELSE branch
+ // should not be evaluated at all, whereas rewriting to a join will actually evaluate
+ // those branches.
+ // Fixing ASTERIXDB-1620 will ensure correctness for external datasets.
+ && !descOrSelfIsLeafSourceScan(innerBranchOperator, boundaryOperator)) {
+ return false;
+ }
+
/** join the two independent branches */
- InnerJoinOperator join = new InnerJoinOperator(new MutableObject<ILogicalExpression>(ConstantExpression.TRUE),
- new MutableObject<ILogicalOperator>(boundaryOpRef.getValue()),
- new MutableObject<ILogicalOperator>(opRef.getValue()));
+ InnerJoinOperator join = new InnerJoinOperator(new MutableObject<>(ConstantExpression.TRUE),
+ new MutableObject<>(boundaryOperator), new MutableObject<>(innerBranchOperator));
opRef.setValue(join);
ILogicalOperator ets = new EmptyTupleSourceOperator();
context.computeAndSetTypeEnvironmentForOperator(ets);
boundaryOpRef.setValue(ets);
- context.computeAndSetTypeEnvironmentForOperator(boundaryOpRef.getValue());
- context.computeAndSetTypeEnvironmentForOperator(opRef.getValue());
+ context.computeAndSetTypeEnvironmentForOperator(boundaryOperator);
+ context.computeAndSetTypeEnvironmentForOperator(innerBranchOperator);
context.computeAndSetTypeEnvironmentForOperator(join);
return true;
}
- private boolean descOrSelfIsSourceScan(AbstractLogicalOperator op2) {
- // Disregard data source scans in a subplan.
- if (op2.getOperatorTag() == LogicalOperatorTag.SUBPLAN) {
- return false;
- }
- if (op2.getOperatorTag() == LogicalOperatorTag.DATASOURCESCAN
- && op2.getOperatorTag() != LogicalOperatorTag.UNNEST) {
+ private boolean descOrSelfIsSourceScan(ILogicalOperator op) {
+ if (op.getOperatorTag() == LogicalOperatorTag.DATASOURCESCAN) {
return true;
}
- for (Mutable<ILogicalOperator> cRef : op2.getInputs()) {
- AbstractLogicalOperator alo = (AbstractLogicalOperator) cRef.getValue();
- if (descOrSelfIsSourceScan(alo)) {
+ for (Mutable<ILogicalOperator> cRef : op.getInputs()) {
+ if (descOrSelfIsSourceScan(cRef.getValue())) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ private boolean descOrSelfIsLeafSourceScan(ILogicalOperator op, ILogicalOperator bottomOp) {
+ if (op == bottomOp) {
+ return false;
+ }
+ if (op.getOperatorTag() == LogicalOperatorTag.DATASOURCESCAN) {
+ DataSourceScanOperator dataSourceScanOperator = (DataSourceScanOperator) op;
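+ // Note that isScanAccessPathALeaf() returns false for external data sources
+ // (see the ASTERIXDB-1620 note in rewritePost).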
+ return dataSourceScanOperator.getDataSource().isScanAccessPathALeaf();
+ }
+ for (Mutable<ILogicalOperator> cRef : op.getInputs()) {
+ if (descOrSelfIsLeafSourceScan(cRef.getValue(), bottomOp)) {
return true;
}
}
@@ -168,4 +189,9 @@
return true;
}
+ private boolean isScanOrUnnest(ILogicalOperator op) {
+ LogicalOperatorTag opTag = op.getOperatorTag();
+ return opTag == LogicalOperatorTag.DATASOURCESCAN || opTag == LogicalOperatorTag.UNNEST;
+ }
+
}
diff --git a/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/SwitchInnerJoinBranchRule.java b/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/SwitchInnerJoinBranchRule.java
new file mode 100644
index 0000000..277353e
--- /dev/null
+++ b/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/SwitchInnerJoinBranchRule.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hyracks.algebricks.rewriter.rules;
+
+import org.apache.commons.lang3.mutable.Mutable;
+import org.apache.hyracks.algebricks.common.exceptions.AlgebricksException;
+import org.apache.hyracks.algebricks.core.algebra.base.ILogicalOperator;
+import org.apache.hyracks.algebricks.core.algebra.base.IOptimizationContext;
+import org.apache.hyracks.algebricks.core.algebra.base.LogicalOperatorTag;
+import org.apache.hyracks.algebricks.core.algebra.operators.logical.InnerJoinOperator;
+import org.apache.hyracks.algebricks.core.algebra.util.OperatorPropertiesUtil;
+import org.apache.hyracks.algebricks.core.rewriter.base.IAlgebraicRewriteRule;
+
+/**
+ * This rule swaps the left and right branches of an inner join when the left branch is
+ * known to produce at most one tuple while the right branch is not, so that the branch
+ * with the smaller cardinality ends up as the build (right) branch of the join.
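+ * <p>
+ * For example (an illustrative plan shape, not taken from an actual test case):
+ * join(true, assign <- empty-tuple-source, data-source-scan) is rewritten to
+ * join(true, data-source-scan, assign <- empty-tuple-source), so that the single-tuple
+ * branch becomes the build (right) input of the join.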
+ */
+public class SwitchInnerJoinBranchRule implements IAlgebraicRewriteRule {
+
+ @Override
+ public boolean rewritePost(Mutable<ILogicalOperator> opRef, IOptimizationContext context)
+ throws AlgebricksException {
+ ILogicalOperator op = opRef.getValue();
+ if (op.getOperatorTag() != LogicalOperatorTag.INNERJOIN) {
+ return false;
+ }
+ InnerJoinOperator joinOperator = (InnerJoinOperator) op;
+ Mutable<ILogicalOperator> leftRef = joinOperator.getInputs().get(0);
+ Mutable<ILogicalOperator> rightRef = joinOperator.getInputs().get(1);
+ ILogicalOperator left = leftRef.getValue();
+ ILogicalOperator right = rightRef.getValue();
+ boolean leftCardinalityOne = OperatorPropertiesUtil.isCardinalityZeroOrOne(left);
+ boolean rightCardinalityOne = OperatorPropertiesUtil.isCardinalityZeroOrOne(right);
+ if (!leftCardinalityOne || rightCardinalityOne) {
+ return false;
+ }
+ // The left branch produces at most one tuple while the right branch may produce more than one.
+ leftRef.setValue(right);
+ rightRef.setValue(left);
+ return true;
+ }
+}
diff --git a/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/subplan/EliminateSubplanWithInputCardinalityOneRule.java b/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/subplan/EliminateSubplanWithInputCardinalityOneRule.java
index b01bb43..2cd857a 100644
--- a/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/subplan/EliminateSubplanWithInputCardinalityOneRule.java
+++ b/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/subplan/EliminateSubplanWithInputCardinalityOneRule.java
@@ -161,7 +161,7 @@
List<LogicalVariable> liveVars = new ArrayList<LogicalVariable>();
VariableUtilities.getLiveVariables(operator, liveVars);
- if (OperatorPropertiesUtil.isCardinalityOne(operator)) {
+ if (OperatorPropertiesUtil.isCardinalityZeroOrOne(operator)) {
for (LogicalVariable liveVar : liveVars) {
if (freeVars.contains(liveVar)) {
varsWithCardinalityOne.add(liveVar);
diff --git a/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/subplan/ReplaceNtsWithSubplanInputOperatorVisitor.java b/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/subplan/ReplaceNtsWithSubplanInputOperatorVisitor.java
index 7eb9ac8..39cac06 100644
--- a/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/subplan/ReplaceNtsWithSubplanInputOperatorVisitor.java
+++ b/hyracks-fullstack/algebricks/algebricks-rewriter/src/main/java/org/apache/hyracks/algebricks/rewriter/rules/subplan/ReplaceNtsWithSubplanInputOperatorVisitor.java
@@ -21,6 +21,7 @@
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
+import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -78,7 +79,7 @@
// The map that maps the input variables to the subplan to their deep-copied
// variables.
- private final Map<LogicalVariable, LogicalVariable> varMap = new HashMap<>();
+ private final LinkedHashMap<LogicalVariable, LogicalVariable> varMap = new LinkedHashMap<>();
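+ // A LinkedHashMap is used so that the entry insertion order is preserved for ordered,
+ // recursive variable substitution (see VariableUtilities.substituteVariables).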
// Whether the original copy has been used.
private boolean isOriginalCopyUsed = false;