[ASTERIXDB-3144][COMP] Remove partitions map null check
- user model changes: no
- storage format changes: no
- interface changes: no
Details:
Remove the partitions map null check in the index modification runtime,
since the map is now always provided.
- Always contribute partition constraints for the data source scan operator.
Change-Id: I871eeced711057d46cd682d51daf94b6b8f979d3
Reviewed-on: https://asterix-gerrit.ics.uci.edu/c/asterixdb/+/17538
Integration-Tests: Jenkins <jenkins@fulliautomatix.ics.uci.edu>
Tested-by: Jenkins <jenkins@fulliautomatix.ics.uci.edu>
Reviewed-by: Ali Alsuliman <ali.al.solaiman@gmail.com>
Reviewed-by: Murtadha Hubail <mhubail@apache.org>
diff --git a/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/algebra/operators/physical/InvertedIndexPOperator.java b/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/algebra/operators/physical/InvertedIndexPOperator.java
index 20334bf..5e66f68 100644
--- a/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/algebra/operators/physical/InvertedIndexPOperator.java
+++ b/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/algebra/operators/physical/InvertedIndexPOperator.java
@@ -184,7 +184,7 @@
((Index.TextIndexDetails) secondaryIndex.getIndexDetails()).getFullTextConfigName());
IIndexDataflowHelperFactory dataflowHelperFactory =
new IndexDataflowHelperFactory(metadataProvider.getStorageComponentProvider().getStorageManager(),
- partitioningProperties.getSpiltsProvider());
+ partitioningProperties.getSplitsProvider());
LSMInvertedIndexSearchOperatorDescriptor invIndexSearchOp =
new LSMInvertedIndexSearchOperatorDescriptor(jobSpec, outputRecDesc, queryField, dataflowHelperFactory,
queryTokenizerFactory, fullTextConfigEvaluatorFactory, searchModifierFactory, retainInput,
diff --git a/asterixdb/asterix-app/src/main/java/org/apache/asterix/app/function/DumpIndexRewriter.java b/asterixdb/asterix-app/src/main/java/org/apache/asterix/app/function/DumpIndexRewriter.java
index dac1ac7..9fb6385 100644
--- a/asterixdb/asterix-app/src/main/java/org/apache/asterix/app/function/DumpIndexRewriter.java
+++ b/asterixdb/asterix-app/src/main/java/org/apache/asterix/app/function/DumpIndexRewriter.java
@@ -71,7 +71,7 @@
metadataProvider.getPartitioningProperties(dataset, index.getIndexName());
IndexDataflowHelperFactory indexDataflowHelperFactory =
new IndexDataflowHelperFactory(metadataProvider.getStorageComponentProvider().getStorageManager(),
- partitioningProperties.getSpiltsProvider());
+ partitioningProperties.getSplitsProvider());
AlgebricksAbsolutePartitionConstraint secondaryPartitionConstraint =
(AlgebricksAbsolutePartitionConstraint) partitioningProperties.getConstraints();
return new DumpIndexDatasource(context.getComputationNodeDomain(), indexDataflowHelperFactory,
diff --git a/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/DataverseUtil.java b/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/DataverseUtil.java
index bfba414..61b526c 100644
--- a/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/DataverseUtil.java
+++ b/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/DataverseUtil.java
@@ -35,7 +35,7 @@
JobSpecification jobSpec = RuntimeUtils.createJobSpecification(metadata.getApplicationContext());
PartitioningProperties partitioningProperties = metadata.splitAndConstraints(dataverse.getDataverseName());
FileRemoveOperatorDescriptor frod = new FileRemoveOperatorDescriptor(jobSpec,
- partitioningProperties.getSpiltsProvider(), false, partitioningProperties.getComputeStorageMap());
+ partitioningProperties.getSplitsProvider(), false, partitioningProperties.getComputeStorageMap());
AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(jobSpec, frod,
partitioningProperties.getConstraints());
jobSpec.addRoot(frod);
diff --git a/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/FeedOperations.java b/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/FeedOperations.java
index 72961b8..44f8a80 100644
--- a/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/FeedOperations.java
+++ b/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/FeedOperations.java
@@ -153,7 +153,7 @@
JobSpecification spec = RuntimeUtils.createJobSpecification(appCtx);
PartitioningProperties partitioningProperties = metadataProvider.getPartitioningProperties(feed);
FileRemoveOperatorDescriptor frod = new FileRemoveOperatorDescriptor(spec,
- partitioningProperties.getSpiltsProvider(), true, partitioningProperties.getComputeStorageMap());
+ partitioningProperties.getSplitsProvider(), true, partitioningProperties.getComputeStorageMap());
AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(spec, frod,
partitioningProperties.getConstraints());
spec.addRoot(frod);
diff --git a/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/FlushDatasetUtil.java b/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/FlushDatasetUtil.java
index f012a4e..e46738d 100644
--- a/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/FlushDatasetUtil.java
+++ b/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/FlushDatasetUtil.java
@@ -69,6 +69,7 @@
metadataProvider.getPartitioningProperties(dataset, dataset.getDatasetName());
AlgebricksPartitionConstraint primaryPartitionConstraint = partitioningProperties.getConstraints();
+ //TODO(partitioning) can make it run only at NC level since any flush call will flush all partitions in the NC
AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(spec, emptySource,
primaryPartitionConstraint);
diff --git a/asterixdb/asterix-common/src/main/java/org/apache/asterix/common/cluster/PartitioningProperties.java b/asterixdb/asterix-common/src/main/java/org/apache/asterix/common/cluster/PartitioningProperties.java
index 1580ca4..3443b84 100644
--- a/asterixdb/asterix-common/src/main/java/org/apache/asterix/common/cluster/PartitioningProperties.java
+++ b/asterixdb/asterix-common/src/main/java/org/apache/asterix/common/cluster/PartitioningProperties.java
@@ -38,7 +38,7 @@
return new PartitioningProperties(splitsProvider, constraints, computeStorageMap);
}
- public IFileSplitProvider getSpiltsProvider() {
+ public IFileSplitProvider getSplitsProvider() {
return splitsProvider;
}
diff --git a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/declared/MetadataProvider.java b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/declared/MetadataProvider.java
index 3404ace..b07a03e 100644
--- a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/declared/MetadataProvider.java
+++ b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/declared/MetadataProvider.java
@@ -600,7 +600,7 @@
primaryKeyFields, primaryKeyFieldsInSecondaryIndex, proceedIndexOnlyPlan);
IStorageManager storageManager = getStorageComponentProvider().getStorageManager();
IIndexDataflowHelperFactory indexHelperFactory =
- new IndexDataflowHelperFactory(storageManager, datasetPartitioningProp.getSpiltsProvider());
+ new IndexDataflowHelperFactory(storageManager, datasetPartitioningProp.getSplitsProvider());
BTreeSearchOperatorDescriptor btreeSearchOp;
int[][] partitionsMap = datasetPartitioningProp.getComputeStorageMap();
@@ -678,7 +678,7 @@
primaryKeyFields, primaryKeyFieldsInSecondaryIndex, isIndexOnlyPlan);
RTreeSearchOperatorDescriptor rtreeSearchOp;
IIndexDataflowHelperFactory indexDataflowHelperFactory = new IndexDataflowHelperFactory(
- storageComponentProvider.getStorageManager(), partitioningProperties.getSpiltsProvider());
+ storageComponentProvider.getStorageManager(), partitioningProperties.getSplitsProvider());
if (dataset.getDatasetType() == DatasetType.INTERNAL) {
int[][] partitionsMap = partitioningProperties.getComputeStorageMap();
rtreeSearchOp = new RTreeSearchOperatorDescriptor(jobSpec, outputRecDesc, keyFields, true, true,
@@ -930,7 +930,7 @@
public FileSplit[] splitsForIndex(MetadataTransactionContext mdTxnCtx, Dataset dataset, String indexName)
throws AlgebricksException {
- return dataPartitioningProvider.getPartitioningProperties(mdTxnCtx, dataset, indexName).getSpiltsProvider()
+ return dataPartitioningProvider.getPartitioningProperties(mdTxnCtx, dataset, indexName).getSplitsProvider()
.getFileSplits();
}
@@ -980,6 +980,7 @@
ExternalScanOperatorDescriptor dataScanner = new ExternalScanOperatorDescriptor(jobSpec, scannerDesc,
adapterFactory, tupleFilterFactory, outputLimit);
+ //TODO(partitioning) check
AlgebricksPartitionConstraint constraint;
try {
constraint = adapterFactory.getPartitionConstraint();
@@ -1042,8 +1043,9 @@
IModificationOperationCallbackFactory modificationCallbackFactory = dataset
.getModificationCallbackFactory(storageComponentProvider, primaryIndex, indexOp, primaryKeyFields);
IIndexDataflowHelperFactory idfh = new IndexDataflowHelperFactory(storageComponentProvider.getStorageManager(),
- partitioningProperties.getSpiltsProvider());
+ partitioningProperties.getSplitsProvider());
IBinaryHashFunctionFactory[] pkHashFunFactories = dataset.getPrimaryHashFunctionFactories(this);
+ //TODO(partitioning) rename to static
ITuplePartitionerFactory partitionerFactory = new FieldHashPartitionerFactory(pkFields, pkHashFunFactories,
partitioningProperties.getNumberOfPartitions());
@@ -1067,7 +1069,7 @@
PartitioningProperties idxPartitioningProperties =
getPartitioningProperties(dataset, primaryKeyIndex.get().getIndexName());
pkidfh = new IndexDataflowHelperFactory(storageComponentProvider.getStorageManager(),
- idxPartitioningProperties.getSpiltsProvider());
+ idxPartitioningProperties.getSplitsProvider());
}
op = createLSMPrimaryInsertOperatorDescriptor(spec, inputRecordDesc, fieldPermutation, idfh, pkidfh,
modificationCallbackFactory, searchCallbackFactory, numKeys, filterFields, partitionerFactory,
@@ -1243,7 +1245,7 @@
IModificationOperationCallbackFactory modificationCallbackFactory = dataset.getModificationCallbackFactory(
storageComponentProvider, secondaryIndex, indexOp, modificationCallbackPrimaryKeyFields);
IIndexDataflowHelperFactory idfh = new IndexDataflowHelperFactory(
- storageComponentProvider.getStorageManager(), partitioningProperties.getSpiltsProvider());
+ storageComponentProvider.getStorageManager(), partitioningProperties.getSplitsProvider());
IBinaryHashFunctionFactory[] pkHashFunFactories = dataset.getPrimaryHashFunctionFactories(this);
ITuplePartitionerFactory partitionerFactory = new FieldHashPartitionerFactory(pkFields, pkHashFunFactories,
partitioningProperties.getNumberOfPartitions());
@@ -1313,7 +1315,7 @@
IModificationOperationCallbackFactory modificationCallbackFactory = dataset.getModificationCallbackFactory(
storageComponentProvider, secondaryIndex, indexOp, modificationCallbackPrimaryKeyFields);
IIndexDataflowHelperFactory idfh = new IndexDataflowHelperFactory(
- storageComponentProvider.getStorageManager(), partitioningProperties.getSpiltsProvider());
+ storageComponentProvider.getStorageManager(), partitioningProperties.getSplitsProvider());
IBinaryHashFunctionFactory[] pkHashFunFactories = dataset.getPrimaryHashFunctionFactories(this);
ITuplePartitionerFactory tuplePartitionerFactory = new FieldHashPartitionerFactory(pkFields,
pkHashFunFactories, partitioningProperties.getNumberOfPartitions());
@@ -1419,7 +1421,7 @@
IModificationOperationCallbackFactory modificationCallbackFactory = dataset.getModificationCallbackFactory(
storageComponentProvider, secondaryIndex, indexOp, modificationCallbackPrimaryKeyFields);
IIndexDataflowHelperFactory indexDataflowHelperFactory = new IndexDataflowHelperFactory(
- storageComponentProvider.getStorageManager(), partitioningProperties.getSpiltsProvider());
+ storageComponentProvider.getStorageManager(), partitioningProperties.getSplitsProvider());
IBinaryHashFunctionFactory[] pkHashFunFactories = dataset.getPrimaryHashFunctionFactories(this);
ITuplePartitionerFactory partitionerFactory = new FieldHashPartitionerFactory(pkFields, pkHashFunFactories,
partitioningProperties.getNumberOfPartitions());
@@ -1538,7 +1540,7 @@
IModificationOperationCallbackFactory modificationCallbackFactory = dataset.getModificationCallbackFactory(
storageComponentProvider, secondaryIndex, indexOp, modificationCallbackPrimaryKeyFields);
IIndexDataflowHelperFactory indexDataFlowFactory = new IndexDataflowHelperFactory(
- storageComponentProvider.getStorageManager(), partitioningProperties.getSpiltsProvider());
+ storageComponentProvider.getStorageManager(), partitioningProperties.getSplitsProvider());
IBinaryHashFunctionFactory[] pkHashFunFactories = dataset.getPrimaryHashFunctionFactories(this);
ITuplePartitionerFactory partitionerFactory = new FieldHashPartitionerFactory(pkFields, pkHashFunFactories,
partitioningProperties.getNumberOfPartitions());
@@ -1743,6 +1745,7 @@
keyFields[k] = k;
}
+ //TODO(partitioning) check
tokenizerOp = new BinaryTokenizerOperatorDescriptor(spec, tokenKeyPairRecDesc, tokenizerFactory,
fullTextConfigEvaluatorFactory, docField, keyFields, isPartitioned, true, false,
MissingWriterFactory.INSTANCE);
diff --git a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/entitytupletranslators/DatasetTupleTranslator.java b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/entitytupletranslators/DatasetTupleTranslator.java
index 70dfde1..af31c0d 100644
--- a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/entitytupletranslators/DatasetTupleTranslator.java
+++ b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/entitytupletranslators/DatasetTupleTranslator.java
@@ -73,7 +73,6 @@
import org.apache.asterix.om.types.ARecordType;
import org.apache.asterix.om.types.ATypeTag;
import org.apache.asterix.om.types.AUnorderedListType;
-import org.apache.asterix.om.types.BuiltinType;
import org.apache.asterix.om.types.IAType;
import org.apache.asterix.runtime.compression.CompressionManager;
import org.apache.hyracks.algebricks.common.exceptions.AlgebricksException;
@@ -221,8 +220,8 @@
}
datasetDetails = new InternalDatasetDetails(fileStructure, partitioningStrategy, partitioningKey,
- partitioningKey, keyFieldSourceIndicator, primaryKeyTypes, autogenerated,
- filterSourceIndicator, filterField);
+ partitioningKey, keyFieldSourceIndicator, primaryKeyTypes, autogenerated, filterSourceIndicator,
+ filterField);
break;
}
diff --git a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/DatasetUtil.java b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/DatasetUtil.java
index 2ce9435..277602d 100644
--- a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/DatasetUtil.java
+++ b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/DatasetUtil.java
@@ -313,7 +313,7 @@
PartitioningProperties partitioningProperties = metadataProvider.getPartitioningProperties(dataset);
IIndexDataflowHelperFactory indexHelperFactory =
new IndexDataflowHelperFactory(metadataProvider.getStorageComponentProvider().getStorageManager(),
- partitioningProperties.getSpiltsProvider());
+ partitioningProperties.getSplitsProvider());
IndexDropOperatorDescriptor primaryBtreeDrop = new IndexDropOperatorDescriptor(specPrimary, indexHelperFactory,
options, partitioningProperties.getComputeStorageMap());
AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(specPrimary, primaryBtreeDrop,
@@ -335,7 +335,7 @@
JobSpecification spec = RuntimeUtils.createJobSpecification(metadataProvider.getApplicationContext());
PartitioningProperties partitioningProperties = metadataProvider.getPartitioningProperties(dataset);
- FileSplit[] fs = partitioningProperties.getSpiltsProvider().getFileSplits();
+ FileSplit[] fs = partitioningProperties.getSplitsProvider().getFileSplits();
StringBuilder sb = new StringBuilder();
for (FileSplit f : fs) {
sb.append(f).append(" ");
@@ -349,7 +349,7 @@
compactionInfo.first, compactionInfo.second);
IndexBuilderFactory indexBuilderFactory =
new IndexBuilderFactory(metadataProvider.getStorageComponentProvider().getStorageManager(),
- partitioningProperties.getSpiltsProvider(), resourceFactory, true);
+ partitioningProperties.getSplitsProvider(), resourceFactory, true);
IndexCreateOperatorDescriptor indexCreateOp = new IndexCreateOperatorDescriptor(spec, indexBuilderFactory,
partitioningProperties.getComputeStorageMap());
AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(spec, indexCreateOp,
@@ -369,7 +369,7 @@
PartitioningProperties partitioningProperties = metadataProvider.getPartitioningProperties(dataset);
IIndexDataflowHelperFactory indexHelperFactory =
new IndexDataflowHelperFactory(metadataProvider.getStorageComponentProvider().getStorageManager(),
- partitioningProperties.getSpiltsProvider());
+ partitioningProperties.getSplitsProvider());
LSMTreeIndexCompactOperatorDescriptor compactOp = new LSMTreeIndexCompactOperatorDescriptor(spec,
indexHelperFactory, partitioningProperties.getComputeStorageMap());
AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(spec, compactOp,
@@ -395,7 +395,7 @@
public static IOperatorDescriptor createPrimaryIndexScanOp(JobSpecification spec, MetadataProvider metadataProvider,
Dataset dataset, ITupleProjectorFactory projectorFactory) throws AlgebricksException {
PartitioningProperties partitioningProperties = metadataProvider.getPartitioningProperties(dataset);
- IFileSplitProvider primaryFileSplitProvider = partitioningProperties.getSpiltsProvider();
+ IFileSplitProvider primaryFileSplitProvider = partitioningProperties.getSplitsProvider();
AlgebricksPartitionConstraint primaryPartitionConstraint = partitioningProperties.getConstraints();
// -Infinity
int[] lowKeyFields = null;
@@ -456,7 +456,7 @@
ISearchOperationCallbackFactory searchCallbackFactory = dataset.getSearchCallbackFactory(
storageComponentProvider, primaryIndex, IndexOperation.UPSERT, primaryKeyFields);
IIndexDataflowHelperFactory idfh = new IndexDataflowHelperFactory(storageComponentProvider.getStorageManager(),
- partitioningProperties.getSpiltsProvider());
+ partitioningProperties.getSplitsProvider());
LSMPrimaryUpsertOperatorDescriptor op;
ITypeTraits[] outputTypeTraits = new ITypeTraits[inputRecordDesc.getFieldCount() + 1
+ (dataset.hasMetaPart() ? 2 : 1) + numFilterFields];
diff --git a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SampleOperationsHelper.java b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SampleOperationsHelper.java
index 6b88960..7587769 100644
--- a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SampleOperationsHelper.java
+++ b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SampleOperationsHelper.java
@@ -144,7 +144,7 @@
// the index object information is fetched from the old source dataset
PartitioningProperties samplePartitioningProperties =
metadataProvider.getPartitioningProperties(dataset, sampleIdx.getIndexName());
- fileSplitProvider = samplePartitioningProperties.getSpiltsProvider();
+ fileSplitProvider = samplePartitioningProperties.getSplitsProvider();
partitionConstraint = samplePartitioningProperties.getConstraints();
computeStorageMap = samplePartitioningProperties.getComputeStorageMap();
numPartitions = samplePartitioningProperties.getNumberOfPartitions();
@@ -210,7 +210,7 @@
Index idx = dsIndexes.get(i);
PartitioningProperties idxPartitioningProps =
metadataProvider.getPartitioningProperties(dataset, idx.getIndexName());
- indexes[i] = new IndexDataflowHelperFactory(storageMgr, idxPartitioningProps.getSpiltsProvider());
+ indexes[i] = new IndexDataflowHelperFactory(storageMgr, idxPartitioningProps.getSplitsProvider());
names[i] = idx.getIndexName();
}
targetOp = new DatasetStreamStatsOperatorDescriptor(spec, recordDesc, DATASET_STATS_OPERATOR_NAME, indexes,
diff --git a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SecondaryIndexOperationsHelper.java b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SecondaryIndexOperationsHelper.java
index 1a58423..b824512 100644
--- a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SecondaryIndexOperationsHelper.java
+++ b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SecondaryIndexOperationsHelper.java
@@ -216,7 +216,7 @@
PartitioningProperties partitioningProperties;
partitioningProperties =
getSecondaryIndexBulkloadPartitioningProperties(metadataProvider, dataset, index.getIndexName());
- secondaryFileSplitProvider = partitioningProperties.getSpiltsProvider();
+ secondaryFileSplitProvider = partitioningProperties.getSplitsProvider();
secondaryPartitionConstraint = partitioningProperties.getConstraints();
numPrimaryKeys = dataset.getPrimaryKeys().size();
if (dataset.getDatasetType() == DatasetType.INTERNAL) {
@@ -228,7 +228,7 @@
}
PartitioningProperties datasetPartitioningProperties = getSecondaryIndexBulkloadPartitioningProperties(
metadataProvider, dataset, dataset.getDatasetName());
- primaryFileSplitProvider = datasetPartitioningProperties.getSpiltsProvider();
+ primaryFileSplitProvider = datasetPartitioningProperties.getSplitsProvider();
primaryPartitionConstraint = datasetPartitioningProperties.getConstraints();
setPrimaryRecDescAndComparators();
}
@@ -538,7 +538,7 @@
// to ensure correctness, we will run in as many locations as storage partitions
// this will not be needed once ASTERIXDB-3176 is implemented
if (this instanceof SecondaryCorrelatedTreeIndexOperationsHelper) {
- FileSplit[] fileSplits = partitioningProperties.getSpiltsProvider().getFileSplits();
+ FileSplit[] fileSplits = partitioningProperties.getSplitsProvider().getFileSplits();
Pair<IFileSplitProvider, AlgebricksPartitionConstraint> sp =
StoragePathUtil.splitProviderAndPartitionConstraints(fileSplits);
return PartitioningProperties.of(sp.getFirst(), sp.getSecond(),
diff --git a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SecondaryTreeIndexOperationsHelper.java b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SecondaryTreeIndexOperationsHelper.java
index 8dc0d96..0cda625 100644
--- a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SecondaryTreeIndexOperationsHelper.java
+++ b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SecondaryTreeIndexOperationsHelper.java
@@ -81,7 +81,7 @@
metadataProvider.getPartitioningProperties(dataset, index.getIndexName());
IIndexDataflowHelperFactory dataflowHelperFactory =
new IndexDataflowHelperFactory(metadataProvider.getStorageComponentProvider().getStorageManager(),
- partitioningProperties.getSpiltsProvider());
+ partitioningProperties.getSplitsProvider());
// The index drop operation should be persistent regardless of temp datasets or permanent dataset.
IndexDropOperatorDescriptor btreeDrop = new IndexDropOperatorDescriptor(spec, dataflowHelperFactory,
dropOptions, partitioningProperties.getComputeStorageMap());
@@ -99,7 +99,7 @@
metadataProvider.getPartitioningProperties(dataset, index.getIndexName());
IIndexDataflowHelperFactory dataflowHelperFactory =
new IndexDataflowHelperFactory(metadataProvider.getStorageComponentProvider().getStorageManager(),
- partitioningProperties.getSpiltsProvider());
+ partitioningProperties.getSplitsProvider());
LSMTreeIndexCompactOperatorDescriptor compactOp = new LSMTreeIndexCompactOperatorDescriptor(spec,
dataflowHelperFactory, partitioningProperties.getComputeStorageMap());
compactOp.setSourceLocation(sourceLoc);
diff --git a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/physical/DataSourceScanPOperator.java b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/physical/DataSourceScanPOperator.java
index ea19a78..8a4b3f0 100644
--- a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/physical/DataSourceScanPOperator.java
+++ b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/physical/DataSourceScanPOperator.java
@@ -124,9 +124,7 @@
IOperatorDescriptor opDesc = p.first;
opDesc.setSourceLocation(scan.getSourceLocation());
builder.contributeHyracksOperator(scan, opDesc);
- if (p.second != null) {
- builder.contributeAlgebricksPartitionConstraint(opDesc, p.second);
- }
+ builder.contributeAlgebricksPartitionConstraint(opDesc, p.second);
ILogicalOperator srcExchange = scan.getInputs().get(0).getValue();
builder.contributeGraphEdge(srcExchange, 0, scan, 0);
diff --git a/hyracks-fullstack/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/dataflow/IndexInsertUpdateDeleteOperatorNodePushable.java b/hyracks-fullstack/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/dataflow/IndexInsertUpdateDeleteOperatorNodePushable.java
index d3def46..422aef3 100644
--- a/hyracks-fullstack/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/dataflow/IndexInsertUpdateDeleteOperatorNodePushable.java
+++ b/hyracks-fullstack/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/dataflow/IndexInsertUpdateDeleteOperatorNodePushable.java
@@ -76,7 +76,7 @@
ITupleFilterFactory tupleFilterFactory, ITuplePartitionerFactory tuplePartitionerFactory,
int[][] partitionsMap) throws HyracksDataException {
this.ctx = ctx;
- this.partitions = partitionsMap != null ? partitionsMap[partition] : new int[] { partition };
+ this.partitions = partitionsMap[partition];
this.indexes = new IIndex[partitions.length];
this.indexAccessors = new IIndexAccessor[partitions.length];
this.modCallbacks = new IModificationOperationCallback[partitions.length];