[ASTERIXDB-3222][COMP] Align compute-to-storage map with constraints

- user model changes: no
- storage format changes: no
- interface changes: no

Details:
When constructing the compute-to-storage map used for the
PartitioningProperties, the map should be built so that its partition
ordering is aligned with the AlgebricksPartitionConstraint.
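
For illustration, the following is a minimal, self-contained Java sketch
(hypothetical class and method names, not the actual
StorageComputePartitionsMap code) of why backing the map with a
LinkedHashMap matters: row i of the resulting int[][] must hold the
storage partitions of the i-th compute partition inserted, i.e. the same
order in which the constraint's locations are assembled, and a plain
HashMap makes no such iteration-order guarantee.

    import java.util.Arrays;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    // Sketch only: converts a compute-partition -> storage-partitions map
    // into an int[][] whose rows follow insertion order. A LinkedHashMap
    // preserves that order; a HashMap does not guarantee it.
    public class ComputeToStorageOrderSketch {

        static int[][] toArray(Map<Integer, List<Integer>> computeToStorage) {
            int[][] result = new int[computeToStorage.size()][];
            int row = 0;
            for (Map.Entry<Integer, List<Integer>> entry : computeToStorage.entrySet()) {
                result[row++] = entry.getValue().stream().mapToInt(Integer::intValue).toArray();
            }
            return result;
        }

        public static void main(String[] args) {
            Map<Integer, List<Integer>> computeToStorage = new LinkedHashMap<>();
            computeToStorage.put(0, List.of(0, 1)); // compute partition 0 -> storage partitions 0, 1
            computeToStorage.put(1, List.of(2, 3)); // compute partition 1 -> storage partitions 2, 3
            for (int[] storagePartitions : toArray(computeToStorage)) {
                System.out.println(Arrays.toString(storagePartitions));
            }
            // Prints [0, 1] then [2, 3], matching the insertion order and thus
            // the order in which the constraint's locations would be listed.
        }
    }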

Change-Id: I0050538ab25134acacf74775909d83bbe050c6f0
Reviewed-on: https://asterix-gerrit.ics.uci.edu/c/asterixdb/+/17634
Integration-Tests: Jenkins <jenkins@fulliautomatix.ics.uci.edu>
Reviewed-by: Murtadha Al Hubail <mhubail@apache.org>
Tested-by: Murtadha Al Hubail <mhubail@apache.org>
diff --git a/asterixdb/asterix-common/src/main/java/org/apache/asterix/common/cluster/StorageComputePartitionsMap.java b/asterixdb/asterix-common/src/main/java/org/apache/asterix/common/cluster/StorageComputePartitionsMap.java
index a16204d..dc62cac 100644
--- a/asterixdb/asterix-common/src/main/java/org/apache/asterix/common/cluster/StorageComputePartitionsMap.java
+++ b/asterixdb/asterix-common/src/main/java/org/apache/asterix/common/cluster/StorageComputePartitionsMap.java
@@ -22,6 +22,7 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -41,7 +42,7 @@
     }
 
     public int[][] getComputeToStorageMap(boolean metadataDataset) {
-        Map<Integer, List<Integer>> computeToStoragePartitions = new HashMap<>();
+        Map<Integer, List<Integer>> computeToStoragePartitions = new LinkedHashMap<>();
         if (metadataDataset) {
             final int computePartitionIdForMetadata = 0;
             computeToStoragePartitions.put(computePartitionIdForMetadata,
@@ -55,13 +56,13 @@
                 storagePartitions.add(i);
             }
         }
-        int[][] computerToStoArray = new int[computeToStoragePartitions.size()][];
+        int[][] computeToStoArray = new int[computeToStoragePartitions.size()][];
         int partitionIdx = 0;
-        for (Map.Entry<Integer, List<Integer>> integerListEntry : computeToStoragePartitions.entrySet()) {
-            computerToStoArray[partitionIdx] = integerListEntry.getValue().stream().mapToInt(i -> i).toArray();
+        for (Map.Entry<Integer, List<Integer>> computeToStoPartitions : computeToStoragePartitions.entrySet()) {
+            computeToStoArray[partitionIdx] = computeToStoPartitions.getValue().stream().mapToInt(i -> i).toArray();
             partitionIdx++;
         }
-        return computerToStoArray;
+        return computeToStoArray;
     }
 
     public int getStoragePartitionsCount() {
diff --git a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/StaticDataPartitioningProvider.java b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/StaticDataPartitioningProvider.java
index dbdcdc2..ffcdc57 100644
--- a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/StaticDataPartitioningProvider.java
+++ b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/StaticDataPartitioningProvider.java
@@ -60,11 +60,11 @@
     @Override
     public PartitioningProperties getPartitioningProperties(MetadataTransactionContext mdTxnCtx, Dataset ds,
             String indexName) throws AlgebricksException {
-        SplitComputeLocations dataverseSplits = getDatasetSplits(ds, indexName);
+        SplitComputeLocations datasetSplits = getDatasetSplits(ds, indexName);
         StorageComputePartitionsMap partitionMap = clusterStateManager.getStorageComputeMap();
         int[][] partitionsMap = partitionMap
                 .getComputeToStorageMap(MetadataIndexImmutableProperties.isMetadataDataset(ds.getDatasetId()));
-        return PartitioningProperties.of(dataverseSplits.getSplitsProvider(), dataverseSplits.getConstraints(),
+        return PartitioningProperties.of(datasetSplits.getSplitsProvider(), datasetSplits.getConstraints(),
                 partitionsMap);
     }
 
@@ -94,19 +94,20 @@
         List<String> locations = new ArrayList<>();
         Set<Integer> uniqueLocations = new HashSet<>();
         StorageComputePartitionsMap partitionMap = clusterStateManager.getStorageComputeMap();
-        final int datasetPartitons = getNumberOfPartitions(dataset);
+        final int datasetPartitions = getNumberOfPartitions(dataset);
         boolean metadataDataset = MetadataIndexImmutableProperties.isMetadataDataset(dataset.getDatasetId());
-        for (int i = 0; i < datasetPartitons; i++) {
+        for (int i = 0; i < datasetPartitions; i++) {
             int storagePartition = metadataDataset ? StorageConstants.METADATA_PARTITION : i;
             final String relPath = StoragePathUtil.prepareDataverseIndexName(dataset.getDataverseName(),
                     dataset.getDatasetName(), indexName, dataset.getRebalanceCount());
             File f = new File(StoragePathUtil.prepareStoragePartitionPath(storagePartition), relPath);
             ComputePartition computePartition = partitionMap.getComputePartition(storagePartition);
             splits.add(new MappedFileSplit(computePartition.getNodeId(), f.getPath(), 0));
-            if (!uniqueLocations.contains(computePartition.getId())) {
+            int computePartitionId = computePartition.getId();
+            if (!uniqueLocations.contains(computePartitionId)) {
                 locations.add(computePartition.getNodeId());
             }
-            uniqueLocations.add(computePartition.getId());
+            uniqueLocations.add(computePartitionId);
         }
         IFileSplitProvider splitProvider = StoragePathUtil.splitProvider(splits.toArray(new FileSplit[0]));
         AlgebricksPartitionConstraint constraints =