Merge branch 'gerrit/neo' into 'gerrit/trinity'
Ext-ref: MB-66663,MB-66818
Change-Id: I94ba023ac9b3a8a007f8fdac4216b758f7733835
diff --git a/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/NCQueryServiceServlet.java b/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/NCQueryServiceServlet.java
index a5e13c1..a593ad4 100644
--- a/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/NCQueryServiceServlet.java
+++ b/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/NCQueryServiceServlet.java
@@ -97,13 +97,15 @@
try {
responseMsg = (ExecuteStatementResponseMessage) responseFuture.get(timeout, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
- cancelQuery(ncMb, ncCtx.getNodeId(), requestReference.getUuid(), param.getClientContextID(), e, false);
+ cancelQuery(ncMb, ncCtx.getNodeId(), requestReference.getUuid(), param.getClientContextID(), e, false,
+ "interrupt");
throw e;
} catch (TimeoutException exception) {
RuntimeDataException hde = new RuntimeDataException(ErrorCode.REQUEST_TIMEOUT);
hde.addSuppressed(exception);
// cancel query
- cancelQuery(ncMb, ncCtx.getNodeId(), requestReference.getUuid(), param.getClientContextID(), hde, true);
+ cancelQuery(ncMb, ncCtx.getNodeId(), requestReference.getUuid(), param.getClientContextID(), hde, true,
+ "timeout");
throw hde;
}
executionState.end();
@@ -156,7 +158,7 @@
}
private void cancelQuery(INCMessageBroker messageBroker, String nodeId, String uuid, String clientContextID,
- Exception exception, boolean wait) {
+ Exception exception, boolean wait, String reason) {
if (uuid == null && clientContextID == null) {
return;
}
@@ -165,8 +167,7 @@
CancelQueryRequest cancelQueryMessage =
new CancelQueryRequest(nodeId, cancelQueryFuture.getFutureId(), uuid, clientContextID);
// TODO(mblow): multicc -- need to send cancellation to the correct cc
- LOGGER.info("Cancelling query with uuid:{}, clientContextID:{} due to {}", uuid, clientContextID,
- exception.getClass().getSimpleName());
+ LOGGER.info("Cancelling query with uuid:{}, clientContextID:{} due to {}", uuid, clientContextID, reason);
messageBroker.sendMessageToPrimaryCC(cancelQueryMessage);
if (wait) {
cancelQueryFuture.get(ExecuteStatementRequestMessage.DEFAULT_QUERY_CANCELLATION_WAIT_MILLIS,
diff --git a/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/QueryServiceRequestParameters.java b/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/QueryServiceRequestParameters.java
index 563f498..ed47832 100644
--- a/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/QueryServiceRequestParameters.java
+++ b/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/QueryServiceRequestParameters.java
@@ -19,8 +19,6 @@
package org.apache.asterix.api.http.server;
-import static org.apache.asterix.utils.RedactionUtil.REDACTED_SENSITIVE_ENTRY_VALUE;
-
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
@@ -428,7 +426,12 @@
object.put("source", source);
if (statementParams != null) {
for (Map.Entry<String, JsonNode> statementParam : statementParams.entrySet()) {
- object.set('$' + statementParam.getKey(), REDACTED_SENSITIVE_ENTRY_VALUE);
+ try {
+ String s = OBJECT_MAPPER.writeValueAsString(statementParam.getValue());
+ object.put('$' + statementParam.getKey(), LogRedactionUtil.userData(s));
+ } catch (JsonProcessingException e) {
+ // value could not be serialized; omit this parameter from the JSON
+ }
}
}
return object;
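
Note on the hunk above: the old code replaced every statement parameter with a fixed redacted placeholder, while the new code serializes the actual value and routes it through LogRedactionUtil.userData, so whether the value survives into the request JSON is decided by whatever log redactor the deployment installs. Below is a minimal sketch of the new path, assuming hyracks-util's default redactor (which, to my understanding, passes text through unchanged); the parameter name and value are illustrative, not from the patch:

    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.node.ObjectNode;

    import org.apache.hyracks.util.LogRedactionUtil;

    public class RedactionSketch {
        public static void main(String[] args) throws Exception {
            ObjectMapper mapper = new ObjectMapper();
            ObjectNode object = mapper.createObjectNode();
            // Serialize the parameter value, then tag it as user data so a
            // redacting deployment can strip it at log-write time.
            String s = mapper.writeValueAsString(mapper.readTree("\"alice\""));
            object.put("$name", LogRedactionUtil.userData(s));
            System.out.println(object); // {"$name":"\"alice\""} with a pass-through redactor
        }
    }
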
diff --git a/asterixdb/asterix-external-data/src/main/java/org/apache/asterix/external/input/record/reader/aws/AwsS3InputStream.java b/asterixdb/asterix-external-data/src/main/java/org/apache/asterix/external/input/record/reader/aws/AwsS3InputStream.java
index 77897ea..6b921ac 100644
--- a/asterixdb/asterix-external-data/src/main/java/org/apache/asterix/external/input/record/reader/aws/AwsS3InputStream.java
+++ b/asterixdb/asterix-external-data/src/main/java/org/apache/asterix/external/input/record/reader/aws/AwsS3InputStream.java
@@ -30,6 +30,7 @@
import org.apache.asterix.common.exceptions.ErrorCode;
import org.apache.asterix.common.exceptions.RuntimeDataException;
import org.apache.asterix.external.input.record.reader.abstracts.AbstractExternalInputStream;
+import org.apache.asterix.external.input.record.reader.stream.AvailableInputStream;
import org.apache.asterix.external.util.ExternalDataConstants;
import org.apache.asterix.external.util.aws.s3.S3Utils;
import org.apache.commons.lang3.StringUtils;
@@ -70,7 +71,7 @@
}
// Use gzip stream if needed
if (StringUtils.endsWithIgnoreCase(fileName, ".gz") || StringUtils.endsWithIgnoreCase(fileName, ".gzip")) {
- in = new GZIPInputStream(in, ExternalDataConstants.DEFAULT_BUFFER_SIZE);
+ in = new GZIPInputStream(new AvailableInputStream(in), ExternalDataConstants.DEFAULT_BUFFER_SIZE);
}
return true;
}
diff --git a/asterixdb/asterix-external-data/src/main/java/org/apache/asterix/external/input/record/reader/azure/blob/AzureBlobInputStream.java b/asterixdb/asterix-external-data/src/main/java/org/apache/asterix/external/input/record/reader/azure/blob/AzureBlobInputStream.java
index bbfece2..bf2a717 100644
--- a/asterixdb/asterix-external-data/src/main/java/org/apache/asterix/external/input/record/reader/azure/blob/AzureBlobInputStream.java
+++ b/asterixdb/asterix-external-data/src/main/java/org/apache/asterix/external/input/record/reader/azure/blob/AzureBlobInputStream.java
@@ -31,6 +31,7 @@
import org.apache.asterix.common.exceptions.ErrorCode;
import org.apache.asterix.common.exceptions.RuntimeDataException;
import org.apache.asterix.external.input.record.reader.abstracts.AbstractExternalInputStream;
+import org.apache.asterix.external.input.record.reader.stream.AvailableInputStream;
import org.apache.asterix.external.util.ExternalDataConstants;
import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.util.LogRedactionUtil;
@@ -66,7 +67,7 @@
// Use gzip stream if needed
String lowerCaseFileName = fileName.toLowerCase();
if (lowerCaseFileName.endsWith(".gz") || lowerCaseFileName.endsWith(".gzip")) {
- in = new GZIPInputStream(in, ExternalDataConstants.DEFAULT_BUFFER_SIZE);
+ in = new GZIPInputStream(new AvailableInputStream(in), ExternalDataConstants.DEFAULT_BUFFER_SIZE);
}
} catch (BlobStorageException ex) {
if (ex.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) {
diff --git a/asterixdb/asterix-external-data/src/main/java/org/apache/asterix/external/input/record/reader/azure/datalake/AzureDataLakeInputStream.java b/asterixdb/asterix-external-data/src/main/java/org/apache/asterix/external/input/record/reader/azure/datalake/AzureDataLakeInputStream.java
index 7a95222..ce6c0e1 100644
--- a/asterixdb/asterix-external-data/src/main/java/org/apache/asterix/external/input/record/reader/azure/datalake/AzureDataLakeInputStream.java
+++ b/asterixdb/asterix-external-data/src/main/java/org/apache/asterix/external/input/record/reader/azure/datalake/AzureDataLakeInputStream.java
@@ -31,6 +31,7 @@
import org.apache.asterix.common.exceptions.ErrorCode;
import org.apache.asterix.common.exceptions.RuntimeDataException;
import org.apache.asterix.external.input.record.reader.abstracts.AbstractExternalInputStream;
+import org.apache.asterix.external.input.record.reader.stream.AvailableInputStream;
import org.apache.asterix.external.util.ExternalDataConstants;
import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.util.LogRedactionUtil;
@@ -66,7 +67,7 @@
// Use gzip stream if needed
String lowerCaseFileName = fileName.toLowerCase();
if (lowerCaseFileName.endsWith(".gz") || lowerCaseFileName.endsWith(".gzip")) {
- in = new GZIPInputStream(in, ExternalDataConstants.DEFAULT_BUFFER_SIZE);
+ in = new GZIPInputStream(new AvailableInputStream(in), ExternalDataConstants.DEFAULT_BUFFER_SIZE);
}
} catch (BlobStorageException ex) {
if (ex.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) {
diff --git a/asterixdb/asterix-external-data/src/main/java/org/apache/asterix/external/input/record/reader/gcs/GCSInputStream.java b/asterixdb/asterix-external-data/src/main/java/org/apache/asterix/external/input/record/reader/gcs/GCSInputStream.java
index 4657bd0..7d3fb2f 100644
--- a/asterixdb/asterix-external-data/src/main/java/org/apache/asterix/external/input/record/reader/gcs/GCSInputStream.java
+++ b/asterixdb/asterix-external-data/src/main/java/org/apache/asterix/external/input/record/reader/gcs/GCSInputStream.java
@@ -31,6 +31,7 @@
import org.apache.asterix.common.exceptions.ErrorCode;
import org.apache.asterix.common.exceptions.RuntimeDataException;
import org.apache.asterix.external.input.record.reader.abstracts.AbstractExternalInputStream;
+import org.apache.asterix.external.input.record.reader.stream.AvailableInputStream;
import org.apache.asterix.external.util.ExternalDataConstants;
import org.apache.asterix.external.util.google.gcs.GCSUtils;
import org.apache.commons.lang3.StringUtils;
@@ -67,7 +68,7 @@
// Use gzip stream if needed
if (StringUtils.endsWithIgnoreCase(fileName, ".gz") || StringUtils.endsWithIgnoreCase(fileName, ".gzip")) {
- in = new GZIPInputStream(in, ExternalDataConstants.DEFAULT_BUFFER_SIZE);
+ in = new GZIPInputStream(new AvailableInputStream(in), ExternalDataConstants.DEFAULT_BUFFER_SIZE);
}
return true;
}
diff --git a/asterixdb/asterix-external-data/src/main/java/org/apache/asterix/external/input/record/reader/stream/AvailableInputStream.java b/asterixdb/asterix-external-data/src/main/java/org/apache/asterix/external/input/record/reader/stream/AvailableInputStream.java
new file mode 100644
index 0000000..22f8df4
--- /dev/null
+++ b/asterixdb/asterix-external-data/src/main/java/org/apache/asterix/external/input/record/reader/stream/AvailableInputStream.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.asterix.external.input.record.reader.stream;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+public class AvailableInputStream extends InputStream {
+    private final InputStream is;
+
+    public AvailableInputStream(InputStream inputstream) {
+        is = inputstream;
+    }
+
+    public int read() throws IOException {
+        return is.read();
+    }
+
+    public int read(byte[] b) throws IOException {
+        return is.read(b);
+    }
+
+    public int read(byte[] b, int off, int len) throws IOException {
+        return is.read(b, off, len);
+    }
+
+    public void close() throws IOException {
+        is.close();
+    }
+
+    public int available() throws IOException {
+        // Report at least one byte, even when the wrapped stream has none
+        // buffered, so callers never mistake a momentarily empty stream for EOF
+        int a = is.available();
+        if (a == 0) {
+            return 1;
+        } else {
+            return a;
+        }
+    }
+}
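
Why AvailableInputStream exists: OpenJDK's java.util.zip.GZIPInputStream consults available() after reading a gzip trailer to decide whether a concatenated gzip member may follow, and remote object-store streams can report 0 before true EOF, so multi-member objects would silently decode to just their first member. The sketch below reproduces the failure and the fix; TricklingStream and the sample payloads are illustrative only, and the exact trailer heuristic may vary by JDK version:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;
    import java.util.zip.GZIPInputStream;
    import java.util.zip.GZIPOutputStream;

    import org.apache.asterix.external.input.record.reader.stream.AvailableInputStream;

    public class ConcatenatedGzipDemo {

        // Simulates a remote stream: bytes arrive one at a time and available()
        // reports 0 even though more data is still in flight.
        static class TricklingStream extends ByteArrayInputStream {
            TricklingStream(byte[] buf) {
                super(buf);
            }

            @Override
            public synchronized int read(byte[] b, int off, int len) {
                return super.read(b, off, Math.min(len, 1));
            }

            @Override
            public synchronized int available() {
                return 0;
            }
        }

        static byte[] gzip(String s) throws IOException {
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            try (GZIPOutputStream gz = new GZIPOutputStream(bos)) {
                gz.write(s.getBytes(StandardCharsets.UTF_8));
            }
            return bos.toByteArray();
        }

        static String decompress(InputStream in) throws IOException {
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            byte[] buf = new byte[1024];
            for (int n; (n = in.read(buf)) != -1;) {
                bos.write(buf, 0, n);
            }
            return bos.toString(StandardCharsets.UTF_8.name());
        }

        public static void main(String[] args) throws IOException {
            // Two gzip members back to back: still a valid gzip file, e.g. an
            // object whose parts were compressed separately.
            ByteArrayOutputStream concat = new ByteArrayOutputStream();
            concat.write(gzip("hello "));
            concat.write(gzip("world"));
            byte[] data = concat.toByteArray();

            // Unwrapped: available() == 0 at the first trailer, so GZIPInputStream
            // assumes EOF and the second member is silently dropped.
            System.out.println(decompress(new GZIPInputStream(new TricklingStream(data))));

            // Wrapped: available() never reports 0, so the trailer check probes
            // for, finds, and decodes the following member.
            System.out.println(decompress(new GZIPInputStream(
                    new AvailableInputStream(new TricklingStream(data)))));
        }
    }
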
diff --git a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/job/JobManager.java b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/job/JobManager.java
index be3daae..64085bc 100644
--- a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/job/JobManager.java
+++ b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/job/JobManager.java
@@ -393,7 +393,7 @@
run.setStartTime(System.currentTimeMillis());
run.setStartTimeZoneId(ZoneId.systemDefault().getId());
JobId jobId = run.getJobId();
- logJobCapacity(run, "running", Level.DEBUG);
+ logJobCapacity(run, "running", Level.INFO);
activeRunMap.put(jobId, run);
run.setStatus(JobStatus.RUNNING, null);
executeJobInternal(run);
@@ -437,7 +437,7 @@
private void releaseJobCapacity(JobRun jobRun) {
final JobSpecification job = jobRun.getJobSpecification();
jobCapacityController.release(job);
- logJobCapacity(jobRun, "released", Level.DEBUG);
+ logJobCapacity(jobRun, "released", Level.INFO);
}
private void logJobCapacity(JobRun jobRun, String jobStateDesc, Level lvl) {
@@ -451,7 +451,8 @@
return;
}
IReadOnlyClusterCapacity clusterCapacity = jobCapacityController.getClusterCapacity();
- LOGGER.log(lvl, "{} {}, memory={}, cpu={}, (new) cluster memory={}, cpu={}, currently running={}, queued={}",
+ LOGGER.log(lvl,
+ "{} {}, job memory={}, cpu={}, (new) cluster memory={}, cpu={}, currently running={}, queued={}",
jobStateDesc, jobRun.getJobId(), requiredMemory, requiredCPUs,
clusterCapacity.getAggregatedMemoryByteSize(), clusterCapacity.getAggregatedCores(),
getRunningJobsCount(), jobQueue.size());
diff --git a/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/buffermanager/VariableDeletableTupleMemoryManager.java b/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/buffermanager/VariableDeletableTupleMemoryManager.java
index b053cac..40ff2e4 100644
--- a/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/buffermanager/VariableDeletableTupleMemoryManager.java
+++ b/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/buffermanager/VariableDeletableTupleMemoryManager.java
@@ -161,9 +161,7 @@
policy.close();
frames.clear();
numTuples = 0;
- if (LOG.isDebugEnabled()) {
- LOG.debug("VariableTupleMemoryManager has reorganized " + statsReOrg + " times");
- }
+ LOG.trace("VariableTupleMemoryManager has reorganized {} times", statsReOrg);
statsReOrg = 0;
}
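
This hunk and the logging hunks in the remaining files apply the same two changes: drop the messages to TRACE, and replace guard-plus-concatenation with Log4j2 parameterized messages, which format the string only when the level is enabled, making the explicit isDebugEnabled/isTraceEnabled guards unnecessary. A minimal illustration, assuming log4j-api on the classpath:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    public class ParameterizedLoggingSketch {
        private static final Logger LOGGER = LogManager.getLogger();

        public static void main(String[] args) {
            int statsReOrg = 3;
            // Old pattern: the message string is built eagerly, so a guard is
            // needed to avoid the concatenation cost at higher log levels.
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("reorganized " + statsReOrg + " times");
            }
            // New pattern: {} placeholders are substituted only if TRACE is
            // enabled; the guard (and the eager string build) go away.
            LOGGER.trace("reorganized {} times", statsReOrg);
        }
    }
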
diff --git a/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/group/HashSpillableTableFactory.java b/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/group/HashSpillableTableFactory.java
index 1e5c121..2f2153b 100644
--- a/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/group/HashSpillableTableFactory.java
+++ b/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/group/HashSpillableTableFactory.java
@@ -131,10 +131,8 @@
final int numPartitions = getNumOfPartitions(inputDataBytesSize / ctx.getInitialFrameSize(), memoryBudget);
final int entriesPerPartition = (int) Math.ceil(1.0 * tableSize / numPartitions);
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("created hashtable, table size:" + tableSize + " file size:" + inputDataBytesSize
- + " #partitions:" + numPartitions);
- }
+ LOGGER.trace("created hashtable, table size:{} file size:{} #partitions:{}", tableSize, inputDataBytesSize,
+ numPartitions);
final ArrayTupleBuilder outputTupleBuilder = new ArrayTupleBuilder(outRecordDescriptor.getFields().length);
@@ -185,10 +183,8 @@
if (force || hashTableForTuplePointer.isGarbageCollectionNeeded()) {
int numberOfFramesReclaimed =
hashTableForTuplePointer.collectGarbage(bufferAccessor, tpcIntermediate);
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("Garbage Collection on Hash table is done. Deallocated frames:"
- + numberOfFramesReclaimed);
- }
+ LOGGER.trace("Garbage Collection on Hash table is done. Deallocated frames:{}",
+ numberOfFramesReclaimed);
return numberOfFramesReclaimed != -1;
}
return false;
diff --git a/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/join/OptimizedHybridHashJoinOperatorDescriptor.java b/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/join/OptimizedHybridHashJoinOperatorDescriptor.java
index 91e94dc..f3aebd5 100644
--- a/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/join/OptimizedHybridHashJoinOperatorDescriptor.java
+++ b/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/join/OptimizedHybridHashJoinOperatorDescriptor.java
@@ -513,12 +513,10 @@
stats.getLevel().set(level);
}
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("\n>>>Joining Partition Pairs (thread_id " + Thread.currentThread().getId()
- + ") (pid " + ") - (level " + level + ")" + " - BuildSize:\t" + buildPartSize
- + "\tProbeSize:\t" + probePartSize + " - MemForJoin " + (state.memForJoin)
- + " - LeftOuter is " + isLeftOuter);
- }
+ LOGGER.trace(
+ "\n>>>Joining Partition Pairs (thread_id {}) (pid ) - (level {}) - BuildSize:\t{}\tProbeSize:\t{} - MemForJoin {} - LeftOuter is {}",
+ Thread.currentThread().getId(), level, buildPartSize, probePartSize, state.memForJoin,
+ isLeftOuter);
// Calculate the expected hash table size for the both side.
long expectedHashTableSizeForBuildInFrame =
@@ -534,10 +532,8 @@
int tabSize = -1;
if (!forceRoleReversal && (isLeftOuter || (buildPartSize < probePartSize))) {
//Case 1.1 - InMemHJ (without Role-Reversal)
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("\t>>>Case 1.1 (IsLeftOuter || buildSize<probe) AND ApplyInMemHJ - [Level "
- + level + "]");
- }
+ LOGGER.trace("\t>>>Case 1.1 (IsLeftOuter || buildSize<probe) AND ApplyInMemHJ - [Level {}]",
+ level);
tabSize = buildSizeInTuple;
if (tabSize == 0) {
throw new HyracksDataException(
@@ -547,10 +543,9 @@
applyInMemHashJoin(buildKeys, probeKeys, tabSize, buildRd, probeRd, buildHpc, probeHpc,
buildSideReader, probeSideReader, probComp); // checked-confirmed
} else { //Case 1.2 - InMemHJ with Role Reversal
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("\t>>>Case 1.2. (NoIsLeftOuter || probe<build) AND ApplyInMemHJ"
- + "WITH RoleReversal - [Level " + level + "]");
- }
+ LOGGER.trace(
+ "\t>>>Case 1.2. (NoIsLeftOuter || probe<build) AND ApplyInMemHJWITH RoleReversal - [Level {}]",
+ level);
tabSize = probeSizeInTuple;
if (tabSize == 0) {
throw new HyracksDataException(
@@ -563,25 +558,18 @@
}
//Apply (Recursive) HHJ
else {
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("\t>>>Case 2. ApplyRecursiveHHJ - [Level " + level + "]");
- }
+ LOGGER.trace("\t>>>Case 2. ApplyRecursiveHHJ - [Level {}]", level);
if (!forceRoleReversal && (isLeftOuter || buildPartSize < probePartSize)) {
//Case 2.1 - Recursive HHJ (without Role-Reversal)
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug(
- "\t\t>>>Case 2.1 - RecursiveHHJ WITH (isLeftOuter || build<probe) - [Level "
- + level + "]");
- }
+ LOGGER.trace(
+ "\t\t>>>Case 2.1 - RecursiveHHJ WITH (isLeftOuter || build<probe) - [Level {}]",
+ level);
applyHybridHashJoin((int) buildPartSize, PROBE_REL, BUILD_REL, probeKeys, buildKeys,
probeRd, buildRd, probeHpc, buildHpc, probeSideReader, buildSideReader, level,
beforeMax, probComp);
} else { //Case 2.2 - Recursive HHJ (with Role-Reversal)
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug(
- "\t\t>>>Case 2.2. - RecursiveHHJ WITH RoleReversal - [Level " + level + "]");
- }
+ LOGGER.trace("\t\t>>>Case 2.2. - RecursiveHHJ WITH RoleReversal - [Level {}]", level);
applyHybridHashJoin((int) probePartSize, BUILD_REL, PROBE_REL, buildKeys, probeKeys,
buildRd, probeRd, buildHpc, probeHpc, buildSideReader, probeSideReader, level,
@@ -647,10 +635,9 @@
BitSet rPStatus = rHHj.getPartitionStatus();
if (!forceNLJ && (afterMax < (NLJ_SWITCH_THRESHOLD * beforeMax))) {
//Case 2.1.1 - Keep applying HHJ
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("\t\t>>>Case 2.1.1 - KEEP APPLYING RecursiveHHJ WITH "
- + "(isLeftOuter || build<probe) - [Level " + level + "]");
- }
+ LOGGER.trace(
+ "\t\t>>>Case 2.1.1 - KEEP APPLYING RecursiveHHJ WITH (isLeftOuter || build<probe) - [Level {}]",
+ level);
for (int rPid = rPStatus.nextSetBit(0); rPid >= 0; rPid = rPStatus.nextSetBit(rPid + 1)) {
RunFileReader rbrfw = rHHj.getBuildRFReader(rPid);
RunFileReader rprfw = rHHj.getProbeRFReader(rPid);
@@ -679,10 +666,9 @@
}
} else { //Case 2.1.2 - Switch to NLJ
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("\t\t>>>Case 2.1.2 - SWITCHED to NLJ RecursiveHHJ WITH "
- + "(isLeftOuter || build<probe) - [Level " + level + "]");
- }
+ LOGGER.trace(
+ "\t\t>>>Case 2.1.2 - SWITCHED to NLJ RecursiveHHJ WITH (isLeftOuter || build<probe) - [Level {}]",
+ level);
for (int rPid = rPStatus.nextSetBit(0); rPid >= 0; rPid = rPStatus.nextSetBit(rPid + 1)) {
RunFileReader rbrfw = rHHj.getBuildRFReader(rPid);
RunFileReader rprfw = rHHj.getProbeRFReader(rPid);
diff --git a/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/sort/AbstractExternalSortRunMerger.java b/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/sort/AbstractExternalSortRunMerger.java
index 1beaab8..e573c1c 100644
--- a/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/sort/AbstractExternalSortRunMerger.java
+++ b/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/sort/AbstractExternalSortRunMerger.java
@@ -117,19 +117,14 @@
if (currentGenerationRunAvailable.isEmpty()) {
numberOfPasses++;
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("generated runs:" + stop);
- }
+ LOGGER.trace("generated runs: {}", stop);
runs.subList(0, stop).clear();
currentGenerationRunAvailable.clear();
currentGenerationRunAvailable.set(0, runs.size());
stop = runs.size();
}
} else {
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("final runs: {}", stop);
- LOGGER.debug("number of passes: " + numberOfPasses);
- }
+ LOGGER.trace("final runs: {}, number of passes: {}", stop, numberOfPasses);
merge(finalWriter, partialRuns);
break;
}
@@ -206,9 +201,7 @@
}
} finally {
merger.close();
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("Output " + io + " frames");
- }
+ LOGGER.trace("Output {} frames", io);
}
}