[NO ISSUE][OTH] Query/job logging changes

- user model changes: no
- storage format changes: no
- interface changes: no

Details:
- Log job running/release events at INFO level.
- Log query statement parameters, with redaction applied (see the sketch below).
- Reduce some verbose logs from DEBUG to TRACE.
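
A condensed, illustrative sketch of the new parameter-redaction path in
QueryServiceRequestParameters (names follow the patch below; not a verbatim
excerpt). LogRedactionUtil.userData wraps the serialized value so the
configured log redactor can mask it:

    for (Map.Entry<String, JsonNode> statementParam : statementParams.entrySet()) {
        try {
            // serialize the parameter value, then mark it as user data for redaction
            String s = OBJECT_MAPPER.writeValueAsString(statementParam.getValue());
            object.put('$' + statementParam.getKey(), LogRedactionUtil.userData(s));
        } catch (JsonProcessingException e) {
            // skip parameters that cannot be serialized
        }
    }

Query cancellations are also logged with an explicit reason, e.g.
(illustrative): "Cancelling query with uuid:..., clientContextID:... due to timeout".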

Ext-ref: MB-66663
Change-Id: I67ae0dee5ca79bb03b982842f0b1a3f99e705905
Reviewed-on: https://asterix-gerrit.ics.uci.edu/c/asterixdb/+/19731
Tested-by: Jenkins <jenkins@fulliautomatix.ics.uci.edu>
Reviewed-by: Ali Alsuliman <ali.al.solaiman@gmail.com>
Reviewed-by: Ian Maxon <imaxon@apache.org>
Reviewed-by: Michael Blow <mblow@apache.org>
diff --git a/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/NCQueryServiceServlet.java b/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/NCQueryServiceServlet.java
index 9fa479c..54fb852 100644
--- a/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/NCQueryServiceServlet.java
+++ b/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/NCQueryServiceServlet.java
@@ -97,13 +97,15 @@
             try {
                 responseMsg = (ExecuteStatementResponseMessage) responseFuture.get(timeout, TimeUnit.MILLISECONDS);
             } catch (InterruptedException e) {
-                cancelQuery(ncMb, ncCtx.getNodeId(), requestReference.getUuid(), param.getClientContextID(), e, false);
+                cancelQuery(ncMb, ncCtx.getNodeId(), requestReference.getUuid(), param.getClientContextID(), e, false,
+                        "interrupt");
                 throw e;
             } catch (TimeoutException exception) {
                 RuntimeDataException hde = new RuntimeDataException(ErrorCode.REQUEST_TIMEOUT);
                 hde.addSuppressed(exception);
                 // cancel query
-                cancelQuery(ncMb, ncCtx.getNodeId(), requestReference.getUuid(), param.getClientContextID(), hde, true);
+                cancelQuery(ncMb, ncCtx.getNodeId(), requestReference.getUuid(), param.getClientContextID(), hde, true,
+                        "timeout");
                 throw hde;
             }
             executionState.end();
@@ -156,7 +158,7 @@
     }
 
     private void cancelQuery(INCMessageBroker messageBroker, String nodeId, String uuid, String clientContextID,
-            Exception exception, boolean wait) {
+            Exception exception, boolean wait, String reason) {
         if (uuid == null && clientContextID == null) {
             return;
         }
@@ -165,8 +167,7 @@
             CancelQueryRequest cancelQueryMessage =
                     new CancelQueryRequest(nodeId, cancelQueryFuture.getFutureId(), uuid, clientContextID);
             // TODO(mblow): multicc -- need to send cancellation to the correct cc
-            LOGGER.info("Cancelling query with uuid:{}, clientContextID:{} due to {}", uuid, clientContextID,
-                    exception.getClass().getSimpleName());
+            LOGGER.info("Cancelling query with uuid:{}, clientContextID:{} due to {}", uuid, clientContextID, reason);
             messageBroker.sendMessageToPrimaryCC(cancelQueryMessage);
             if (wait) {
                 cancelQueryFuture.get(ExecuteStatementRequestMessage.DEFAULT_QUERY_CANCELLATION_WAIT_MILLIS,
diff --git a/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/QueryServiceRequestParameters.java b/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/QueryServiceRequestParameters.java
index 563f498..ed47832 100644
--- a/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/QueryServiceRequestParameters.java
+++ b/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/QueryServiceRequestParameters.java
@@ -19,8 +19,6 @@
 
 package org.apache.asterix.api.http.server;
 
-import static org.apache.asterix.utils.RedactionUtil.REDACTED_SENSITIVE_ENTRY_VALUE;
-
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -428,7 +426,12 @@
         object.put("source", source);
         if (statementParams != null) {
             for (Map.Entry<String, JsonNode> statementParam : statementParams.entrySet()) {
-                object.set('$' + statementParam.getKey(), REDACTED_SENSITIVE_ENTRY_VALUE);
+                try {
+                    String s = OBJECT_MAPPER.writeValueAsString(statementParam.getValue());
+                    object.put('$' + statementParam.getKey(), LogRedactionUtil.userData(s));
+                } catch (JsonProcessingException e) {
+                    // ignore: skip parameters that cannot be serialized
+                }
             }
         }
         return object;
diff --git a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/job/JobManager.java b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/job/JobManager.java
index d803c88..36c03d7 100644
--- a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/job/JobManager.java
+++ b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/job/JobManager.java
@@ -316,7 +316,7 @@
         run.setStartTime(System.currentTimeMillis());
         run.setStartTimeZoneId(ZoneId.systemDefault().getId());
         JobId jobId = run.getJobId();
-        logJobCapacity(run, "running", Level.DEBUG);
+        logJobCapacity(run, "running", Level.INFO);
         activeRunMap.put(jobId, run);
         run.setStatus(JobStatus.RUNNING, null);
         executeJobInternal(run);
@@ -360,7 +360,7 @@
     private void releaseJobCapacity(JobRun jobRun) {
         final JobSpecification job = jobRun.getJobSpecification();
         jobCapacityController.release(job);
-        logJobCapacity(jobRun, "released", Level.DEBUG);
+        logJobCapacity(jobRun, "released", Level.INFO);
     }
 
     private void logJobCapacity(JobRun jobRun, String jobStateDesc, Level lvl) {
@@ -374,7 +374,8 @@
             return;
         }
         IReadOnlyClusterCapacity clusterCapacity = jobCapacityController.getClusterCapacity();
-        LOGGER.log(lvl, "{} {}, memory={}, cpu={}, (new) cluster memory={}, cpu={}, currently running={}, queued={}",
+        LOGGER.log(lvl,
+                "{} {}, job memory={}, cpu={}, (new) cluster memory={}, cpu={}, currently running={}, queued={}",
                 jobStateDesc, jobRun.getJobId(), requiredMemory, requiredCPUs,
                 clusterCapacity.getAggregatedMemoryByteSize(), clusterCapacity.getAggregatedCores(),
                 getRunningJobsCount(), jobQueue.size());
diff --git a/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/buffermanager/VariableDeletableTupleMemoryManager.java b/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/buffermanager/VariableDeletableTupleMemoryManager.java
index b053cac..40ff2e4 100644
--- a/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/buffermanager/VariableDeletableTupleMemoryManager.java
+++ b/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/buffermanager/VariableDeletableTupleMemoryManager.java
@@ -161,9 +161,7 @@
         policy.close();
         frames.clear();
         numTuples = 0;
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("VariableTupleMemoryManager has reorganized " + statsReOrg + " times");
-        }
+        LOG.trace("VariableTupleMemoryManager has reorganized {} times", statsReOrg);
         statsReOrg = 0;
     }
 
diff --git a/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/group/HashSpillableTableFactory.java b/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/group/HashSpillableTableFactory.java
index 1e5c121..2f2153b 100644
--- a/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/group/HashSpillableTableFactory.java
+++ b/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/group/HashSpillableTableFactory.java
@@ -131,10 +131,8 @@
 
         final int numPartitions = getNumOfPartitions(inputDataBytesSize / ctx.getInitialFrameSize(), memoryBudget);
         final int entriesPerPartition = (int) Math.ceil(1.0 * tableSize / numPartitions);
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug("created hashtable, table size:" + tableSize + " file size:" + inputDataBytesSize
-                    + "  #partitions:" + numPartitions);
-        }
+        LOGGER.trace("created hashtable, table size:{} file size:{}  #partitions:{}", tableSize, inputDataBytesSize,
+                numPartitions);
 
         final ArrayTupleBuilder outputTupleBuilder = new ArrayTupleBuilder(outRecordDescriptor.getFields().length);
 
@@ -185,10 +183,8 @@
                 if (force || hashTableForTuplePointer.isGarbageCollectionNeeded()) {
                     int numberOfFramesReclaimed =
                             hashTableForTuplePointer.collectGarbage(bufferAccessor, tpcIntermediate);
-                    if (LOGGER.isDebugEnabled()) {
-                        LOGGER.debug("Garbage Collection on Hash table is done. Deallocated frames:"
-                                + numberOfFramesReclaimed);
-                    }
+                    LOGGER.trace("Garbage Collection on Hash table is done. Deallocated frames:{}",
+                            numberOfFramesReclaimed);
                     return numberOfFramesReclaimed != -1;
                 }
                 return false;
diff --git a/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/join/OptimizedHybridHashJoinOperatorDescriptor.java b/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/join/OptimizedHybridHashJoinOperatorDescriptor.java
index 0f31491..6fc9124 100644
--- a/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/join/OptimizedHybridHashJoinOperatorDescriptor.java
+++ b/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/join/OptimizedHybridHashJoinOperatorDescriptor.java
@@ -490,12 +490,10 @@
                     long probePartSize = (long) Math.ceil((double) probeSideReader.getFileSize() / (double) frameSize);
                     int beforeMax = Math.max(buildSizeInTuple, probeSizeInTuple);
 
-                    if (LOGGER.isDebugEnabled()) {
-                        LOGGER.debug("\n>>>Joining Partition Pairs (thread_id " + Thread.currentThread().getId()
-                                + ") (pid " + ") - (level " + level + ")" + " - BuildSize:\t" + buildPartSize
-                                + "\tProbeSize:\t" + probePartSize + " - MemForJoin " + (state.memForJoin)
-                                + "  - LeftOuter is " + isLeftOuter);
-                    }
+                    LOGGER.trace(
+                            "\n>>>Joining Partition Pairs (thread_id {}) (pid ) - (level {}) - BuildSize:\t{}\tProbeSize:\t{} - MemForJoin {}  - LeftOuter is {}",
+                            Thread.currentThread().getId(), level, buildPartSize, probePartSize, state.memForJoin,
+                            isLeftOuter);
 
                     // Calculate the expected hash table size for the both side.
                     long expectedHashTableSizeForBuildInFrame =
@@ -511,10 +509,8 @@
                         int tabSize = -1;
                         if (!forceRoleReversal && (isLeftOuter || (buildPartSize < probePartSize))) {
                             //Case 1.1 - InMemHJ (without Role-Reversal)
-                            if (LOGGER.isDebugEnabled()) {
-                                LOGGER.debug("\t>>>Case 1.1 (IsLeftOuter || buildSize<probe) AND ApplyInMemHJ - [Level "
-                                        + level + "]");
-                            }
+                            LOGGER.trace("\t>>>Case 1.1 (IsLeftOuter || buildSize<probe) AND ApplyInMemHJ - [Level {}]",
+                                    level);
                             tabSize = buildSizeInTuple;
                             if (tabSize == 0) {
                                 throw new HyracksDataException(
@@ -524,10 +520,9 @@
                             applyInMemHashJoin(buildKeys, probeKeys, tabSize, buildRd, probeRd, buildHpc, probeHpc,
                                     buildSideReader, probeSideReader, probComp); // checked-confirmed
                         } else { //Case 1.2 - InMemHJ with Role Reversal
-                            if (LOGGER.isDebugEnabled()) {
-                                LOGGER.debug("\t>>>Case 1.2. (NoIsLeftOuter || probe<build) AND ApplyInMemHJ"
-                                        + "WITH RoleReversal - [Level " + level + "]");
-                            }
+                            LOGGER.trace(
+                                    "\t>>>Case 1.2. (NoIsLeftOuter || probe<build) AND ApplyInMemHJ WITH RoleReversal - [Level {}]",
+                                    level);
                             tabSize = probeSizeInTuple;
                             if (tabSize == 0) {
                                 throw new HyracksDataException(
@@ -540,25 +535,18 @@
                     }
                     //Apply (Recursive) HHJ
                     else {
-                        if (LOGGER.isDebugEnabled()) {
-                            LOGGER.debug("\t>>>Case 2. ApplyRecursiveHHJ - [Level " + level + "]");
-                        }
+                        LOGGER.trace("\t>>>Case 2. ApplyRecursiveHHJ - [Level {}]", level);
                         if (!forceRoleReversal && (isLeftOuter || buildPartSize < probePartSize)) {
                             //Case 2.1 - Recursive HHJ (without Role-Reversal)
-                            if (LOGGER.isDebugEnabled()) {
-                                LOGGER.debug(
-                                        "\t\t>>>Case 2.1 - RecursiveHHJ WITH (isLeftOuter || build<probe) - [Level "
-                                                + level + "]");
-                            }
+                            LOGGER.trace(
+                                    "\t\t>>>Case 2.1 - RecursiveHHJ WITH (isLeftOuter || build<probe) - [Level {}]",
+                                    level);
                             applyHybridHashJoin((int) buildPartSize, PROBE_REL, BUILD_REL, probeKeys, buildKeys,
                                     probeRd, buildRd, probeHpc, buildHpc, probeSideReader, buildSideReader, level,
                                     beforeMax, probComp);
 
                         } else { //Case 2.2 - Recursive HHJ (with Role-Reversal)
-                            if (LOGGER.isDebugEnabled()) {
-                                LOGGER.debug(
-                                        "\t\t>>>Case 2.2. - RecursiveHHJ WITH RoleReversal - [Level " + level + "]");
-                            }
+                            LOGGER.trace("\t\t>>>Case 2.2. - RecursiveHHJ WITH RoleReversal - [Level {}]", level);
 
                             applyHybridHashJoin((int) probePartSize, BUILD_REL, PROBE_REL, buildKeys, probeKeys,
                                     buildRd, probeRd, buildHpc, probeHpc, buildSideReader, probeSideReader, level,
@@ -624,10 +612,9 @@
                         BitSet rPStatus = rHHj.getPartitionStatus();
                         if (!forceNLJ && (afterMax < (NLJ_SWITCH_THRESHOLD * beforeMax))) {
                             //Case 2.1.1 - Keep applying HHJ
-                            if (LOGGER.isDebugEnabled()) {
-                                LOGGER.debug("\t\t>>>Case 2.1.1 - KEEP APPLYING RecursiveHHJ WITH "
-                                        + "(isLeftOuter || build<probe) - [Level " + level + "]");
-                            }
+                            LOGGER.trace(
+                                    "\t\t>>>Case 2.1.1 - KEEP APPLYING RecursiveHHJ WITH (isLeftOuter || build<probe) - [Level {}]",
+                                    level);
                             for (int rPid = rPStatus.nextSetBit(0); rPid >= 0; rPid = rPStatus.nextSetBit(rPid + 1)) {
                                 RunFileReader rbrfw = rHHj.getBuildRFReader(rPid);
                                 RunFileReader rprfw = rHHj.getProbeRFReader(rPid);
@@ -656,10 +643,9 @@
                             }
 
                         } else { //Case 2.1.2 - Switch to NLJ
-                            if (LOGGER.isDebugEnabled()) {
-                                LOGGER.debug("\t\t>>>Case 2.1.2 - SWITCHED to NLJ RecursiveHHJ WITH "
-                                        + "(isLeftOuter || build<probe) - [Level " + level + "]");
-                            }
+                            LOGGER.trace(
+                                    "\t\t>>>Case 2.1.2 - SWITCHED to NLJ RecursiveHHJ WITH (isLeftOuter || build<probe) - [Level {}]",
+                                    level);
                             for (int rPid = rPStatus.nextSetBit(0); rPid >= 0; rPid = rPStatus.nextSetBit(rPid + 1)) {
                                 RunFileReader rbrfw = rHHj.getBuildRFReader(rPid);
                                 RunFileReader rprfw = rHHj.getProbeRFReader(rPid);
diff --git a/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/sort/AbstractExternalSortRunMerger.java b/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/sort/AbstractExternalSortRunMerger.java
index 1beaab8..e573c1c 100644
--- a/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/sort/AbstractExternalSortRunMerger.java
+++ b/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/sort/AbstractExternalSortRunMerger.java
@@ -117,19 +117,14 @@
 
                     if (currentGenerationRunAvailable.isEmpty()) {
                         numberOfPasses++;
-                        if (LOGGER.isDebugEnabled()) {
-                            LOGGER.debug("generated runs:" + stop);
-                        }
+                        LOGGER.trace("generated runs: {}", stop);
                         runs.subList(0, stop).clear();
                         currentGenerationRunAvailable.clear();
                         currentGenerationRunAvailable.set(0, runs.size());
                         stop = runs.size();
                     }
                 } else {
-                    if (LOGGER.isDebugEnabled()) {
-                        LOGGER.debug("final runs: {}", stop);
-                        LOGGER.debug("number of passes: " + numberOfPasses);
-                    }
+                    LOGGER.trace("final runs: {}, number of passes: {}", stop, numberOfPasses);
                     merge(finalWriter, partialRuns);
                     break;
                 }
@@ -206,9 +201,7 @@
             }
         } finally {
             merger.close();
-            if (LOGGER.isDebugEnabled()) {
-                LOGGER.debug("Output " + io + " frames");
-            }
+            LOGGER.trace("Output {} frames", io);
         }
     }