Merge "Merge branch 'gerrit/trinity' into 'master'"
diff --git a/asterixdb/asterix-active/src/main/java/org/apache/asterix/active/ActiveManager.java b/asterixdb/asterix-active/src/main/java/org/apache/asterix/active/ActiveManager.java
index 636279c..bafaea4 100644
--- a/asterixdb/asterix-active/src/main/java/org/apache/asterix/active/ActiveManager.java
+++ b/asterixdb/asterix-active/src/main/java/org/apache/asterix/active/ActiveManager.java
@@ -104,15 +104,17 @@
     }
 
     public void handle(ActiveManagerMessage message) throws HyracksDataException {
-        LOGGER.debug("NC handling {}({})({})", message.getKind(), message.getRuntimeId(), message.getDesc());
         switch (message.getKind()) {
             case STOP_ACTIVITY:
+                logHandle(Level.DEBUG, message);
                 stopRuntime(message);
                 break;
             case REQUEST_STATS:
+                logHandle(Level.TRACE, message);
                 requestStats((ActiveStatsRequestMessage) message);
                 break;
             case GENERIC_EVENT:
+                logHandle(Level.DEBUG, message);
                 deliverGenericEvent(message);
                 break;
             default:
@@ -120,6 +122,10 @@
         }
     }
 
+    private void logHandle(Level level, ActiveManagerMessage message) {
+        LOGGER.log(level, "NC handling {}({})({})", message.getKind(), message.getRuntimeId(), message.getDesc());
+    }
+
     private void deliverGenericEvent(ActiveManagerMessage message) throws HyracksDataException {
         try {
             ActiveRuntimeId runtimeId = message.getRuntimeId();
@@ -140,8 +146,6 @@
             IActiveRuntime runtime = runtimes.get(runtimeId);
             long reqId = message.getReqId();
             if (runtime == null) {
-                LOGGER.warn("Request stats of a runtime that is not registered {}; sending failure response",
-                        runtimeId);
                 // Send a failure message
                 ((NodeControllerService) serviceCtx.getControllerService()).sendApplicationMessageToCC(
                         message.getCcId(),
@@ -151,7 +155,6 @@
                 return;
             }
             String stats = runtime.getStats();
-            LOGGER.debug("Sending stats response for {} ", runtimeId);
             ActiveStatsResponse response = new ActiveStatsResponse(reqId, stats, null);
             ((NodeControllerService) serviceCtx.getControllerService()).sendRealTimeApplicationMessageToCC(
                     message.getCcId(), JavaSerializationUtils.serialize(response), null);
diff --git a/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/QueryServiceRequestParameters.java b/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/QueryServiceRequestParameters.java
index 20d79d5..563f498 100644
--- a/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/QueryServiceRequestParameters.java
+++ b/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/QueryServiceRequestParameters.java
@@ -127,6 +127,7 @@
     private String path;
     private String statement;
     private String clientContextID;
+    private String requestId;
     private String dataverse;
     private String source;
     private ClientType clientType = ClientType.ASTERIX;
@@ -389,6 +390,14 @@
         return maxWarnings;
     }
 
+    public void setRequestId(String requestId) {
+        this.requestId = requestId;
+    }
+
+    public String getRequestId() {
+        return requestId;
+    }
+
     public ObjectNode asJson() {
         ObjectNode object = OBJECT_MAPPER.createObjectNode();
         object.put("host", host);
diff --git a/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/QueryServiceServlet.java b/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/QueryServiceServlet.java
index 93531bd..4798f9e 100644
--- a/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/QueryServiceServlet.java
+++ b/asterixdb/asterix-app/src/main/java/org/apache/asterix/api/http/server/QueryServiceServlet.java
@@ -90,6 +90,7 @@
 import org.apache.hyracks.http.api.IServletRequest;
 import org.apache.hyracks.http.api.IServletResponse;
 import org.apache.hyracks.http.server.utils.HttpUtil;
+import org.apache.logging.log4j.Level;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
@@ -269,6 +270,7 @@
         ResponsePrinter responsePrinter = new ResponsePrinter(sessionOutput);
         ResultDelivery delivery = ResultDelivery.IMMEDIATE;
         QueryServiceRequestParameters param = newQueryRequestParameters();
+        param.setRequestId(requestRef.getUuid());
         RequestExecutionState executionState = newRequestExecutionState();
         try {
             // buffer the output until we are ready to set the status of the response message correctly
@@ -437,12 +439,13 @@
                     executionState.setStatus(ResultStatus.FATAL, HttpResponseStatus.BAD_REQUEST);
                     return true;
                 case REQUEST_TIMEOUT:
-                    LOGGER.info(() -> "handleException: request execution timed out: " + param.toString());
+                    logException(Level.INFO, "request execution timed out", param.getRequestId(),
+                            param.getClientContextID());
                     executionState.setStatus(ResultStatus.TIMEOUT, HttpResponseStatus.OK);
                     return true;
                 case REJECT_NODE_UNREGISTERED:
                 case REJECT_BAD_CLUSTER_STATE:
-                    LOGGER.warn(() -> "handleException: " + ex.getMessage() + ": " + param.toString());
+                    logException(Level.WARN, ex.getMessage(), param.getRequestId(), param.getClientContextID());
                     executionState.setStatus(ResultStatus.FATAL, HttpResponseStatus.SERVICE_UNAVAILABLE);
                     return true;
                 default:
@@ -462,9 +465,9 @@
             QueryServiceRequestParameters param, IServletResponse response) {
         if (t instanceof org.apache.asterix.lang.sqlpp.parser.TokenMgrError || t instanceof AlgebricksException) {
             if (LOGGER.isDebugEnabled()) {
-                LOGGER.debug("handleException: {}: {}", t.getMessage(), param.toString(), t);
+                logException(Level.DEBUG, t.getMessage(), param.getRequestId(), param.getClientContextID(), t);
             } else {
-                LOGGER.info(() -> "handleException: " + t.getMessage() + ": " + param.toString());
+                logException(Level.INFO, t.getMessage(), param.getRequestId(), param.getClientContextID());
             }
             executionState.setStatus(ResultStatus.FATAL, HttpResponseStatus.BAD_REQUEST);
             return;
@@ -476,7 +479,7 @@
                 return;
             }
         }
-        LOGGER.warn(() -> "handleException: unexpected exception: " + param.toString(), t);
+        logException(Level.WARN, "unexpected exception", param.getRequestId(), param.getClientContextID(), t);
         executionState.setStatus(ResultStatus.FATAL, HttpResponseStatus.INTERNAL_SERVER_ERROR);
     }
 
@@ -548,4 +551,12 @@
     protected String getApplicationVersion() {
         return ApplicationConfigurator.getApplicationVersion(appCtx.getBuildProperties());
     }
+
+    private void logException(Level lvl, String msg, String uuid, String clientCtxId) {
+        LOGGER.log(lvl, "handleException: {}: uuid={}, clientContextID={}", msg, uuid, clientCtxId);
+    }
+
+    private void logException(Level lvl, String msg, String uuid, String clientCtxId, Throwable t) {
+        LOGGER.log(lvl, "handleException: {}: uuid={}, clientContextID={}", msg, uuid, clientCtxId, t);
+    }
 }
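
For reference, a sketch of what one of the rewritten call sites produces with the helpers above: the request UUID and client context ID replace the full param.toString() dump. The identifier values below are made up, and the exact prefix depends on the configured log4j2 pattern layout:

    logException(Level.INFO, "request execution timed out", param.getRequestId(), param.getClientContextID());
    // expected message body (values are hypothetical):
    // handleException: request execution timed out: uuid=3f9c1a7e-..., clientContextID=cli-7
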
diff --git a/asterixdb/asterix-app/src/main/java/org/apache/asterix/app/active/ActiveEntityEventsListener.java b/asterixdb/asterix-app/src/main/java/org/apache/asterix/app/active/ActiveEntityEventsListener.java
index 697cf21..0dc761b 100644
--- a/asterixdb/asterix-app/src/main/java/org/apache/asterix/app/active/ActiveEntityEventsListener.java
+++ b/asterixdb/asterix-app/src/main/java/org/apache/asterix/app/active/ActiveEntityEventsListener.java
@@ -501,9 +501,7 @@
         ICCMessageBroker messageBroker = (ICCMessageBroker) applicationCtx.getServiceContext().getMessageBroker();
         AlgebricksAbsolutePartitionConstraint runtimeLocations = getLocations();
         int partition = 0;
-        if (LOGGER.isInfoEnabled()) {
-            LOGGER.log(Level.INFO, "Sending stop messages to " + runtimeLocations);
-        }
+        LOGGER.log(Level.INFO, "sending stop messages to {}", runtimeLocations);
         for (String location : runtimeLocations.getLocations()) {
             ActiveRuntimeId runtimeId = getActiveRuntimeId(partition++);
             messageBroker.sendApplicationMessageToNC(new ActiveManagerMessage(ActiveManagerMessage.Kind.STOP_ACTIVITY,
@@ -566,14 +564,10 @@
         WaitForStateSubscriber subscriber;
         Future<Void> suspendTask;
         synchronized (this) {
-            if (LOGGER.isEnabled(level)) {
-                LOGGER.log(level, "suspending entity " + entityId);
-                LOGGER.log(level, "Waiting for ongoing activities");
-            }
+            LOGGER.log(level, "{} suspending entity {}", jobId, entityId);
+            LOGGER.log(level, "{} waiting for ongoing activities", jobId);
             waitForNonTransitionState();
-            if (LOGGER.isEnabled(level)) {
-                LOGGER.log(level, "Proceeding with suspension. Current state is " + state);
-            }
+            LOGGER.log(level, "{} proceeding with suspension. current state is {}", jobId, state);
             if (state == ActivityState.STOPPED) {
                 suspended = true;
                 return;
@@ -594,12 +588,12 @@
                         doSuspend(metadataProvider);
                         return null;
                     });
-            LOGGER.log(level, "Suspension task has been submitted");
+            LOGGER.log(level, "{} suspension task has been submitted", jobId);
         }
         try {
-            LOGGER.log(level, "Waiting for suspension task to complete");
+            LOGGER.log(level, "{} waiting for suspension task to complete", jobId);
             suspendTask.get();
-            LOGGER.log(level, "waiting for state to become SUSPENDED or TEMPORARILY_FAILED");
+            LOGGER.log(level, "{} waiting for state to become SUSPENDED or TEMPORARILY_FAILED", jobId);
             subscriber.sync();
             suspended = true;
         } catch (Exception e) {
diff --git a/asterixdb/asterix-app/src/main/java/org/apache/asterix/app/active/ActiveNotificationHandler.java b/asterixdb/asterix-app/src/main/java/org/apache/asterix/app/active/ActiveNotificationHandler.java
index 4d654d5..5fe082b 100644
--- a/asterixdb/asterix-app/src/main/java/org/apache/asterix/app/active/ActiveNotificationHandler.java
+++ b/asterixdb/asterix-app/src/main/java/org/apache/asterix/app/active/ActiveNotificationHandler.java
@@ -95,7 +95,7 @@
         Object property = jobSpecification.getProperty(ACTIVE_ENTITY_PROPERTY_NAME);
         if (!(property instanceof EntityId)) {
             if (property != null) {
-                LOGGER.debug("{} is not an active job. job property={}", jobId, property);
+                LOGGER.debug("{} is not an ingestion job. job property={}", jobId, property);
             }
             return;
         }
diff --git a/asterixdb/asterix-app/src/main/java/org/apache/asterix/app/nc/RecoveryManager.java b/asterixdb/asterix-app/src/main/java/org/apache/asterix/app/nc/RecoveryManager.java
index a7d30a3..77a1e01 100644
--- a/asterixdb/asterix-app/src/main/java/org/apache/asterix/app/nc/RecoveryManager.java
+++ b/asterixdb/asterix-app/src/main/java/org/apache/asterix/app/nc/RecoveryManager.java
@@ -637,9 +637,6 @@
         boolean infoEnabled = LOGGER.isInfoEnabled();
         // check if the transaction actually wrote some logs.
         if (firstLSN == TransactionManagementConstants.LogManagerConstants.TERMINAL_LSN || firstLSN > lastLSN) {
-            if (infoEnabled) {
-                LOGGER.info("no need to rollback as there were no operations by " + txnContext.getTxnId());
-            }
             return;
         }
         if (infoEnabled) {
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.1.query.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.1.query.sqlpp
new file mode 100644
index 0000000..f69df6e
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.1.query.sqlpp
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*
+ * Description: This test case is to verify the fix for ASTERIXDB-3418
+ */
+
+select value 0.0=-0.0
\ No newline at end of file
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.2.query.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.2.query.sqlpp
new file mode 100644
index 0000000..0080dcf
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.2.query.sqlpp
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*
+ * Description: This test case is to verify the fix for ASTERIXDB-3418
+ */
+
+select x.a, count(*) from [{"a":0.0, "b":1}, {"a":-0.0, "b":2}] x group by x.a;
\ No newline at end of file
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.3.query.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.3.query.sqlpp
new file mode 100644
index 0000000..f460366
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.3.query.sqlpp
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*
+ * Description: This test case is to verify the fix for ASTERIXDB-3418
+ */
+
+select distinct(x.a) from [{"a":0.0, "b":1}, {"a":-0.0, "b":2}] x;
\ No newline at end of file
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.4.query.sqlpp b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.4.query.sqlpp
new file mode 100644
index 0000000..e5e3bb7
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/queries_sqlpp/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.4.query.sqlpp
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*
+ * Description: This test case is to verify the fix for ASTERIXDB-3418
+ */
+
+select value -0.0<0.0
\ No newline at end of file
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/results/comparison/arrays/arrays.020.adm b/asterixdb/asterix-app/src/test/resources/runtimets/results/comparison/arrays/arrays.020.adm
index 7b82038..dc424d3 100644
--- a/asterixdb/asterix-app/src/test/resources/runtimets/results/comparison/arrays/arrays.020.adm
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/results/comparison/arrays/arrays.020.adm
@@ -1 +1 @@
-{ "t1": { "c": "[0,1] = [double('0'), float('1')]", "r": true }, "t2": { "c": "[-0, -1] = [float('-0'), -1]", "r": false }, "t3": { "c": "[double('INF')] > [0]", "r": true }, "t4": { "c": "[double('-INF')] < [0]", "r": true }, "t5": { "c": "[double('INF')] > [-0]", "r": true }, "t6": { "c": "[double('-INF')] < [-0]", "r": true }, "t7": { "c": "[double('INF')] > [double('-0')]", "r": true }, "t8": { "c": "[double('-INF')] < [double('-0')]", "r": true }, "t9": { "c": "[double('NaN')] > [0]", "r": true }, "t10": { "c": "[double('NaN')] < [0]", "r": false }, "t11": { "c": "[double('NaN')] > [-0]", "r": true }, "t12": { "c": "[double('NaN')] < [-0]", "r": false }, "t13": { "c": "[double('NaN')] > [double('-0')]", "r": true }, "t14": { "c": "[double('NaN')] < [double('-0')]", "r": false }, "t15": { "c": "[double('-INF')] < [double('INF')]", "r": true }, "t16": { "c": "[double('INF')] > [double('NaN')]", "r": false }, "t17": { "c": "[double('-INF')] < [double('NaN')]", "r": true } }
\ No newline at end of file
+{ "t1": { "c": "[0,1] = [double('0'), float('1')]", "r": true }, "t2": { "c": "[-0, -1] = [float('-0'), -1]", "r": true }, "t3": { "c": "[double('INF')] > [0]", "r": true }, "t4": { "c": "[double('-INF')] < [0]", "r": true }, "t5": { "c": "[double('INF')] > [-0]", "r": true }, "t6": { "c": "[double('-INF')] < [-0]", "r": true }, "t7": { "c": "[double('INF')] > [double('-0')]", "r": true }, "t8": { "c": "[double('-INF')] < [double('-0')]", "r": true }, "t9": { "c": "[double('NaN')] > [0]", "r": true }, "t10": { "c": "[double('NaN')] < [0]", "r": false }, "t11": { "c": "[double('NaN')] > [-0]", "r": true }, "t12": { "c": "[double('NaN')] < [-0]", "r": false }, "t13": { "c": "[double('NaN')] > [double('-0')]", "r": true }, "t14": { "c": "[double('NaN')] < [double('-0')]", "r": false }, "t15": { "c": "[double('-INF')] < [double('INF')]", "r": true }, "t16": { "c": "[double('INF')] > [double('NaN')]", "r": false }, "t17": { "c": "[double('-INF')] < [double('NaN')]", "r": true } }
\ No newline at end of file
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/results/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.1.adm b/asterixdb/asterix-app/src/test/resources/runtimets/results/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.1.adm
new file mode 100644
index 0000000..f32a580
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/results/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.1.adm
@@ -0,0 +1 @@
+true
\ No newline at end of file
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/results/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.2.adm b/asterixdb/asterix-app/src/test/resources/runtimets/results/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.2.adm
new file mode 100644
index 0000000..6a6d86c
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/results/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.2.adm
@@ -0,0 +1 @@
+{ "a": -0.0, "$1": 2 }
\ No newline at end of file
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/results/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.3.adm b/asterixdb/asterix-app/src/test/resources/runtimets/results/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.3.adm
new file mode 100644
index 0000000..767521e
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/results/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.3.adm
@@ -0,0 +1 @@
+{ "a": -0.0 }
\ No newline at end of file
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/results/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.4.adm b/asterixdb/asterix-app/src/test/resources/runtimets/results/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.4.adm
new file mode 100644
index 0000000..02e4a84
--- /dev/null
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/results/misc/query-ASTERIXDB-3418/query-ASTERIXDB-3418.4.adm
@@ -0,0 +1 @@
+false
\ No newline at end of file
diff --git a/asterixdb/asterix-app/src/test/resources/runtimets/sqlpp_queries.xml b/asterixdb/asterix-app/src/test/resources/runtimets/sqlpp_queries.xml
index d9d57a4..6f5eedd 100644
--- a/asterixdb/asterix-app/src/test/resources/runtimets/sqlpp_queries.xml
+++ b/asterixdb/asterix-app/src/test/resources/runtimets/sqlpp_queries.xml
@@ -7377,6 +7377,11 @@
       </compilation-unit>
     </test-case>
     <test-case FilePath="misc">
+      <compilation-unit name="query-ASTERIXDB-3418">
+        <output-dir compare="Text">query-ASTERIXDB-3418</output-dir>
+      </compilation-unit>
+    </test-case>
+    <test-case FilePath="misc">
       <compilation-unit name="query-ASTERIXDB-3403">
         <output-dir compare="Text">query-ASTERIXDB-3403</output-dir>
       </compilation-unit>
diff --git a/asterixdb/asterix-om/src/main/java/org/apache/asterix/dataflow/data/nontagged/comparators/ComparatorUtil.java b/asterixdb/asterix-om/src/main/java/org/apache/asterix/dataflow/data/nontagged/comparators/ComparatorUtil.java
index 4db23b2..0eb30f8 100644
--- a/asterixdb/asterix-om/src/main/java/org/apache/asterix/dataflow/data/nontagged/comparators/ComparatorUtil.java
+++ b/asterixdb/asterix-om/src/main/java/org/apache/asterix/dataflow/data/nontagged/comparators/ComparatorUtil.java
@@ -84,9 +84,9 @@
     // start points to the value; checking left and right are compatible and numbers has to be done before calling this
     static int compareNumbers(ATypeTag lTag, byte[] l, int lStart, ATypeTag rTag, byte[] r, int rStart) {
         if (lTag == DOUBLE || rTag == DOUBLE) {
-            return Double.compare(getDoubleValue(lTag, l, lStart), getDoubleValue(rTag, r, rStart));
+            return compareDoubles(getDoubleValue(lTag, l, lStart), getDoubleValue(rTag, r, rStart));
         } else if (lTag == FLOAT || rTag == FLOAT) {
-            return Float.compare((float) getDoubleValue(lTag, l, lStart), (float) getDoubleValue(rTag, r, rStart));
+            return compareFloats((float) getDoubleValue(lTag, l, lStart), (float) getDoubleValue(rTag, r, rStart));
         } else if (lTag == BIGINT || rTag == BIGINT) {
             return Long.compare(getLongValue(lTag, l, lStart), getLongValue(rTag, r, rStart));
         } else if (lTag == INTEGER || lTag == SMALLINT || lTag == TINYINT) {
@@ -103,10 +103,10 @@
         byte[] leftBytes = left.getByteArray();
         int start = left.getStartOffset();
         if (leftTag == DOUBLE || rightTag == DOUBLE) {
-            return asResult(Double.compare(getDoubleValue(leftTag, leftBytes, start), getConstantDouble(right)));
+            return asResult(compareDoubles(getDoubleValue(leftTag, leftBytes, start), getConstantDouble(right)));
         } else if (leftTag == FLOAT || rightTag == FLOAT) {
             return asResult(
-                    Float.compare((float) getDoubleValue(leftTag, leftBytes, start), (float) getConstantDouble(right)));
+                    compareFloats((float) getDoubleValue(leftTag, leftBytes, start), (float) getConstantDouble(right)));
         } else if (leftTag == BIGINT || rightTag == BIGINT) {
             return asResult(Long.compare(getLongValue(leftTag, leftBytes, start), getConstantLong(right)));
         } else if (leftTag == INTEGER || leftTag == SMALLINT || leftTag == TINYINT) {
@@ -122,10 +122,10 @@
         ATypeTag leftTag = leftConstant.getType().getTypeTag();
         ATypeTag rightTag = rightConstant.getType().getTypeTag();
         if (leftTag == DOUBLE || rightTag == DOUBLE) {
-            return asResult(Double.compare(getConstantDouble(leftConstant), getConstantDouble(rightConstant)));
+            return asResult(compareDoubles(getConstantDouble(leftConstant), getConstantDouble(rightConstant)));
         } else if (leftTag == FLOAT || rightTag == FLOAT) {
             return asResult(
-                    Float.compare((float) getConstantDouble(leftConstant), (float) getConstantDouble(rightConstant)));
+                    compareFloats((float) getConstantDouble(leftConstant), (float) getConstantDouble(rightConstant)));
         } else if (leftTag == BIGINT || rightTag == BIGINT) {
             return asResult(Long.compare(getConstantLong(leftConstant), getConstantLong(rightConstant)));
         } else if (leftTag == INTEGER || leftTag == SMALLINT || leftTag == TINYINT) {
@@ -208,4 +208,12 @@
                 throw new UnsupportedOperationException();
         }
     }
+
+    public static int compareDoubles(double d1, double d2) {
+        return d1 == d2 ? 0 : Double.compare(d1, d2);
+    }
+
+    public static int compareFloats(float f1, float f2) {
+        return f1 == f2 ? 0 : Float.compare(f1, f2);
+    }
 }
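
The two helpers above are the substance of the ASTERIXDB-3418 fix: Double.compare and Float.compare order -0.0 strictly before 0.0 (falling back to the raw bit patterns), whereas the new tests expect the two zeros to compare as equal. The leading == check short-circuits that case and still delegates NaN ordering to the JDK. A minimal, self-contained sketch of the behavior; the class name and method placement here are illustrative only:

    // Illustrative only; compareDoubles mirrors ComparatorUtil.compareDoubles from the hunk above.
    public class NegativeZeroCompareDemo {
        static int compareDoubles(double d1, double d2) {
            return d1 == d2 ? 0 : Double.compare(d1, d2);
        }

        public static void main(String[] args) {
            System.out.println(Double.compare(0.0d, -0.0d));     // 1: the JDK orders -0.0 before 0.0
            System.out.println(0.0d == -0.0d);                    // true: IEEE-754 equality
            System.out.println(compareDoubles(0.0d, -0.0d));      // 0: zeros compare equal (tests .1-.3)
            System.out.println(compareDoubles(-0.0d, 0.0d) < 0);  // false: matches query-ASTERIXDB-3418.4
            System.out.println(compareDoubles(Double.NaN, 1.0d)); // 1: NaN still ordered after other values
        }
    }
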
diff --git a/asterixdb/asterix-runtime/src/main/java/org/apache/asterix/runtime/evaluators/functions/SleepDescriptor.java b/asterixdb/asterix-runtime/src/main/java/org/apache/asterix/runtime/evaluators/functions/SleepDescriptor.java
index ac87f7e..ef348fe 100644
--- a/asterixdb/asterix-runtime/src/main/java/org/apache/asterix/runtime/evaluators/functions/SleepDescriptor.java
+++ b/asterixdb/asterix-runtime/src/main/java/org/apache/asterix/runtime/evaluators/functions/SleepDescriptor.java
@@ -64,16 +64,16 @@
                         final long time = ATypeHierarchy.getLongValue(getIdentifier().getName(), 1, bytes, offset);
 
                         try {
-                            if (LOGGER.isInfoEnabled()) {
-                                LOGGER.log(Level.INFO,
+                            if (LOGGER.isTraceEnabled()) {
+                                LOGGER.log(Level.TRACE,
                                         ctx.getTaskContext().getTaskAttemptId() + " sleeping for " + time + " ms");
                             }
                             Thread.sleep(time);
                         } catch (InterruptedException e) {
                             Thread.currentThread().interrupt();
                         } finally {
-                            if (LOGGER.isInfoEnabled()) {
-                                LOGGER.log(Level.INFO,
+                            if (LOGGER.isTraceEnabled()) {
+                                LOGGER.log(Level.TRACE,
                                         ctx.getTaskContext().getTaskAttemptId() + " done sleeping for " + time + " ms");
                             }
                         }
diff --git a/asterixdb/asterix-runtime/src/main/java/org/apache/asterix/runtime/job/resource/JobCapacityController.java b/asterixdb/asterix-runtime/src/main/java/org/apache/asterix/runtime/job/resource/JobCapacityController.java
index b123a5e..ae903d1 100644
--- a/asterixdb/asterix-runtime/src/main/java/org/apache/asterix/runtime/job/resource/JobCapacityController.java
+++ b/asterixdb/asterix-runtime/src/main/java/org/apache/asterix/runtime/job/resource/JobCapacityController.java
@@ -77,6 +77,11 @@
         ensureMaxCapacity();
     }
 
+    @Override
+    public IReadOnlyClusterCapacity getClusterCapacity() {
+        return resourceManager.getCurrentCapacity();
+    }
+
     private void ensureMaxCapacity() {
         final IClusterCapacity currentCapacity = resourceManager.getCurrentCapacity();
         final IReadOnlyClusterCapacity maximumCapacity = resourceManager.getMaximumCapacity();
diff --git a/hyracks-fullstack/algebricks/algebricks-runtime/src/main/java/org/apache/hyracks/algebricks/runtime/operators/base/AbstractOneInputOneOutputOneFramePushRuntime.java b/hyracks-fullstack/algebricks/algebricks-runtime/src/main/java/org/apache/hyracks/algebricks/runtime/operators/base/AbstractOneInputOneOutputOneFramePushRuntime.java
index 9f4541f5..0c74260 100644
--- a/hyracks-fullstack/algebricks/algebricks-runtime/src/main/java/org/apache/hyracks/algebricks/runtime/operators/base/AbstractOneInputOneOutputOneFramePushRuntime.java
+++ b/hyracks-fullstack/algebricks/algebricks-runtime/src/main/java/org/apache/hyracks/algebricks/runtime/operators/base/AbstractOneInputOneOutputOneFramePushRuntime.java
@@ -74,7 +74,7 @@
     }
 
     protected void flushIfNotFailed() throws HyracksDataException {
-        if (!failed && appender.getTupleCount() > 0) {
+        if (!failed && appender != null && appender.getTupleCount() > 0) {
             flushAndReset();
         }
     }
diff --git a/hyracks-fullstack/hyracks/hyracks-api/src/main/java/org/apache/hyracks/api/job/resource/DefaultJobCapacityController.java b/hyracks-fullstack/hyracks/hyracks-api/src/main/java/org/apache/hyracks/api/job/resource/DefaultJobCapacityController.java
index 9e38a20..b18bcb1 100644
--- a/hyracks-fullstack/hyracks/hyracks-api/src/main/java/org/apache/hyracks/api/job/resource/DefaultJobCapacityController.java
+++ b/hyracks-fullstack/hyracks/hyracks-api/src/main/java/org/apache/hyracks/api/job/resource/DefaultJobCapacityController.java
@@ -24,6 +24,11 @@
 public class DefaultJobCapacityController implements IJobCapacityController {
 
     public static final DefaultJobCapacityController INSTANCE = new DefaultJobCapacityController();
+    private static final IClusterCapacity CAPACITY = new ClusterCapacity();
+    static {
+        CAPACITY.setAggregatedCores(Integer.MAX_VALUE);
+        CAPACITY.setAggregatedMemoryByteSize(Long.MAX_VALUE);
+    }
 
     private DefaultJobCapacityController() {
     }
@@ -37,4 +42,9 @@
     public void release(JobSpecification job) {
         // No operation here.
     }
+
+    @Override
+    public IReadOnlyClusterCapacity getClusterCapacity() {
+        return CAPACITY;
+    }
 }
diff --git a/hyracks-fullstack/hyracks/hyracks-api/src/main/java/org/apache/hyracks/api/job/resource/IJobCapacityController.java b/hyracks-fullstack/hyracks/hyracks-api/src/main/java/org/apache/hyracks/api/job/resource/IJobCapacityController.java
index 5fa4bd9..f88baa2 100644
--- a/hyracks-fullstack/hyracks/hyracks-api/src/main/java/org/apache/hyracks/api/job/resource/IJobCapacityController.java
+++ b/hyracks-fullstack/hyracks/hyracks-api/src/main/java/org/apache/hyracks/api/job/resource/IJobCapacityController.java
@@ -57,4 +57,10 @@
      */
     void release(JobSpecification job);
 
+    /**
+     * Returns the current capacity of the cluster.
+     *
+     * @return the current cluster capacity.
+     */
+    IReadOnlyClusterCapacity getClusterCapacity();
 }
diff --git a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/executor/JobExecutor.java b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/executor/JobExecutor.java
index b476993..567d20c 100644
--- a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/executor/JobExecutor.java
+++ b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/executor/JobExecutor.java
@@ -654,7 +654,11 @@
             }
             TaskAttempt.TaskStatus taStatus = ta.getStatus();
             if (taStatus != TaskAttempt.TaskStatus.RUNNING) {
-                LOGGER.warn(() -> "Spurious task complete notification: " + taId + " Current state = " + taStatus);
+                // don't log if aborted/failed because a task could complete just before the job was aborted/failed
+                if (taStatus != TaskAttempt.TaskStatus.ABORTED && taStatus != TaskAttempt.TaskStatus.FAILED) {
+                    LOGGER.warn("spurious task complete notification {}:{}. current state {}", jobRun.getJobId(), taId,
+                            taStatus);
+                }
                 return;
             }
             ta.setStatus(TaskAttempt.TaskStatus.COMPLETED, null);
diff --git a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/job/JobManager.java b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/job/JobManager.java
index 2b03da5..be3daae 100644
--- a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/job/JobManager.java
+++ b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/job/JobManager.java
@@ -41,7 +41,9 @@
 import org.apache.hyracks.api.job.JobId;
 import org.apache.hyracks.api.job.JobSpecification;
 import org.apache.hyracks.api.job.JobStatus;
+import org.apache.hyracks.api.job.resource.IClusterCapacity;
 import org.apache.hyracks.api.job.resource.IJobCapacityController;
+import org.apache.hyracks.api.job.resource.IReadOnlyClusterCapacity;
 import org.apache.hyracks.api.util.ExceptionUtils;
 import org.apache.hyracks.control.cc.ClusterControllerService;
 import org.apache.hyracks.control.cc.NodeControllerState;
@@ -391,6 +393,7 @@
         run.setStartTime(System.currentTimeMillis());
         run.setStartTimeZoneId(ZoneId.systemDefault().getId());
         JobId jobId = run.getJobId();
+        logJobCapacity(run, "running", Level.DEBUG);
         activeRunMap.put(jobId, run);
         run.setStatus(JobStatus.RUNNING, null);
         executeJobInternal(run);
@@ -398,6 +401,7 @@
 
     // Queue a job when the required capacity for the job is not met.
     private void queueJob(JobRun jobRun) throws HyracksException {
+        logJobCapacity(jobRun, "queueing", Level.INFO);
         jobRun.setStatus(JobStatus.PENDING, null);
         jobQueue.add(jobRun);
     }
@@ -433,6 +437,24 @@
     private void releaseJobCapacity(JobRun jobRun) {
         final JobSpecification job = jobRun.getJobSpecification();
         jobCapacityController.release(job);
+        logJobCapacity(jobRun, "released", Level.DEBUG);
+    }
+
+    private void logJobCapacity(JobRun jobRun, String jobStateDesc, Level lvl) {
+        IClusterCapacity requiredResources = jobRun.getJobSpecification().getRequiredClusterCapacity();
+        if (requiredResources == null) {
+            return;
+        }
+        long requiredMemory = requiredResources.getAggregatedMemoryByteSize();
+        int requiredCPUs = requiredResources.getAggregatedCores();
+        if (requiredMemory == 0 && requiredCPUs == 0) {
+            return;
+        }
+        IReadOnlyClusterCapacity clusterCapacity = jobCapacityController.getClusterCapacity();
+        LOGGER.log(lvl, "{} {}, memory={}, cpu={}, (new) cluster memory={}, cpu={}, currently running={}, queued={}",
+                jobStateDesc, jobRun.getJobId(), requiredMemory, requiredCPUs,
+                clusterCapacity.getAggregatedMemoryByteSize(), clusterCapacity.getAggregatedCores(),
+                getRunningJobsCount(), jobQueue.size());
     }
 
     private void handleException(HyracksException ex) {
diff --git a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/partitions/PartitionMatchMaker.java b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/partitions/PartitionMatchMaker.java
index 8f91944..6278693 100644
--- a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/partitions/PartitionMatchMaker.java
+++ b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/partitions/PartitionMatchMaker.java
@@ -150,7 +150,7 @@
     }
 
     public void removeUncommittedPartitions(Set<PartitionId> partitionIds, Set<TaskAttemptId> taIds, JobId jobId) {
-        if (!partitionIds.isEmpty()) {
+        if (partitionIds != null && !partitionIds.isEmpty()) {
             LOGGER.debug("Removing uncommitted partitions {}: {}", jobId, partitionIds);
         }
         IEntryFilter<PartitionDescriptor> filter =
@@ -167,7 +167,7 @@
     }
 
     public void removePartitionRequests(Set<PartitionId> partitionIds, Set<TaskAttemptId> taIds, JobId jobId) {
-        if (!partitionIds.isEmpty()) {
+        if (partitionIds != null && !partitionIds.isEmpty()) {
             LOGGER.debug("Removing partition requests {}: {}", jobId, partitionIds);
         }
         IEntryFilter<PartitionRequest> filter = o -> taIds.contains(o.getRequestingTaskAttemptId());
diff --git a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/result/ResultDirectoryService.java b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/result/ResultDirectoryService.java
index f56ec33..b6274d9 100644
--- a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/result/ResultDirectoryService.java
+++ b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/result/ResultDirectoryService.java
@@ -80,7 +80,6 @@
     @Override
     public synchronized void notifyJobCreation(JobId jobId, JobSpecification spec,
             IJobCapacityController.JobSubmissionStatus status) throws HyracksException {
-        LOGGER.debug("{} notified of new job {}", getClass().getSimpleName(), jobId);
         if (jobResultLocations.get(jobId) != null) {
             throw HyracksDataException.create(ErrorCode.MORE_THAN_ONE_RESULT, jobId);
         }
diff --git a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/scheduler/FIFOJobQueue.java b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/scheduler/FIFOJobQueue.java
index 260c6b9..38277c2 100644
--- a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/scheduler/FIFOJobQueue.java
+++ b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/scheduler/FIFOJobQueue.java
@@ -121,4 +121,9 @@
     public void clear() {
         jobListMap.clear();
     }
+
+    @Override
+    public int size() {
+        return jobListMap.size();
+    }
 }
diff --git a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/scheduler/IJobQueue.java b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/scheduler/IJobQueue.java
index be40883..1f2c29a 100644
--- a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/scheduler/IJobQueue.java
+++ b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/scheduler/IJobQueue.java
@@ -73,4 +73,11 @@
      * Clears the job queue
      */
     void clear();
+
+    /**
+     * Returns the number of queued jobs.
+     *
+     * @return the number of queued jobs.
+     */
+    int size();
 }
diff --git a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/GetResultPartitionLocationsWork.java b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/GetResultPartitionLocationsWork.java
index d1d2269..1e34b96 100644
--- a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/GetResultPartitionLocationsWork.java
+++ b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/GetResultPartitionLocationsWork.java
@@ -28,6 +28,7 @@
 import org.apache.hyracks.control.cc.result.IResultDirectoryService;
 import org.apache.hyracks.control.common.work.IResultCallback;
 import org.apache.hyracks.control.common.work.SynchronizableWork;
+import org.apache.logging.log4j.Level;
 
 public class GetResultPartitionLocationsWork extends SynchronizableWork {
     private final ClusterControllerService ccs;
@@ -68,4 +69,9 @@
     public String toString() {
         return getName() + ": JobId@" + jobId + " ResultSetId@" + rsId + " Known@" + Arrays.toString(knownRecords);
     }
+
+    @Override
+    public Level logLevel() {
+        return Level.TRACE;
+    }
 }
diff --git a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/JobCleanupWork.java b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/JobCleanupWork.java
index f065940..6fe9909 100644
--- a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/JobCleanupWork.java
+++ b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/JobCleanupWork.java
@@ -28,6 +28,7 @@
 import org.apache.hyracks.control.cc.job.JobRun;
 import org.apache.hyracks.control.common.work.AbstractWork;
 import org.apache.hyracks.control.common.work.IResultCallback;
+import org.apache.logging.log4j.Level;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
@@ -52,6 +53,7 @@
 
     @Override
     public void run() {
+        LOGGER.info("cleaning up {} on NCs, status={}", jobId, status);
         final JobRun jobRun = jobManager.get(jobId);
         if (jobRun == null) {
             LOGGER.debug("ignoring cleanup for unknown {}", jobId);
@@ -78,4 +80,9 @@
         return getName() + ": JobId@" + jobId + " Status@" + status
                 + (exceptions == null ? "" : " Exceptions@" + exceptions);
     }
+
+    @Override
+    public Level logLevel() {
+        return Level.TRACE;
+    }
 }
diff --git a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/RegisterResultPartitionLocationWork.java b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/RegisterResultPartitionLocationWork.java
index b788e27..d517761 100644
--- a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/RegisterResultPartitionLocationWork.java
+++ b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/RegisterResultPartitionLocationWork.java
@@ -90,4 +90,9 @@
                 + nPartitions + " ResultPartitionLocation@" + networkAddress + " metadata@" + metadata + " EmptyResult@"
                 + emptyResult;
     }
+
+    @Override
+    public Level logLevel() {
+        return Level.TRACE;
+    }
 }
diff --git a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/RemoveDeadNodesWork.java b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/RemoveDeadNodesWork.java
index cd9b6d0..9f740ef 100644
--- a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/RemoveDeadNodesWork.java
+++ b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/RemoveDeadNodesWork.java
@@ -69,6 +69,6 @@
 
     @Override
     public Level logLevel() {
-        return Level.DEBUG;
+        return Level.TRACE;
     }
 }
diff --git a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/TaskFailureWork.java b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/TaskFailureWork.java
index 33d391f..48fd403 100644
--- a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/TaskFailureWork.java
+++ b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/TaskFailureWork.java
@@ -22,6 +22,7 @@
 
 import org.apache.hyracks.api.dataflow.TaskAttemptId;
 import org.apache.hyracks.api.job.JobId;
+import org.apache.hyracks.api.util.ErrorMessageUtil;
 import org.apache.hyracks.control.cc.ClusterControllerService;
 import org.apache.hyracks.control.cc.job.IJobManager;
 import org.apache.hyracks.control.cc.job.JobRun;
@@ -50,6 +51,7 @@
 
     @Override
     public String toString() {
-        return getName() + ": [" + jobId + ":" + taId + ":" + nodeId + "]";
+        return getName() + ": [" + jobId + ":" + taId + ":" + nodeId + "] "
+                + ErrorMessageUtil.getCauseMessage(exceptions.get(0));
     }
 }
diff --git a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/WaitForJobCompletionWork.java b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/WaitForJobCompletionWork.java
index ed3e574..63d5340 100644
--- a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/WaitForJobCompletionWork.java
+++ b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-cc/src/main/java/org/apache/hyracks/control/cc/work/WaitForJobCompletionWork.java
@@ -29,6 +29,7 @@
 import org.apache.hyracks.control.cc.job.JobRun;
 import org.apache.hyracks.control.common.work.IResultCallback;
 import org.apache.hyracks.control.common.work.SynchronizableWork;
+import org.apache.logging.log4j.Level;
 
 public class WaitForJobCompletionWork extends SynchronizableWork {
     private final ClusterControllerService ccs;
@@ -92,4 +93,9 @@
     public String toString() {
         return getName() + " jobId:" + jobId;
     }
+
+    @Override
+    public Level logLevel() {
+        return Level.TRACE;
+    }
 }
diff --git a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-nc/src/main/java/org/apache/hyracks/control/nc/partitions/MaterializingPipelinedPartition.java b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-nc/src/main/java/org/apache/hyracks/control/nc/partitions/MaterializingPipelinedPartition.java
index eee8950..e52e3ac 100644
--- a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-nc/src/main/java/org/apache/hyracks/control/nc/partitions/MaterializingPipelinedPartition.java
+++ b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-nc/src/main/java/org/apache/hyracks/control/nc/partitions/MaterializingPipelinedPartition.java
@@ -52,7 +52,6 @@
     private boolean failed;
     protected boolean flushRequest;
     private boolean deallocated;
-    private Level openCloseLevel = Level.DEBUG;
     private Thread dataConsumerThread;
 
     public MaterializingPipelinedPartition(IHyracksTaskContext ctx, PartitionManager manager, PartitionId pid,
@@ -181,9 +180,6 @@
 
     @Override
     public void open() throws HyracksDataException {
-        if (LOGGER.isEnabled(openCloseLevel)) {
-            LOGGER.log(openCloseLevel, "open(" + pid + " by " + taId);
-        }
         size = 0;
         eos = false;
         failed = false;
@@ -215,9 +211,6 @@
 
     @Override
     public void close() throws HyracksDataException {
-        if (LOGGER.isEnabled(openCloseLevel)) {
-            LOGGER.log(openCloseLevel, "close(" + pid + " by " + taId);
-        }
         if (writeHandle != null) {
             ctx.getIoManager().close(writeHandle);
         }
diff --git a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-nc/src/main/java/org/apache/hyracks/control/nc/work/CleanupJobletWork.java b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-nc/src/main/java/org/apache/hyracks/control/nc/work/CleanupJobletWork.java
index efc8467..2036d72 100644
--- a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-nc/src/main/java/org/apache/hyracks/control/nc/work/CleanupJobletWork.java
+++ b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-nc/src/main/java/org/apache/hyracks/control/nc/work/CleanupJobletWork.java
@@ -47,7 +47,7 @@
 
     @Override
     public void run() {
-        LOGGER.debug("cleaning up {}", jobId);
+        LOGGER.debug("cleaning up {}, status:{}", jobId, status);
         ncs.removeJobParameterByteStore(jobId);
         ncs.getPartitionManager().jobCompleted(jobId, status);
         Map<JobId, Joblet> jobletMap = ncs.getJobletMap();
diff --git a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-nc/src/main/java/org/apache/hyracks/control/nc/work/NotifyTaskFailureWork.java b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-nc/src/main/java/org/apache/hyracks/control/nc/work/NotifyTaskFailureWork.java
index 6dd4307..cd79da7 100644
--- a/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-nc/src/main/java/org/apache/hyracks/control/nc/work/NotifyTaskFailureWork.java
+++ b/hyracks-fullstack/hyracks/hyracks-control/hyracks-control-nc/src/main/java/org/apache/hyracks/control/nc/work/NotifyTaskFailureWork.java
@@ -23,6 +23,7 @@
 import org.apache.hyracks.api.dataflow.TaskAttemptId;
 import org.apache.hyracks.api.job.JobId;
 import org.apache.hyracks.api.result.IResultPartitionManager;
+import org.apache.hyracks.api.util.ErrorMessageUtil;
 import org.apache.hyracks.control.common.work.AbstractWork;
 import org.apache.hyracks.control.nc.NodeControllerService;
 import org.apache.hyracks.control.nc.Task;
@@ -65,6 +66,8 @@
 
     @Override
     public String toString() {
-        return getName() + ": [" + ncs.getId() + "[" + jobId + ":" + taskId + "]";
+        return getName() + ": [" + ncs.getId() + "[" + jobId + ":" + taskId + "]"
+                + ((exceptions != null && !exceptions.isEmpty())
+                        ? " " + ErrorMessageUtil.getCauseMessage(exceptions.get(0)) : "");
     }
 }
diff --git a/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/join/OptimizedHybridHashJoinOperatorDescriptor.java b/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/join/OptimizedHybridHashJoinOperatorDescriptor.java
index e04eebe..ed53a7e 100644
--- a/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/join/OptimizedHybridHashJoinOperatorDescriptor.java
+++ b/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/join/OptimizedHybridHashJoinOperatorDescriptor.java
@@ -323,9 +323,6 @@
                         if (!failed) {
                             state.hybridHJ.closeBuild();
                             ctx.setStateObject(state);
-                            if (LOGGER.isTraceEnabled()) {
-                                LOGGER.trace("OptimizedHybridHashJoin closed its build phase");
-                            }
                         } else {
                             state.hybridHJ.clearBuildTempFiles();
                         }
@@ -413,10 +410,6 @@
                     writer.open();
                     state.hybridHJ.initProbe(probComp);
                     state.hybridHJ.setOperatorStats(stats);
-
-                    if (LOGGER.isDebugEnabled()) {
-                        LOGGER.debug("OptimizedHybridHashJoin is starting the probe phase.");
-                    }
                 }
 
                 @Override
@@ -427,7 +420,7 @@
                 @Override
                 public void fail() throws HyracksDataException {
                     failed = true;
-                    if (state.hybridHJ != null) {
+                    if (state != null && state.hybridHJ != null) {
                         state.hybridHJ.fail();
                     }
                     writer.fail();
@@ -438,12 +431,13 @@
                     if (failed) {
                         try {
                             // Clear temp files if fail() was called.
-                            state.hybridHJ.clearBuildTempFiles();
-                            state.hybridHJ.clearProbeTempFiles();
+                            if (state != null && state.hybridHJ != null) {
+                                state.hybridHJ.clearBuildTempFiles();
+                                state.hybridHJ.clearProbeTempFiles();
+                            }
                         } finally {
                             writer.close(); // writer should always be closed.
                         }
-                        logProbeComplete();
                         return;
                     }
                     try {
@@ -488,11 +482,7 @@
                         // Re-throw the whatever is caught.
                         throw e;
                     } finally {
-                        try {
-                            logProbeComplete();
-                        } finally {
-                            writer.close();
-                        }
+                        writer.close();
                     }
                 }
 
@@ -501,12 +491,6 @@
                     this.stats = stats;
                 }
 
-                private void logProbeComplete() {
-                    if (LOGGER.isDebugEnabled()) {
-                        LOGGER.debug("OptimizedHybridHashJoin closed its probe phase");
-                    }
-                }
-
                 //The buildSideReader should be always the original buildSideReader, so should the probeSideReader
                 private void joinPartitionPair(RunFileReader buildSideReader, RunFileReader probeSideReader,
                         int buildSizeInTuple, int probeSizeInTuple, int level) throws HyracksDataException {
diff --git a/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/sort/TupleSorterHeapSort.java b/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/sort/TupleSorterHeapSort.java
index 08f15b3..a1704ec 100644
--- a/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/sort/TupleSorterHeapSort.java
+++ b/hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/sort/TupleSorterHeapSort.java
@@ -41,13 +41,9 @@
 import org.apache.hyracks.dataflow.std.structures.IResetableComparableFactory;
 import org.apache.hyracks.dataflow.std.structures.MaxHeap;
 import org.apache.hyracks.dataflow.std.structures.TuplePointer;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
 
 public class TupleSorterHeapSort implements ITupleSorter {
 
-    private static final Logger LOGGER = LogManager.getLogger();
-
     class HeapEntryFactory implements IResetableComparableFactory<HeapEntry> {
         @Override
         public IResetableComparable<HeapEntry> createResetableComparable() {
@@ -288,7 +284,6 @@
         int maxFrameSize = outputFrame.getFrameSize();
         int numEntries = heap.getNumEntries();
         IResetableComparable[] entries = heap.getEntries();
-        int io = 0;
         for (int i = 0; i < numEntries; i++) {
             HeapEntry minEntry = (HeapEntry) entries[i];
             bufferAccessor1.reset(minEntry.tuplePointer);
@@ -296,14 +291,10 @@
                     bufferAccessor1.getTupleStartOffset(), bufferAccessor1.getTupleLength());
             if (flushed > 0) {
                 maxFrameSize = Math.max(maxFrameSize, flushed);
-                io++;
             }
         }
         maxFrameSize = Math.max(maxFrameSize, outputFrame.getFrameSize());
         outputAppender.write(writer, true);
-        if (LOGGER.isInfoEnabled()) {
-            LOGGER.info("Flushed records:" + numEntries + "; Flushed through " + (io + 1) + " frames");
-        }
         return maxFrameSize;
     }
 
diff --git a/hyracks-fullstack/hyracks/hyracks-examples/hyracks-integration-tests/src/test/java/org/apache/hyracks/tests/integration/AbstractMultiNCIntegrationTest.java b/hyracks-fullstack/hyracks/hyracks-examples/hyracks-integration-tests/src/test/java/org/apache/hyracks/tests/integration/AbstractMultiNCIntegrationTest.java
index be22b9c..7a75a0f 100644
--- a/hyracks-fullstack/hyracks/hyracks-examples/hyracks-integration-tests/src/test/java/org/apache/hyracks/tests/integration/AbstractMultiNCIntegrationTest.java
+++ b/hyracks-fullstack/hyracks/hyracks-examples/hyracks-integration-tests/src/test/java/org/apache/hyracks/tests/integration/AbstractMultiNCIntegrationTest.java
@@ -34,7 +34,9 @@
 import org.apache.hyracks.api.job.JobId;
 import org.apache.hyracks.api.job.JobSpecification;
 import org.apache.hyracks.api.job.JobStatus;
+import org.apache.hyracks.api.job.resource.ClusterCapacity;
 import org.apache.hyracks.api.job.resource.IJobCapacityController;
+import org.apache.hyracks.api.job.resource.IReadOnlyClusterCapacity;
 import org.apache.hyracks.api.result.IResultSet;
 import org.apache.hyracks.api.result.IResultSetReader;
 import org.apache.hyracks.client.result.ResultSet;
@@ -254,6 +256,14 @@
                 public void release(JobSpecification job) {
 
                 }
+
+                @Override
+                public IReadOnlyClusterCapacity getClusterCapacity() {
+                    ClusterCapacity clusterCapacity = new ClusterCapacity();
+                    clusterCapacity.setAggregatedMemoryByteSize(maxRAM);
+                    clusterCapacity.setAggregatedCores(Integer.MAX_VALUE);
+                    return clusterCapacity;
+                }
             };
         }
     }