diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexSupervisorTask.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexSupervisorTask.java
index d0afc0756176..894fe1e5b06a 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexSupervisorTask.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexSupervisorTask.java
@@ -752,7 +752,7 @@ TaskStatus runHashPartitionMultiPhaseParallel(TaskToolbox toolbox) throws Except
       );
 
       // This is for potential debugging in case we suspect bad estimation of cardinalities etc,
-      LOG.debug("intervalToNumShards: %s", intervalToNumShards.toString());
+      LOG.debug("intervalToNumShards: %s", intervalToNumShards);
     } else {
       intervalToNumShards = CollectionUtils.mapValues(
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/SeekableStreamSupervisor.java b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/SeekableStreamSupervisor.java
index 62d3caa83d68..5ea7a4168cb1 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/SeekableStreamSupervisor.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/SeekableStreamSupervisor.java
@@ -1155,14 +1155,13 @@ public void tryInit()
         Instant handleNoticeEndTime = Instant.now();
         Duration timeElapsed = Duration.between(handleNoticeStartTime, handleNoticeEndTime);
         String noticeType = notice.getType();
-        log.debug(
-            "Handled notice [%s] from notices queue in [%d] ms, "
-            + "current notices queue size [%d] for datasource [%s]",
-            noticeType,
-            timeElapsed.toMillis(),
-            getNoticesQueueSize(),
-            dataSource
-        );
+        if (log.isDebugEnabled()) {
+          log.debug(
+              "Handled notice [%s] from notices queue in [%d] ms, "
+              + "current notices queue size [%d] for datasource [%s]",
+              noticeType, timeElapsed.toMillis(), getNoticesQueueSize(), dataSource
+          );
+        }
         emitNoticeProcessTime(noticeType, timeElapsed.toMillis());
       }
       catch (Throwable e) {
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/autoscaler/LagBasedAutoScaler.java b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/autoscaler/LagBasedAutoScaler.java
index 8c645278d55b..a06f358435f8 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/autoscaler/LagBasedAutoScaler.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/autoscaler/LagBasedAutoScaler.java
@@ -149,7 +149,7 @@ private Runnable computeAndCollectLag()
           long totalLags = lagStats.getTotalLag();
           lagMetricsQueue.offer(totalLags > 0 ? totalLags : 0L);
         }
-        log.debug("Current lags [%s] for dataSource [%s].", new ArrayList<>(lagMetricsQueue), dataSource);
+        log.debug("Current lags for dataSource[%s] are [%s].", dataSource, lagMetricsQueue);
       } else {
         log.warn("[%s] supervisor is suspended, skipping lag collection", dataSource);
       }
diff --git a/integration-tests/src/main/java/org/apache/druid/testing/clients/OverlordResourceTestClient.java b/integration-tests/src/main/java/org/apache/druid/testing/clients/OverlordResourceTestClient.java
index bf0f549b190f..95f56ad4b759 100644
--- a/integration-tests/src/main/java/org/apache/druid/testing/clients/OverlordResourceTestClient.java
+++ b/integration-tests/src/main/java/org/apache/druid/testing/clients/OverlordResourceTestClient.java
@@ -185,7 +185,9 @@ private List<TaskResponseObject> getTasks(String identifier)
         HttpMethod.GET,
         StringUtils.format("%s%s", getIndexerURL(), identifier)
     );
-    LOG.debug("Tasks %s response %s", identifier, response.getContent());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Tasks %s response %s", identifier, response.getContent());
+    }
     return jsonMapper.readValue(
         response.getContent(),
         new TypeReference<List<TaskResponseObject>>() {
@@ -204,7 +206,9 @@ public TaskPayloadResponse getTaskPayload(String taskId)
         HttpMethod.GET,
         StringUtils.format("%stask/%s", getIndexerURL(), StringUtils.urlEncode(taskId))
     );
-    LOG.debug("Task %s response %s", taskId, response.getContent());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Task %s response %s", taskId, response.getContent());
+    }
     return jsonMapper.readValue(
         response.getContent(),
         new TypeReference<TaskPayloadResponse>() {
diff --git a/processing/src/main/java/org/apache/druid/frame/processor/FrameProcessorExecutor.java b/processing/src/main/java/org/apache/druid/frame/processor/FrameProcessorExecutor.java
index 02161d5ad5ad..c0f79d30e581 100644
--- a/processing/src/main/java/org/apache/druid/frame/processor/FrameProcessorExecutor.java
+++ b/processing/src/main/java/org/apache/druid/frame/processor/FrameProcessorExecutor.java
@@ -595,7 +595,6 @@ private static void logProcessorStatusString(
       sb.append("; cancel=").append(finishedFuture.isCancelled() ? "y" : "n");
       sb.append("; done=").append(finishedFuture.isDone() ? "y" : "n");
-      log.debug(StringUtils.encodeForFormat(sb.toString()));
     }
   }
diff --git a/processing/src/main/java/org/apache/druid/java/util/common/guava/ParallelMergeCombiningSequence.java b/processing/src/main/java/org/apache/druid/java/util/common/guava/ParallelMergeCombiningSequence.java
index 1df3c9a9dc16..517235a99f98 100644
--- a/processing/src/main/java/org/apache/druid/java/util/common/guava/ParallelMergeCombiningSequence.java
+++ b/processing/src/main/java/org/apache/druid/java/util/common/guava/ParallelMergeCombiningSequence.java
@@ -475,20 +475,17 @@ private int computeNumTasks()
     final int computedNumParallelTasks = Math.max(computedOptimalParallelism, 1);
 
-    LOG.debug(
-        "Computed parallel tasks: [%s]; ForkJoinPool details - sequence parallelism: [%s] "
-        + "active threads: [%s] running threads: [%s] queued submissions: [%s] queued tasks: [%s] "
-        + "pool parallelism: [%s] pool size: [%s] steal count: [%s]",
-        computedNumParallelTasks,
-        parallelism,
-        getPool().getActiveThreadCount(),
-        runningThreadCount,
-        submissionCount,
-        getPool().getQueuedTaskCount(),
-        getPool().getParallelism(),
-        getPool().getPoolSize(),
-        getPool().getStealCount()
-    );
+    if (LOG.isDebugEnabled()) {
+      ForkJoinPool pool = getPool();
+      LOG.debug(
+          "Computed parallel tasks: [%s]; ForkJoinPool details - sequence parallelism: [%s] "
+          + "active threads: [%s] running threads: [%s] queued submissions: [%s] queued tasks: [%s] "
+          + "pool parallelism: [%s] pool size: [%s] steal count: [%s]",
+          computedNumParallelTasks, parallelism,
+          pool.getActiveThreadCount(), runningThreadCount, submissionCount, pool.getQueuedTaskCount(),
+          pool.getParallelism(), pool.getPoolSize(), pool.getStealCount()
+      );
+    }
     return computedNumParallelTasks;
   }
diff --git a/processing/src/main/java/org/apache/druid/java/util/common/io/smoosh/FileSmoosher.java b/processing/src/main/java/org/apache/druid/java/util/common/io/smoosh/FileSmoosher.java
index 17eb469e9497..f4103ec18473 100644
--- a/processing/src/main/java/org/apache/druid/java/util/common/io/smoosh/FileSmoosher.java
+++ b/processing/src/main/java/org/apache/druid/java/util/common/io/smoosh/FileSmoosher.java
@@ -490,11 +490,9 @@ public boolean isOpen()
     public void close() throws IOException
     {
       closer.close();
-      FileSmoosher.LOG.debug(
-          "Created smoosh file [%s] of size [%s] bytes.",
-          outFile.getAbsolutePath(),
-          outFile.length()
-      );
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Created smoosh file [%s] of size [%s] bytes.", outFile.getAbsolutePath(), outFile.length());
+      }
     }
   }
 }
diff --git a/server/src/main/java/org/apache/druid/discovery/DataServerClient.java b/server/src/main/java/org/apache/druid/discovery/DataServerClient.java
index 479ba9d4142d..ce7ac325b62b 100644
--- a/server/src/main/java/org/apache/druid/discovery/DataServerClient.java
+++ b/server/src/main/java/org/apache/druid/discovery/DataServerClient.java
@@ -91,7 +91,9 @@ public <T> Sequence<T> run(Query<T> query, ResponseContext responseContext, Java
       requestBuilder = requestBuilder.jsonContent(objectMapper, query);
     }
 
-    log.debug("Sending request to servers for query[%s], request[%s]", query.getId(), requestBuilder.toString());
+    if (log.isDebugEnabled()) {
+      log.debug("Sending request to servers for query[%s], request[%s]", query.getId(), requestBuilder);
+    }
     ListenableFuture<InputStream> resultStreamFuture = serviceClient.asyncRequest(
         requestBuilder,
         new DataServerResponseHandler(query, responseContext, objectMapper)
diff --git a/server/src/main/java/org/apache/druid/server/coordinator/DruidCoordinator.java b/server/src/main/java/org/apache/druid/server/coordinator/DruidCoordinator.java
index 5558d204e81a..caa1c7fb66f0 100644
--- a/server/src/main/java/org/apache/druid/server/coordinator/DruidCoordinator.java
+++ b/server/src/main/java/org/apache/druid/server/coordinator/DruidCoordinator.java
@@ -538,10 +538,9 @@ List<CoordinatorDuty> makeIndexingServiceDuties()
     if (getCompactSegmentsDutyFromCustomGroups().isEmpty()) {
       duties.add(compactSegments);
     }
-    log.debug(
-        "Initialized indexing service duties [%s].",
-        duties.stream().map(duty -> duty.getClass().getName()).collect(Collectors.toList())
-    );
+    if (log.isDebugEnabled()) {
+      log.debug("Initialized indexing service duties [%s].", duties.stream().map(duty -> duty.getClass().getName()).collect(Collectors.toList()));
+    }
     return ImmutableList.copyOf(duties);
   }
diff --git a/server/src/main/java/org/apache/druid/server/coordinator/loading/CuratorLoadQueuePeon.java b/server/src/main/java/org/apache/druid/server/coordinator/loading/CuratorLoadQueuePeon.java
index 2303be26d8b5..7ea78a132fe9 100644
--- a/server/src/main/java/org/apache/druid/server/coordinator/loading/CuratorLoadQueuePeon.java
+++ b/server/src/main/java/org/apache/druid/server/coordinator/loading/CuratorLoadQueuePeon.java
@@ -230,12 +230,12 @@ public void run()
           final String path = ZKPaths.makePath(basePath, segmentHolder.getSegmentIdentifier());
           final byte[] payload = jsonMapper.writeValueAsBytes(segmentHolder.getChangeRequest());
           curator.create().withMode(CreateMode.EPHEMERAL).forPath(path, payload);
-          log.debug(
-              "ZKNode created for server to [%s] %s [%s]",
-              basePath,
-              segmentHolder.getAction(),
-              segmentHolder.getSegmentIdentifier()
-          );
+          if (log.isDebugEnabled()) {
+            log.debug(
+                "ZKNode created for server to [%s] %s [%s]",
+                basePath, segmentHolder.getAction(), segmentHolder.getSegmentIdentifier()
+            );
+          }
           final ScheduledFuture<?> nodeDeletedCheck = scheduleNodeDeletedCheck(path);
           final Stat stat = curator.checkExists().usingWatcher(
               (CuratorWatcher) watchedEvent -> {