From 7eebe061a69b20430d911383a199b381c4e4fbfa Mon Sep 17 00:00:00 2001
From: Andrew Lamb <andrew@nerdnetworks.org>
Date: Wed, 27 Jul 2022 15:35:31 -0400
Subject: [PATCH] fix: reduce log verbosity for `found compaction candidates`
 message (#5225)

* fix: reduce log verbosity

* refactor: sleep for a sec if no work, print debug

Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
---
 compactor/src/handler.rs | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/compactor/src/handler.rs b/compactor/src/handler.rs
index 91434a9cce..24ead85527 100644
--- a/compactor/src/handler.rs
+++ b/compactor/src/handler.rs
@@ -15,7 +15,10 @@ use observability_deps::tracing::*;
 use parquet_file::storage::ParquetStorage;
 use std::sync::Arc;
 use thiserror::Error;
-use tokio::task::{JoinError, JoinHandle};
+use tokio::{
+    task::{JoinError, JoinHandle},
+    time::Duration,
+};
 use tokio_util::sync::CancellationToken;
 
 use crate::compact::Compactor;
@@ -230,6 +233,10 @@ impl CompactorConfig {
     }
 }
 
+/// How long to pause before checking for more work again if there was
+/// no work to do
+const PAUSE_BETWEEN_NO_WORK: Duration = Duration::from_secs(1);
+
 /// Checks for candidate partitions to compact and spawns tokio tasks to compact as many
 /// as the configuration will allow. Once those are done it rechecks the catalog for the
 /// next top partitions to compact.
@@ -281,7 +288,13 @@ async fn run_compactor(compactor: Arc<Compactor>, shutdown: CancellationToken) {
     }
 
     let n_candidates = candidates.len();
-    debug!(n_candidates, "found compaction candidates");
+    if n_candidates == 0 {
+        // sleep for a second to avoid a hot busy loop when the
+        // catalog is polled
+        tokio::time::sleep(PAUSE_BETWEEN_NO_WORK).await
+    } else {
+        debug!(n_candidates, "found compaction candidates");
+    }
 
     // Serially compact all candidates
     // TODO: we will parallelize this when everything runs smoothly in serial