From c68f7cd5b1a3ec96218abd631b895e9c67014fa6 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Thu, 21 Jan 2021 21:11:39 +0300 Subject: [PATCH] Measure time spent during flush of the Buffer to the underlying table --- src/Storages/StorageBuffer.cpp | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index e0657f31de5..77da8c9c972 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -736,10 +736,11 @@ void StorageBuffer::flushBuffer(Buffer & buffer, bool check_thresholds, bool loc ProfileEvents::increment(ProfileEvents::StorageBufferFlush); - LOG_TRACE(log, "Flushing buffer with {} rows, {} bytes, age {} seconds {}.", rows, bytes, time_passed, (check_thresholds ? "(bg)" : "(direct)")); - if (!destination_id) + { + LOG_TRACE(log, "Flushing buffer with {} rows (discarded), {} bytes, age {} seconds {}.", rows, bytes, time_passed, (check_thresholds ? "(bg)" : "(direct)")); return; + } /** For simplicity, buffer is locked during write. * We could unlock buffer temporary, but it would lead to too many difficulties: @@ -747,6 +748,8 @@ void StorageBuffer::flushBuffer(Buffer & buffer, bool check_thresholds, bool loc * - new data could be appended to buffer, and in case of exception, we must merge it with old data, that has not been written; * - this could lead to infinite memory growth. */ + + Stopwatch watch; try { writeBlockToDestination(block_to_write, DatabaseCatalog::instance().tryGetTable(destination_id, global_context)); @@ -770,6 +773,9 @@ void StorageBuffer::flushBuffer(Buffer & buffer, bool check_thresholds, bool loc /// After a while, the next write attempt will happen. throw; } + + UInt64 milliseconds = watch.elapsedMilliseconds(); + LOG_TRACE(log, "Flushing buffer with {} rows, {} bytes, age {} seconds, took {} ms {}.", rows, bytes, time_passed, milliseconds, (check_thresholds ? "(bg)" : "(direct)"));