Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-26 09:32:01 +00:00)
Measure time spent during flush of the Buffer to the underlying table
This commit is contained in:
parent dca0cbf4eb
commit c68f7cd5b1
@@ -736,10 +736,11 @@ void StorageBuffer::flushBuffer(Buffer & buffer, bool check_thresholds, bool loc
 
     ProfileEvents::increment(ProfileEvents::StorageBufferFlush);
 
-    LOG_TRACE(log, "Flushing buffer with {} rows, {} bytes, age {} seconds {}.", rows, bytes, time_passed, (check_thresholds ? "(bg)" : "(direct)"));
-
     if (!destination_id)
+    {
+        LOG_TRACE(log, "Flushing buffer with {} rows (discarded), {} bytes, age {} seconds {}.", rows, bytes, time_passed, (check_thresholds ? "(bg)" : "(direct)"));
         return;
+    }
 
     /** For simplicity, buffer is locked during write.
      * We could unlock buffer temporary, but it would lead to too many difficulties:
@@ -747,6 +748,8 @@ void StorageBuffer::flushBuffer(Buffer & buffer, bool check_thresholds, bool loc
      * - new data could be appended to buffer, and in case of exception, we must merge it with old data, that has not been written;
      * - this could lead to infinite memory growth.
      */
+
+    Stopwatch watch;
     try
     {
         writeBlockToDestination(block_to_write, DatabaseCatalog::instance().tryGetTable(destination_id, global_context));
@@ -770,6 +773,9 @@ void StorageBuffer::flushBuffer(Buffer & buffer, bool check_thresholds, bool loc
         /// After a while, the next write attempt will happen.
         throw;
     }
+
+    UInt64 milliseconds = watch.elapsedMilliseconds();
+    LOG_TRACE(log, "Flushing buffer with {} rows, {} bytes, age {} seconds, took {} ms {}.", rows, bytes, time_passed, milliseconds, (check_thresholds ? "(bg)" : "(direct)"));
 }
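
The patch boils down to: start a stopwatch right before writeBlockToDestination() and report the elapsed milliseconds in the trace log once the write has finished. Below is a minimal standalone sketch of that pattern, assuming nothing about ClickHouse internals: std::chrono and std::printf stand in for ClickHouse's Stopwatch and LOG_TRACE, and the Block, flushBuffer and writeBlockToDestination definitions here are simplified illustrations, not the real interfaces.

#include <chrono>
#include <cstddef>
#include <cstdio>
#include <stdexcept>

struct Block { std::size_t rows = 0; std::size_t bytes = 0; };

// Hypothetical stand-in for ClickHouse's writeBlockToDestination().
void writeBlockToDestination(const Block & block)
{
    if (block.rows == 0)
        throw std::runtime_error("nothing to write");
    // ... a real implementation would push the block into the destination table ...
}

void flushBuffer(Block & buffer, bool has_destination, bool check_thresholds)
{
    const std::size_t rows = buffer.rows;
    const std::size_t bytes = buffer.bytes;
    const char * mode = check_thresholds ? "(bg)" : "(direct)";

    // No destination table: the data is discarded, as in the patch's first hunk.
    if (!has_destination)
    {
        std::printf("Flushing buffer with %zu rows (discarded), %zu bytes %s.\n", rows, bytes, mode);
        buffer = {};
        return;
    }

    // Start the clock just before the write; mirrors `Stopwatch watch;`.
    const auto start = std::chrono::steady_clock::now();

    try
    {
        writeBlockToDestination(buffer);
        buffer = {};
    }
    catch (...)
    {
        // The data stays buffered; a later flush attempt will retry the write.
        throw;
    }

    // Mirrors `UInt64 milliseconds = watch.elapsedMilliseconds();`.
    const auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::steady_clock::now() - start).count();

    std::printf("Flushing buffer with %zu rows, %zu bytes, took %lld ms %s.\n",
                rows, bytes, static_cast<long long>(ms), mode);
}

int main()
{
    Block buffer{1000, 64 * 1024};
    flushBuffer(buffer, /*has_destination=*/true, /*check_thresholds=*/false);
}

Using a monotonic clock for the measurement (std::chrono::steady_clock in this sketch) keeps the reported duration immune to wall-clock adjustments, which is the usual choice for this kind of instrumentation.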
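
The comment block quoted in the first two hunks ("For simplicity, buffer is locked during write ...") also explains why the measured write happens entirely under the buffer lock: the buffer stays locked until the destination write either succeeds or throws, so failed data never has to be merged back with freshly appended rows and cannot pile up indefinitely. A rough sketch of that policy, again with made-up names rather than ClickHouse's actual classes:

#include <cstdio>
#include <mutex>
#include <vector>

struct Row { int value = 0; };

struct SimpleBuffer
{
    std::mutex mutex;       // held for the whole flush, as the comment describes
    std::vector<Row> rows;

    void insert(Row row)
    {
        std::lock_guard<std::mutex> lock(mutex);
        rows.push_back(row);    // writers simply wait while a flush is running
    }

    template <typename WriteFn>
    void flush(WriteFn && write_to_destination)
    {
        std::lock_guard<std::mutex> lock(mutex);
        if (rows.empty())
            return;

        // If this throws, `rows` is left untouched: there is no unwritten batch
        // to merge with newly inserted data, and memory cannot grow without
        // bound from re-buffering failed writes. The next flush simply retries.
        write_to_destination(rows);

        rows.clear();           // drop the data only after the write succeeded
    }
};

int main()
{
    SimpleBuffer buffer;
    buffer.insert(Row{1});
    buffer.insert(Row{2});

    buffer.flush([](const std::vector<Row> & batch)
    {
        std::printf("wrote %zu rows to the destination\n", batch.size());
    });
}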