From 9d24908e536d08ba5a72de882e5650003ee72553 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov <milovidov@yandex-team.ru>
Date: Sat, 23 May 2020 20:52:11 +0300
Subject: [PATCH] Progress on task

---
 src/Access/AccessRights.cpp                   | 11 +++++-----
 src/Access/AllowedClientHosts.cpp             | 12 +++++------
 src/Access/ContextAccess.cpp                  | 15 ++++++++------
 src/Client/Connection.cpp                     | 19 ++++++++----------
 .../ParallelAggregatingBlockInputStream.cpp   |  8 ++++----
 src/Databases/DatabaseOrdinary.cpp            |  2 +-
 src/Dictionaries/CacheDictionary.cpp          |  3 +--
 src/Dictionaries/DictionaryFactory.cpp        |  4 +---
 src/Dictionaries/MySQLDictionarySource.cpp    |  2 +-
 src/Disks/DiskLocal.cpp                       |  6 ++----
 src/Disks/DiskSelector.cpp                    |  2 +-
 src/Disks/IDisk.cpp                           |  4 +---
 src/Disks/S3/DiskS3.cpp                       | 17 ++++++----------
 src/Interpreters/Aggregator.cpp               | 20 +++++++++----------
 src/Processors/Executors/PipelineExecutor.cpp | 12 +++++------
 .../Transforms/AggregatingTransform.cpp       |  8 ++++----
 .../MergeTree/MergeTreeDataMergerMutator.cpp  |  4 ++--
 src/Storages/MergeTree/MergeTreeReadPool.cpp  |  4 ++--
 18 files changed, 70 insertions(+), 83 deletions(-)

diff --git a/src/Access/AccessRights.cpp b/src/Access/AccessRights.cpp
index ed5e1abdb2a..966fcdfc062 100644
--- a/src/Access/AccessRights.cpp
+++ b/src/Access/AccessRights.cpp
@@ -251,12 +251,11 @@ public:
 
     void logTree(Poco::Logger * log) const
     {
-        LOG_TRACE(log, "Tree(" << level << "): name=" << (node_name ? *node_name : "NULL")
-            << ", access=" << access.toString()
-            << ", final_access=" << final_access.toString()
-            << ", min_access=" << min_access.toString()
-            << ", max_access=" << max_access.toString()
-            << ", num_children=" << (children ? children->size() : 0));
+        LOG_TRACE_FORMATTED(log, "Tree({}): name={}, access={}, final_access={}, min_access={}, max_access={}, num_children={}",
+            level, node_name ? *node_name : "NULL", access.toString(),
+            final_access.toString(), min_access.toString(), max_access.toString(),
+            (children ? children->size() : 0));
+
         if (children)
         {
             for (auto & child : *children | boost::adaptors::map_values)
diff --git a/src/Access/AllowedClientHosts.cpp b/src/Access/AllowedClientHosts.cpp
index 1db7e6d508d..201efa499be 100644
--- a/src/Access/AllowedClientHosts.cpp
+++ b/src/Access/AllowedClientHosts.cpp
@@ -308,10 +308,10 @@ bool AllowedClientHosts::contains(const IPAddress & client_address) const
             if (e.code() != ErrorCodes::DNS_ERROR)
                 throw;
             /// Try to ignore DNS errors: if host cannot be resolved, skip it and try next.
-            LOG_WARNING(
+            LOG_WARNING_FORMATTED(
                 &Logger::get("AddressPatterns"),
-                "Failed to check if the allowed client hosts contain address " << client_address.toString() << ". " << e.displayText()
-                    << ", code = " << e.code());
+                "Failed to check if the allowed client hosts contain address {}. {}, code = {}",
+                client_address.toString(), e.displayText(), e.code());
             return false;
         }
     };
@@ -341,10 +341,10 @@ bool AllowedClientHosts::contains(const IPAddress & client_address) const
             if (e.code() != ErrorCodes::DNS_ERROR)
                 throw;
             /// Try to ignore DNS errors: if host cannot be resolved, skip it and try next.
-            LOG_WARNING(
+            LOG_WARNING_FORMATTED(
                 &Logger::get("AddressPatterns"),
-                "Failed to check if the allowed client hosts contain address " << client_address.toString() << ". " << e.displayText()
-                    << ", code = " << e.code());
+                "Failed to check if the allowed client hosts contain address {}. {}, code = {}",
{}, code = {}", + client_address.toString(), e.displayText(), e.code()); return false; } }; diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index 40eadd1bee0..644f4a5817d 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -219,7 +219,7 @@ bool ContextAccess::calculateResultAccessAndCheck(Poco::Logger * log_, const Acc if constexpr (mode == THROW_IF_ACCESS_DENIED) throw Exception(user_name + ": " + msg, error_code); else if constexpr (mode == LOG_WARNING_IF_ACCESS_DENIED) - LOG_WARNING(log_, user_name + ": " + msg + formatSkippedMessage(args...)); + LOG_WARNING_FORMATTED(log_, "{}: {}{}", user_name, msg, formatSkippedMessage(args...)); }; if (!user) @@ -451,13 +451,16 @@ boost::shared_ptr ContextAccess::calculateResultAccess(bool if (trace_log && (params.readonly == readonly_) && (params.allow_ddl == allow_ddl_) && (params.allow_introspection == allow_introspection_)) { - LOG_TRACE(trace_log, "List of all grants: " << merged_access->toString() << (grant_option ? " WITH GRANT OPTION" : "")); + if (grant_option) + LOG_TRACE_FORMATTED(trace_log, "List of all grants: {} WITH GRANT OPTION", merged_access->toString()); + else + LOG_TRACE_FORMATTED(trace_log, "List of all grants: {}", merged_access->toString()); + if (roles_info && !roles_info->getCurrentRolesNames().empty()) { - LOG_TRACE( - trace_log, - "Current_roles: " << boost::algorithm::join(roles_info->getCurrentRolesNames(), ", ") - << ", enabled_roles: " << boost::algorithm::join(roles_info->getEnabledRolesNames(), ", ")); + LOG_TRACE_FORMATTED(trace_log, "Current_roles: {}, enabled_roles: {}", + boost::algorithm::join(roles_info->getCurrentRolesNames(), ", "), + boost::algorithm::join(roles_info->getEnabledRolesNames(), ", ")); } LOG_TRACE_FORMATTED(trace_log, "Settings: readonly={}, allow_ddl={}, allow_introspection_functions={}", readonly_, allow_ddl_, allow_introspection_); } diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index 1b9dfde9177..0173291e76c 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -61,11 +61,11 @@ void Connection::connect(const ConnectionTimeouts & timeouts) if (connected) disconnect(); - LOG_TRACE(log_wrapper.get(), "Connecting. Database: " - << (default_database.empty() ? "(not specified)" : default_database) - << ". User: " << user - << (static_cast(secure) ? ". Secure" : "") - << (static_cast(compression) ? "" : ". Uncompressed")); + LOG_TRACE_FORMATTED(log_wrapper.get(), "Connecting. Database: {}. User: {}{}{}", + default_database.empty() ? "(not specified)" : default_database, + user, + static_cast(secure) ? ". Secure" : "", + static_cast(compression) ? "" : ". Uncompressed"); if (static_cast(secure)) { @@ -107,11 +107,8 @@ void Connection::connect(const ConnectionTimeouts & timeouts) sendHello(); receiveHello(); - LOG_TRACE(log_wrapper.get(), "Connected to " << server_name - << " server version " << server_version_major - << "." << server_version_minor - << "." 
-            << ".");
+        LOG_TRACE_FORMATTED(log_wrapper.get(), "Connected to {} server version {}.{}.{}.",
+            server_name, server_version_major, server_version_minor, server_version_patch);
     }
     catch (Poco::Net::NetException & e)
     {
@@ -324,7 +321,7 @@ bool Connection::ping()
     }
     catch (const Poco::Exception & e)
     {
-        LOG_TRACE(log_wrapper.get(), e.displayText());
+        LOG_TRACE_FORMATTED(log_wrapper.get(), "{}", e.displayText());
         return false;
     }
 
diff --git a/src/DataStreams/ParallelAggregatingBlockInputStream.cpp b/src/DataStreams/ParallelAggregatingBlockInputStream.cpp
index 91663d9fc7a..979999ab745 100644
--- a/src/DataStreams/ParallelAggregatingBlockInputStream.cpp
+++ b/src/DataStreams/ParallelAggregatingBlockInputStream.cpp
@@ -179,8 +179,8 @@ void ParallelAggregatingBlockInputStream::execute()
     for (size_t i = 0; i < max_threads; ++i)
     {
         size_t rows = many_data[i]->size();
-        LOG_TRACE(log, std::fixed << std::setprecision(3)
-            << "Aggregated. " << threads_data[i].src_rows << " to " << rows << " rows"
+        LOG_TRACE(log,
+            "Aggregated. " << threads_data[i].src_rows << " to " << rows << " rows"
             << " (from " << threads_data[i].src_bytes / 1048576.0 << " MiB)"
             << " in " << elapsed_seconds << " sec."
             << " (" << threads_data[i].src_rows / elapsed_seconds << " rows/sec., "
@@ -189,8 +189,8 @@ void ParallelAggregatingBlockInputStream::execute()
         total_src_rows += threads_data[i].src_rows;
         total_src_bytes += threads_data[i].src_bytes;
     }
-    LOG_TRACE(log, std::fixed << std::setprecision(3)
-        << "Total aggregated. " << total_src_rows << " rows (from " << total_src_bytes / 1048576.0 << " MiB)"
+    LOG_TRACE(log,
+        "Total aggregated. " << total_src_rows << " rows (from " << total_src_bytes / 1048576.0 << " MiB)"
         << " in " << elapsed_seconds << " sec."
         << " (" << total_src_rows / elapsed_seconds << " rows/sec., "
         << total_src_bytes / elapsed_seconds / 1048576.0 << " MiB/sec.)");
diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp
index 09a4d28c6e7..5c93d2cbfca 100644
--- a/src/Databases/DatabaseOrdinary.cpp
+++ b/src/Databases/DatabaseOrdinary.cpp
@@ -95,7 +95,7 @@ namespace
 {
     if (processed % PRINT_MESSAGE_EACH_N_OBJECTS == 0 || watch.compareAndRestart(PRINT_MESSAGE_EACH_N_SECONDS))
     {
-        LOG_INFO(log, std::fixed << std::setprecision(2) << processed * 100.0 / total << "%");
+        LOG_INFO_FORMATTED(log, "{}%", processed * 100.0 / total);
         watch.restart();
     }
 }
diff --git a/src/Dictionaries/CacheDictionary.cpp b/src/Dictionaries/CacheDictionary.cpp
index 479f6ba8922..7ac9e5401e5 100644
--- a/src/Dictionaries/CacheDictionary.cpp
+++ b/src/Dictionaries/CacheDictionary.cpp
@@ -768,8 +768,7 @@ void CacheDictionary::updateThreadFunction()
         const size_t current_queue_size = update_queue.size();
 
         if (current_queue_size > 0)
-            LOG_TRACE(log, "Performing bunch of keys update in cache dictionary with "
-                << current_queue_size + 1 << " keys");
+            LOG_TRACE_FORMATTED(log, "Performing bunch of keys update in cache dictionary with {} keys", current_queue_size + 1);
 
         std::vector<UpdateUnitPtr> update_request;
         update_request.reserve(current_queue_size + 1);
diff --git a/src/Dictionaries/DictionaryFactory.cpp b/src/Dictionaries/DictionaryFactory.cpp
index 326e0f08d89..056a6861a33 100644
--- a/src/Dictionaries/DictionaryFactory.cpp
+++ b/src/Dictionaries/DictionaryFactory.cpp
@@ -43,9 +43,7 @@ DictionaryPtr DictionaryFactory::create(
 
     const DictionaryStructure dict_struct{config, config_prefix + ".structure"};
     DictionarySourcePtr source_ptr = DictionarySourceFactory::instance().create(
".source", dict_struct, context, check_source_config); - LOG_TRACE(&Poco::Logger::get("DictionaryFactory"), - "Created dictionary source '" << source_ptr->toString() - << "' for dictionary '" << name << "'"); + LOG_TRACE_FORMATTED(&Poco::Logger::get("DictionaryFactory"), "Created dictionary source '{}' for dictionary '{}'", source_ptr->toString(), name); const auto & layout_type = keys.front(); diff --git a/src/Dictionaries/MySQLDictionarySource.cpp b/src/Dictionaries/MySQLDictionarySource.cpp index 0c4ce686f62..f8d035f2037 100644 --- a/src/Dictionaries/MySQLDictionarySource.cpp +++ b/src/Dictionaries/MySQLDictionarySource.cpp @@ -213,7 +213,7 @@ LocalDateTime MySQLDictionarySource::getLastModification(mysqlxx::Pool::Entry & { auto query = connection->query("SHOW TABLE STATUS LIKE " + quoteForLike(table)); - LOG_TRACE(log, query.str()); + LOG_TRACE_FORMATTED(log, query.str()); auto result = query.use(); diff --git a/src/Disks/DiskLocal.cpp b/src/Disks/DiskLocal.cpp index f7c1d836085..31ea07482a7 100644 --- a/src/Disks/DiskLocal.cpp +++ b/src/Disks/DiskLocal.cpp @@ -100,10 +100,8 @@ bool DiskLocal::tryReserve(UInt64 bytes) UInt64 unreserved_space = available_space - std::min(available_space, reserved_bytes); if (unreserved_space >= bytes) { - LOG_DEBUG( - &Logger::get("DiskLocal"), - "Reserving " << formatReadableSizeWithBinarySuffix(bytes) << " on disk " << backQuote(name) << ", having unreserved " - << formatReadableSizeWithBinarySuffix(unreserved_space) << "."); + LOG_DEBUG_FORMATTED(&Logger::get("DiskLocal"), "Reserving {} on disk {}, having unreserved {}.", + formatReadableSizeWithBinarySuffix(bytes), backQuote(name), formatReadableSizeWithBinarySuffix(unreserved_space)); ++reservation_count; reserved_bytes += bytes; return true; diff --git a/src/Disks/DiskSelector.cpp b/src/Disks/DiskSelector.cpp index 69549e4520d..fc74f8f789b 100644 --- a/src/Disks/DiskSelector.cpp +++ b/src/Disks/DiskSelector.cpp @@ -99,7 +99,7 @@ DiskSelectorPtr DiskSelector::updateFromConfig( } writeString(" disappeared from configuration, this change will be applied after restart of ClickHouse", warning); - LOG_WARNING(&Logger::get("DiskSelector"), warning.str()); + LOG_WARNING_FORMATTED(&Logger::get("DiskSelector"), warning.str()); } return result; diff --git a/src/Disks/IDisk.cpp b/src/Disks/IDisk.cpp index 36ab2a49573..aeb54deead8 100644 --- a/src/Disks/IDisk.cpp +++ b/src/Disks/IDisk.cpp @@ -15,9 +15,7 @@ bool IDisk::isDirectoryEmpty(const String & path) void copyFile(IDisk & from_disk, const String & from_path, IDisk & to_disk, const String & to_path) { - LOG_DEBUG( - &Poco::Logger::get("IDisk"), - "Copying from " << from_disk.getName() << " " << from_path << " to " << to_disk.getName() << " " << to_path); + LOG_DEBUG_FORMATTED(&Poco::Logger::get("IDisk"), "Copying from {} {} to {} {}.", from_disk.getName(), from_path, to_disk.getName(), to_path); auto in = from_disk.readFile(from_path); auto out = to_disk.writeFile(to_path); diff --git a/src/Disks/S3/DiskS3.cpp b/src/Disks/S3/DiskS3.cpp index 8dd7be00465..7bf151fba5f 100644 --- a/src/Disks/S3/DiskS3.cpp +++ b/src/Disks/S3/DiskS3.cpp @@ -515,9 +515,8 @@ std::unique_ptr DiskS3::readFile(const String & path, si { Metadata metadata(metadata_path, path); - LOG_DEBUG( - &Logger::get("DiskS3"), - "Read from file by path: " << backQuote(metadata_path + path) << " Existing S3 objects: " << metadata.s3_objects.size()); + LOG_DEBUG_FORMATTED(&Logger::get("DiskS3"), "Read from file by path: {}. 
Existing S3 objects: {}", + backQuote(metadata_path + path), metadata.s3_objects.size()); return std::make_unique(client, bucket, metadata, buf_size); } @@ -545,10 +544,8 @@ std::unique_ptr DiskS3::writeFile(const String & path, { Metadata metadata(metadata_path, path); - LOG_DEBUG( - &Logger::get("DiskS3"), - "Append to file by path: " << backQuote(metadata_path + path) << " New S3 path: " << s3_path - << " Existing S3 objects: " << metadata.s3_objects.size()); + LOG_DEBUG_FORMATTED(&Logger::get("DiskS3"), "Append to file by path: {}. New S3 path: {}. Existing S3 objects: {}.", + backQuote(metadata_path + path), s3_path, metadata.s3_objects.size()); return std::make_unique(client, bucket, metadata, s3_path, min_upload_part_size, buf_size); } @@ -619,10 +616,8 @@ bool DiskS3::tryReserve(UInt64 bytes) UInt64 unreserved_space = available_space - std::min(available_space, reserved_bytes); if (unreserved_space >= bytes) { - LOG_DEBUG( - &Logger::get("DiskS3"), - "Reserving " << formatReadableSizeWithBinarySuffix(bytes) << " on disk " << backQuote(name) << ", having unreserved " - << formatReadableSizeWithBinarySuffix(unreserved_space) << "."); + LOG_DEBUG_FORMATTED(&Logger::get("DiskS3"), "Reserving {} on disk {}, having unreserved {}.", + formatReadableSizeWithBinarySuffix(bytes), backQuote(name), formatReadableSizeWithBinarySuffix(unreserved_space)); ++reservation_count; reserved_bytes += bytes; return true; diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 51fdf57c50c..6dc53befc9d 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -761,8 +761,8 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, co ProfileEvents::increment(ProfileEvents::ExternalAggregationCompressedBytes, compressed_bytes); ProfileEvents::increment(ProfileEvents::ExternalAggregationUncompressedBytes, uncompressed_bytes); - LOG_TRACE(log, std::fixed << std::setprecision(3) - << "Written part in " << elapsed_seconds << " sec., " + LOG_TRACE(log, + "Written part in " << elapsed_seconds << " sec., " << rows << " rows, " << (uncompressed_bytes / 1048576.0) << " MiB uncompressed, " << (compressed_bytes / 1048576.0) << " MiB compressed, " @@ -867,8 +867,8 @@ void Aggregator::writeToTemporaryFileImpl( /// `data_variants` will not destroy them in the destructor, they are now owned by ColumnAggregateFunction objects. data_variants.aggregator = nullptr; - LOG_TRACE(log, std::fixed << std::setprecision(3) - << "Max size of temporary block: " << max_temporary_block_size_rows << " rows, " + LOG_TRACE(log, + "Max size of temporary block: " << max_temporary_block_size_rows << " rows, " << (max_temporary_block_size_bytes / 1048576.0) << " MiB."); } @@ -939,8 +939,8 @@ void Aggregator::execute(const BlockInputStreamPtr & stream, AggregatedDataVaria double elapsed_seconds = watch.elapsedSeconds(); size_t rows = result.sizeWithoutOverflowRow(); - LOG_TRACE(log, std::fixed << std::setprecision(3) - << "Aggregated. " << src_rows << " to " << rows << " rows (from " << src_bytes / 1048576.0 << " MiB)" + LOG_TRACE(log, + "Aggregated. " << src_rows << " to " << rows << " rows (from " << src_bytes / 1048576.0 << " MiB)" << " in " << elapsed_seconds << " sec." 
<< " (" << src_rows / elapsed_seconds << " rows/sec., " << src_bytes / elapsed_seconds / 1048576.0 << " MiB/sec.)"); } @@ -1308,8 +1308,8 @@ BlocksList Aggregator::convertToBlocks(AggregatedDataVariants & data_variants, b } double elapsed_seconds = watch.elapsedSeconds(); - LOG_TRACE(log, std::fixed << std::setprecision(3) - << "Converted aggregated data to blocks. " + LOG_TRACE(log, + "Converted aggregated data to blocks. " << rows << " rows, " << bytes / 1048576.0 << " MiB" << " in " << elapsed_seconds << " sec." << " (" << rows / elapsed_seconds << " rows/sec., " << bytes / elapsed_seconds / 1048576.0 << " MiB/sec.)"); @@ -2175,8 +2175,8 @@ Block Aggregator::mergeBlocks(BlocksList & blocks, bool final) size_t rows = block.rows(); size_t bytes = block.bytes(); double elapsed_seconds = watch.elapsedSeconds(); - LOG_TRACE(log, std::fixed << std::setprecision(3) - << "Merged partially aggregated blocks. " + LOG_TRACE(log, + "Merged partially aggregated blocks. " << rows << " rows, " << bytes / 1048576.0 << " MiB." << " in " << elapsed_seconds << " sec." << " (" << rows / elapsed_seconds << " rows/sec., " << bytes / elapsed_seconds / 1048576.0 << " MiB/sec.)"); diff --git a/src/Processors/Executors/PipelineExecutor.cpp b/src/Processors/Executors/PipelineExecutor.cpp index 5bf7ec0ff15..2a843f23319 100644 --- a/src/Processors/Executors/PipelineExecutor.cpp +++ b/src/Processors/Executors/PipelineExecutor.cpp @@ -547,12 +547,12 @@ void PipelineExecutor::executeSingleThread(size_t thread_num, size_t num_threads #ifndef NDEBUG auto & context = executor_contexts[thread_num]; - LOG_TRACE(log, std::fixed << std::setprecision(3) - << "Thread finished." - << " Total time: " << (context->total_time_ns / 1e9) << " sec." - << " Execution time: " << (context->execution_time_ns / 1e9) << " sec." - << " Processing time: " << (context->processing_time_ns / 1e9) << " sec." - << " Wait time: " << (context->wait_time_ns / 1e9) << " sec."); + LOG_TRACE(log, + "Thread finished." + << " Total time: " << (context->total_time_ns / 1e9) << " sec." + << " Execution time: " << (context->execution_time_ns / 1e9) << " sec." + << " Processing time: " << (context->processing_time_ns / 1e9) << " sec." + << " Wait time: " << (context->wait_time_ns / 1e9) << " sec."); #endif } diff --git a/src/Processors/Transforms/AggregatingTransform.cpp b/src/Processors/Transforms/AggregatingTransform.cpp index d0d521e3e2c..a14e649b4d1 100644 --- a/src/Processors/Transforms/AggregatingTransform.cpp +++ b/src/Processors/Transforms/AggregatingTransform.cpp @@ -540,10 +540,10 @@ void AggregatingTransform::initGenerate() double elapsed_seconds = watch.elapsedSeconds(); size_t rows = variants.sizeWithoutOverflowRow(); - LOG_TRACE(log, std::fixed << std::setprecision(3) - << "Aggregated. " << src_rows << " to " << rows << " rows (from " << src_bytes / 1048576.0 << " MiB)" - << " in " << elapsed_seconds << " sec." - << " (" << src_rows / elapsed_seconds << " rows/sec., " << src_bytes / elapsed_seconds / 1048576.0 << " MiB/sec.)"); + LOG_TRACE(log, + "Aggregated. " << src_rows << " to " << rows << " rows (from " << src_bytes / 1048576.0 << " MiB)" + << " in " << elapsed_seconds << " sec." 
+ << " (" << src_rows / elapsed_seconds << " rows/sec., " << src_bytes / elapsed_seconds / 1048576.0 << " MiB/sec.)"); if (params->aggregator.hasTemporaryFiles()) { diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 76ce3daf820..11cac2245cb 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -949,8 +949,8 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor /// Print overall profiling info. NOTE: it may duplicates previous messages { double elapsed_seconds = merge_entry->watch.elapsedSeconds(); - LOG_DEBUG(log, std::fixed << std::setprecision(2) - << "Merge sorted " << merge_entry->rows_read << " rows" + LOG_DEBUG(log, + "Merge sorted " << merge_entry->rows_read << " rows" << ", containing " << all_column_names.size() << " columns" << " (" << merging_column_names.size() << " merged, " << gathering_column_names.size() << " gathered)" << " in " << elapsed_seconds << " sec., " diff --git a/src/Storages/MergeTree/MergeTreeReadPool.cpp b/src/Storages/MergeTree/MergeTreeReadPool.cpp index 8c8f240203b..5a51b9ee216 100644 --- a/src/Storages/MergeTree/MergeTreeReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPool.cpp @@ -166,8 +166,8 @@ void MergeTreeReadPool::profileFeedback(const ReadBufferFromFileBase::ProfileInf ++backoff_state.num_events; ProfileEvents::increment(ProfileEvents::SlowRead); - LOG_DEBUG(log, std::fixed << std::setprecision(3) - << "Slow read, event №" << backoff_state.num_events + LOG_DEBUG(log, + "Slow read, event №" << backoff_state.num_events << ": read " << info.bytes_read << " bytes in " << info.nanoseconds / 1000000000.0 << " sec., " << info.bytes_read * 1000.0 / info.nanoseconds << " MB/s.");