From ee4ffbc33279bfd65bff8fcf72994ed6015dfa83 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sat, 23 May 2020 19:47:56 +0300
Subject: [PATCH] find {base,src,programs} -name '*.h' -or -name '*.cpp' |
 xargs grep -l -P 'LOG_\w+\([^,]+, "[^"]+" << [^<]+\);' | xargs sed -i -r -e
 's/(LOG_\w+)\(([^,]+), "([^"]+)" << ([^<]+)\);/\1_FORMATTED(\2, "\3{}", \4);/'

---
 base/daemon/BaseDaemon.cpp                    |  2 +-
 programs/copier/ClusterCopier.cpp             | 34 +++++++++----------
 programs/copier/ZooKeeperStaff.h              |  4 +--
 programs/local/LocalServer.cpp                |  2 +-
 programs/odbc-bridge/MainHandler.cpp          |  2 +-
 programs/server/HTTPHandler.cpp               |  2 +-
 programs/server/InterserverIOHTTPHandler.cpp  |  2 +-
 programs/server/MySQLHandler.cpp              |  4 +--
 programs/server/MySQLHandlerFactory.cpp       |  4 +--
 programs/server/Server.cpp                    |  8 ++---
 programs/server/TCPHandlerFactory.h           |  2 +-
 src/Common/DNSResolver.cpp                    |  2 +-
 src/Common/PipeFDs.cpp                        |  2 +-
 src/Common/ShellCommand.cpp                   |  2 +-
 src/Core/MySQLProtocol.h                      |  2 +-
 ...regatedMemoryEfficientBlockInputStream.cpp |  2 +-
 src/DataStreams/ParallelInputsProcessor.h     |  2 +-
 src/Databases/DatabaseOnDisk.cpp              |  4 +--
 .../Embedded/RegionsHierarchies.cpp           |  2 +-
 src/Dictionaries/Embedded/RegionsNames.cpp    |  2 +-
 src/Dictionaries/MySQLDictionarySource.cpp    |  2 +-
 src/Disks/DiskLocal.cpp                       |  2 +-
 src/Disks/S3/DiskS3.cpp                       |  4 +--
 src/Disks/S3/ProxyListConfiguration.cpp       |  2 +-
 src/Disks/S3/ProxyResolverConfiguration.cpp   |  2 +-
 src/Disks/S3/registerDiskS3.cpp               |  2 +-
 src/IO/AIOContextPool.cpp                     |  2 +-
 src/IO/ReadWriteBufferFromHTTP.h              |  2 +-
 src/IO/WriteBufferFromHTTP.cpp                |  2 +-
 src/IO/WriteBufferFromS3.cpp                  |  4 +--
 src/Interpreters/Aggregator.cpp               |  2 +-
 src/Interpreters/Context.cpp                  |  2 +-
 src/Interpreters/DDLWorker.cpp                |  8 ++---
 src/Interpreters/HashJoin.cpp                 |  2 +-
 src/Interpreters/ThreadStatusExt.cpp          |  4 +--
 src/Interpreters/executeQuery.cpp             |  2 +-
 src/Processors/Executors/PipelineExecutor.cpp |  2 +-
 .../Formats/Impl/AvroRowInputFormat.cpp       |  2 +-
 .../Kafka/ReadBufferFromKafkaConsumer.cpp     | 16 ++++-----
 src/Storages/MergeTree/DataPartsExchange.cpp  |  2 +-
 src/Storages/MergeTree/IMergeTreeDataPart.cpp |  4 +--
 src/Storages/MergeTree/MergeTreeData.cpp      | 24 ++++++------
 .../MergeTree/MergeTreeDataMergerMutator.cpp  |  2 +-
 .../MergeTree/MergeTreeDataSelectExecutor.cpp | 10 +++---
 .../MergeTree/MergeTreePartsMover.cpp         |  2 +-
 src/Storages/MergeTree/MergeTreeReadPool.cpp  |  2 +-
 .../ReplicatedMergeTreePartCheckThread.cpp    |  2 +-
 .../MergeTree/ReplicatedMergeTreeQueue.cpp    |  4 +--
 src/Storages/StorageDistributed.cpp           |  4 +--
 src/Storages/StorageMergeTree.cpp             | 12 +++---
 src/Storages/StorageReplicatedMergeTree.cpp   | 30 ++++++++--------
 51 files changed, 124 insertions(+), 124 deletions(-)

diff --git a/base/daemon/BaseDaemon.cpp b/base/daemon/BaseDaemon.cpp
index c703dc29b1b..692c1809139 100644
--- a/base/daemon/BaseDaemon.cpp
+++ b/base/daemon/BaseDaemon.cpp
@@ -180,7 +180,7 @@ public:
         // levels and more info, but for completeness we log all signals
         // here at trace level.
         // Don't use strsignal here, because it's not thread-safe.
- LOG_TRACE(log, "Received signal " << sig); + LOG_TRACE_FORMATTED(log, "Received signal {}", sig); if (sig == Signals::StopThread) { diff --git a/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp index dbd2d96c6f2..d9a75ec080d 100644 --- a/programs/copier/ClusterCopier.cpp +++ b/programs/copier/ClusterCopier.cpp @@ -26,7 +26,7 @@ void ClusterCopier::init() if (response.error != Coordination::ZOK) return; UInt64 version = ++task_description_version; - LOG_DEBUG(log, "Task description should be updated, local version " << version); + LOG_DEBUG_FORMATTED(log, "Task description should be updated, local version {}", version); }; task_description_path = task_zookeeper_path + "/description"; @@ -85,7 +85,7 @@ void ClusterCopier::discoverShardPartitions(const ConnectionTimeouts & timeouts, { TaskTable & task_table = task_shard->task_table; - LOG_INFO(log, "Discover partitions of shard " << task_shard->getDescription()); + LOG_INFO_FORMATTED(log, "Discover partitions of shard {}", task_shard->getDescription()); auto get_partitions = [&] () { return getShardPartitions(timeouts, *task_shard); }; auto existing_partitions_names = retry(get_partitions, 60); @@ -221,7 +221,7 @@ void ClusterCopier::reloadTaskDescription() if (code) throw Exception("Can't get description node " + task_description_path, ErrorCodes::BAD_ARGUMENTS); - LOG_DEBUG(log, "Loading description, zxid=" << task_description_current_stat.czxid); + LOG_DEBUG_FORMATTED(log, "Loading description, zxid={}", task_description_current_stat.czxid); auto config = getConfigurationFromXMLString(task_config_str); /// Setup settings @@ -548,7 +548,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t { if (e.code == Coordination::ZNODEEXISTS) { - LOG_DEBUG(log, "Someone is already moving pieces " << current_partition_attach_is_active); + LOG_DEBUG_FORMATTED(log, "Someone is already moving pieces {}", current_partition_attach_is_active); return TaskStatus::Active; } @@ -614,7 +614,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t " ATTACH PARTITION " + partition_name + " FROM " + getQuotedTable(helping_table); - LOG_DEBUG(log, "Executing ALTER query: " << query_alter_ast_string); + LOG_DEBUG_FORMATTED(log, "Executing ALTER query: {}", query_alter_ast_string); try { @@ -626,7 +626,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t PoolMode::GET_MANY, ClusterExecutionMode::ON_EACH_NODE); - LOG_INFO(log, "Number of nodes that executed ALTER query successfully : " << toString(num_nodes)); + LOG_INFO_FORMATTED(log, "Number of nodes that executed ALTER query successfully : {}", toString(num_nodes)); } catch (...) 
         {
@@ -647,7 +647,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
             query_deduplicate_ast_string += " OPTIMIZE TABLE " + getQuotedTable(original_table) +
                                             " PARTITION " + partition_name + " DEDUPLICATE;";
 
-            LOG_DEBUG(log, "Executing OPTIMIZE DEDUPLICATE query: " << query_alter_ast_string);
+            LOG_DEBUG_FORMATTED(log, "Executing OPTIMIZE DEDUPLICATE query: {}", query_deduplicate_ast_string);
 
             UInt64 num_nodes = executeQueryOnCluster(
                 task_table.cluster_push,
@@ -832,7 +832,7 @@ bool ClusterCopier::tryDropPartitionPiece(
     /// It is important, DROP PARTITION must be done synchronously
     settings_push.replication_alter_partitions_sync = 2;
 
-    LOG_DEBUG(log, "Execute distributed DROP PARTITION: " << query);
+    LOG_DEBUG_FORMATTED(log, "Execute distributed DROP PARTITION: {}", query);
     /// We have to drop partition_piece on each replica
     size_t num_shards = executeQueryOnCluster(
         cluster_push, query,
@@ -1210,7 +1210,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
     /// Load balancing
     auto worker_node_holder = createTaskWorkerNodeAndWaitIfNeed(zookeeper, current_task_piece_status_path, is_unprioritized_task);
 
-    LOG_DEBUG(log, "Processing " << current_task_piece_status_path);
+    LOG_DEBUG_FORMATTED(log, "Processing {}", current_task_piece_status_path);
 
     const String piece_status_path = partition_piece.getPartitionPieceShardsPath();
 
@@ -1253,7 +1253,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
     {
         if (e.code == Coordination::ZNODEEXISTS)
         {
-            LOG_DEBUG(log, "Someone is already processing " << current_task_piece_is_active_path);
+            LOG_DEBUG_FORMATTED(log, "Someone is already processing {}", current_task_piece_is_active_path);
             return TaskStatus::Active;
         }
 
@@ -1387,7 +1387,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
         create_query_push_ast->as<ASTCreateQuery &>().if_not_exists = true;
         String query = queryToString(create_query_push_ast);
 
-        LOG_DEBUG(log, "Create destination tables. Query: " << query);
+        LOG_DEBUG_FORMATTED(log, "Create destination tables. Query: {}", query);
         UInt64 shards = executeQueryOnCluster(task_table.cluster_push, query,
                                               create_query_push_ast, &task_cluster->settings_push,
                                               PoolMode::GET_MANY);
@@ -1419,7 +1419,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
         const auto & settings = context.getSettingsRef();
         query_insert_ast = parseQuery(p_query, query, settings.max_query_size, settings.max_parser_depth);
 
-        LOG_DEBUG(log, "Executing INSERT query: " << query);
+        LOG_DEBUG_FORMATTED(log, "Executing INSERT query: {}", query);
     }
 
     try
@@ -1513,7 +1513,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
             create_query_push_ast->as<ASTCreateQuery &>().if_not_exists = true;
             String query = queryToString(create_query_push_ast);
 
-            LOG_DEBUG(log, "Create destination tables. Query: " << query);
+            LOG_DEBUG_FORMATTED(log, "Create destination tables. Query: {}", query);
             UInt64 shards = executeQueryOnCluster(task_table.cluster_push, query,
                                                   create_query_push_ast, &task_cluster->settings_push,
                                                   PoolMode::GET_MANY);
@@ -1582,7 +1582,7 @@ void ClusterCopier::dropHelpingTables(const TaskTable & task_table)
     const ClusterPtr & cluster_push = task_table.cluster_push;
     Settings settings_push = task_cluster->settings_push;
 
-    LOG_DEBUG(log, "Execute distributed DROP TABLE: " << query);
+    LOG_DEBUG_FORMATTED(log, "Execute distributed DROP TABLE: {}", query);
     /// We have to drop partition_piece on each replica
     UInt64 num_nodes = executeQueryOnCluster(
         cluster_push, query,
@@ -1609,7 +1609,7 @@ void ClusterCopier::dropParticularPartitionPieceFromAllHelpingTables(const TaskT
     const ClusterPtr & cluster_push = task_table.cluster_push;
     Settings settings_push = task_cluster->settings_push;
 
-    LOG_DEBUG(log, "Execute distributed DROP PARTITION: " << query);
+    LOG_DEBUG_FORMATTED(log, "Execute distributed DROP PARTITION: {}", query);
     /// We have to drop partition_piece on each replica
     UInt64 num_nodes = executeQueryOnCluster(
         cluster_push, query,
@@ -1620,7 +1620,7 @@ void ClusterCopier::dropParticularPartitionPieceFromAllHelpingTables(const TaskT
         LOG_DEBUG(log, "DROP PARTITION query was successfully executed on " << toString(num_nodes) << " nodes.");
     }
 
-    LOG_DEBUG(log, "All helping tables dropped partition " << partition_name);
+    LOG_DEBUG_FORMATTED(log, "All helping tables dropped partition {}", partition_name);
 }
 
 String ClusterCopier::getRemoteCreateTable(const DatabaseAndTableName & table, Connection & connection, const Settings * settings)
@@ -1724,7 +1724,7 @@ std::set<String> ClusterCopier::getShardPartitions(const ConnectionTimeouts & ti
     const auto & settings = context.getSettingsRef();
     ASTPtr query_ast = parseQuery(parser_query, query, settings.max_query_size, settings.max_parser_depth);
 
-    LOG_DEBUG(log, "Computing destination partition set, executing query: " << query);
+    LOG_DEBUG_FORMATTED(log, "Computing destination partition set, executing query: {}", query);
 
     Context local_context = context;
     local_context.setSettings(task_cluster->settings_pull);
diff --git a/programs/copier/ZooKeeperStaff.h b/programs/copier/ZooKeeperStaff.h
index 2fc4d35400d..47a2df51ffb 100644
--- a/programs/copier/ZooKeeperStaff.h
+++ b/programs/copier/ZooKeeperStaff.h
@@ -183,11 +183,11 @@ public:
         switch (rsp.type)
         {
             case Coordination::CREATED:
-                LOG_DEBUG(logger, "CleanStateClock change: CREATED, at " << rsp.path);
+                LOG_DEBUG_FORMATTED(logger, "CleanStateClock change: CREATED, at {}", rsp.path);
                 stale->store(true);
                 break;
             case Coordination::CHANGED:
-                LOG_DEBUG(logger, "CleanStateClock change: CHANGED, at" << rsp.path);
+                LOG_DEBUG_FORMATTED(logger, "CleanStateClock change: CHANGED, at {}", rsp.path);
                 stale->store(true);
         }
     }
diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp
index 7a164e2b108..0ac05658181 100644
--- a/programs/local/LocalServer.cpp
+++ b/programs/local/LocalServer.cpp
@@ -211,7 +211,7 @@ try
     /// Lock path directory before read
     status.emplace(context->getPath() + "status");
 
-    LOG_DEBUG(log, "Loading metadata from " << context->getPath());
+    LOG_DEBUG_FORMATTED(log, "Loading metadata from {}", context->getPath());
     loadMetadataSystem(*context);
     attachSystemTables();
     loadMetadata(*context);
diff --git a/programs/odbc-bridge/MainHandler.cpp b/programs/odbc-bridge/MainHandler.cpp
index edc92552c44..05d1cb5d162 100644
--- a/programs/odbc-bridge/MainHandler.cpp
+++ b/programs/odbc-bridge/MainHandler.cpp
@@ -171,7 +171,7 @@ void ODBCHandler::handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Ne
     else
     {
         std::string query = params.get("query");
-        LOG_TRACE(log, "Query: " << query);
+        LOG_TRACE_FORMATTED(log, "Query: {}", query);
 
         BlockOutputStreamPtr writer = FormatFactory::instance().getOutput(format, out, *sample_block, context);
         auto pool = getPool(connection_string);
diff --git a/programs/server/HTTPHandler.cpp b/programs/server/HTTPHandler.cpp
index b50cba97d90..7569ee057bc 100644
--- a/programs/server/HTTPHandler.cpp
+++ b/programs/server/HTTPHandler.cpp
@@ -241,7 +241,7 @@ void HTTPHandler::processQuery(
 
     CurrentThread::QueryScope query_scope(context);
 
-    LOG_TRACE(log, "Request URI: " << request.getURI());
+    LOG_TRACE_FORMATTED(log, "Request URI: {}", request.getURI());
 
     std::istream & istr = request.stream();
 
diff --git a/programs/server/InterserverIOHTTPHandler.cpp b/programs/server/InterserverIOHTTPHandler.cpp
index 1dc7fa88dc0..bedd38c28ef 100644
--- a/programs/server/InterserverIOHTTPHandler.cpp
+++ b/programs/server/InterserverIOHTTPHandler.cpp
@@ -53,7 +53,7 @@ void InterserverIOHTTPHandler::processQuery(Poco::Net::HTTPServerRequest & reque
 {
     HTMLForm params(request);
 
-    LOG_TRACE(log, "Request URI: " << request.getURI());
+    LOG_TRACE_FORMATTED(log, "Request URI: {}", request.getURI());
 
     String endpoint_name = params.get("endpoint");
     bool compress = params.get("compress") == "true";
diff --git a/programs/server/MySQLHandler.cpp b/programs/server/MySQLHandler.cpp
index b6919c2f188..f070c850509 100644
--- a/programs/server/MySQLHandler.cpp
+++ b/programs/server/MySQLHandler.cpp
@@ -197,7 +197,7 @@ void MySQLHandler::finishHandshake(MySQLProtocol::HandshakeResponse & packet)
     read_bytes(3); /// We can find out whether it is SSLRequest of HandshakeResponse by first 3 bytes.
 
     size_t payload_size = unalignedLoad<uint32_t>(buf) & 0xFFFFFFu;
-    LOG_TRACE(log, "payload size: " << payload_size);
+    LOG_TRACE_FORMATTED(log, "payload size: {}", payload_size);
 
     if (payload_size == SSL_REQUEST_PAYLOAD_SIZE)
     {
@@ -245,7 +245,7 @@ void MySQLHandler::comInitDB(ReadBuffer & payload)
 {
     String database;
     readStringUntilEOF(database, payload);
-    LOG_DEBUG(log, "Setting current database to " << database);
+    LOG_DEBUG_FORMATTED(log, "Setting current database to {}", database);
     connection_context.setCurrentDatabase(database);
     packet_sender->sendPacket(OK_Packet(0, client_capability_flags, 0, 0, 1), true);
 }
diff --git a/programs/server/MySQLHandlerFactory.cpp b/programs/server/MySQLHandlerFactory.cpp
index 3fbab08d413..9c98739380e 100644
--- a/programs/server/MySQLHandlerFactory.cpp
+++ b/programs/server/MySQLHandlerFactory.cpp
@@ -32,7 +32,7 @@ MySQLHandlerFactory::MySQLHandlerFactory(IServer & server_)
     }
     catch (...)
     {
-        LOG_TRACE(log, "Failed to create SSL context. SSL will be disabled. Error: " << getCurrentExceptionMessage(false));
+        LOG_TRACE_FORMATTED(log, "Failed to create SSL context. SSL will be disabled. Error: {}", getCurrentExceptionMessage(false));
         ssl_enabled = false;
     }
 
@@ -43,7 +43,7 @@ MySQLHandlerFactory::MySQLHandlerFactory(IServer & server_)
     }
     catch (...)
     {
-        LOG_TRACE(log, "Failed to read RSA key pair from server certificate. Error: " << getCurrentExceptionMessage(false));
+        LOG_TRACE_FORMATTED(log, "Failed to read RSA key pair from server certificate. Error: {}", getCurrentExceptionMessage(false));
         generateRSAKeys();
     }
 #endif
diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp
index e3fbd234dfd..a097b12f2d8 100644
--- a/programs/server/Server.cpp
+++ b/programs/server/Server.cpp
@@ -101,11 +101,11 @@ void setupTmpPath(Logger * log, const std::string & path)
     {
         if (it->isFile() && startsWith(it.name(), "tmp"))
         {
-            LOG_DEBUG(log, "Removing old temporary file " << it->path());
+            LOG_DEBUG_FORMATTED(log, "Removing old temporary file {}", it->path());
             it->remove();
         }
         else
-            LOG_DEBUG(log, "Skipped file in temporary path " << it->path());
+            LOG_DEBUG_FORMATTED(log, "Skipped file in temporary path {}", it->path());
     }
 }
 
@@ -349,7 +349,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
 
     if (rlim.rlim_cur == rlim.rlim_max)
     {
-        LOG_DEBUG(log, "rlimit on number of file descriptors is " << rlim.rlim_cur);
+        LOG_DEBUG_FORMATTED(log, "rlimit on number of file descriptors is {}", rlim.rlim_cur);
     }
     else
     {
@@ -579,7 +579,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
     if (max_server_memory_usage == 0)
     {
         max_server_memory_usage = default_max_server_memory_usage;
-        LOG_INFO(log, "Setting max_server_memory_usage was set to " << formatReadableSizeWithBinarySuffix(max_server_memory_usage));
+        LOG_INFO_FORMATTED(log, "Setting max_server_memory_usage was set to {}", formatReadableSizeWithBinarySuffix(max_server_memory_usage));
     }
     else if (max_server_memory_usage > default_max_server_memory_usage)
     {
diff --git a/programs/server/TCPHandlerFactory.h b/programs/server/TCPHandlerFactory.h
index ab000718d6f..dc08fc8825d 100644
--- a/programs/server/TCPHandlerFactory.h
+++ b/programs/server/TCPHandlerFactory.h
@@ -35,7 +35,7 @@ public:
     {
         try
         {
-            LOG_TRACE(log, "TCP Request. Address: " << socket.peerAddress().toString());
+            LOG_TRACE_FORMATTED(log, "TCP Request. Address: {}", socket.peerAddress().toString());
             return new TCPHandler(server, socket);
         }
         catch (const Poco::Net::NetException &)
diff --git a/src/Common/DNSResolver.cpp b/src/Common/DNSResolver.cpp
index b0b9c61bc84..4b039bb810a 100644
--- a/src/Common/DNSResolver.cpp
+++ b/src/Common/DNSResolver.cpp
@@ -202,7 +202,7 @@ bool DNSResolver::updateCache()
     }
 
     if (!lost_hosts.empty())
-        LOG_INFO(&Logger::get("DNSResolver"), "Cached hosts not found: " << lost_hosts);
+        LOG_INFO_FORMATTED(&Logger::get("DNSResolver"), "Cached hosts not found: {}", lost_hosts);
 
     return updated;
 }
diff --git a/src/Common/PipeFDs.cpp b/src/Common/PipeFDs.cpp
index 17eeb9aaef7..bb1dad4e2d3 100644
--- a/src/Common/PipeFDs.cpp
+++ b/src/Common/PipeFDs.cpp
@@ -102,7 +102,7 @@ void LazyPipeFDs::tryIncreaseSize(int desired_size)
         if (-1 == fcntl(fds_rw[1], F_SETPIPE_SZ, pipe_size * 2) && errno != EPERM)
             throwFromErrno("Cannot increase pipe capacity to " + std::to_string(pipe_size * 2), ErrorCodes::CANNOT_FCNTL);
 
-    LOG_TRACE(log, "Pipe capacity is " << formatReadableSizeWithBinarySuffix(std::min(pipe_size, desired_size)));
+    LOG_TRACE_FORMATTED(log, "Pipe capacity is {}", formatReadableSizeWithBinarySuffix(std::min(pipe_size, desired_size)));
 
 #else
     (void)desired_size;
diff --git a/src/Common/ShellCommand.cpp b/src/Common/ShellCommand.cpp
index cf7ddb559c1..98a01e18511 100644
--- a/src/Common/ShellCommand.cpp
+++ b/src/Common/ShellCommand.cpp
@@ -188,7 +188,7 @@ int ShellCommand::tryWait()
 {
     wait_called = true;
 
-    LOG_TRACE(getLogger(), "Will wait for shell command pid " << pid);
+    LOG_TRACE_FORMATTED(getLogger(), "Will wait for shell command pid {}", pid);
 
     int status = 0;
     if (-1 == waitpid(pid, &status, 0))
diff --git a/src/Core/MySQLProtocol.h b/src/Core/MySQLProtocol.h
index f0cacca6358..fded91c116c 100644
--- a/src/Core/MySQLProtocol.h
+++ b/src/Core/MySQLProtocol.h
@@ -1068,7 +1068,7 @@ public:
 #    pragma GCC diagnostic pop
         String pem(pem_buf, pem_size);
 
-        LOG_TRACE(log, "Key: " << pem);
+        LOG_TRACE_FORMATTED(log, "Key: {}", pem);
 
         AuthMoreData data(pem);
         packet_sender->sendPacket(data, true);
diff --git a/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp b/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp
index 4dbd1e0d9ec..382dbc37063 100644
--- a/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp
+++ b/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp
@@ -149,7 +149,7 @@ void MergingAggregatedMemoryEfficientBlockInputStream::cancel(bool kill)
                  * (example: connection reset during distributed query execution)
                  * - then don't care.
                  */
-                LOG_ERROR(log, "Exception while cancelling " << input.stream->getName());
+                LOG_ERROR_FORMATTED(log, "Exception while cancelling {}", input.stream->getName());
             }
         }
     }
diff --git a/src/DataStreams/ParallelInputsProcessor.h b/src/DataStreams/ParallelInputsProcessor.h
index a786dac7497..62d26fada2e 100644
--- a/src/DataStreams/ParallelInputsProcessor.h
+++ b/src/DataStreams/ParallelInputsProcessor.h
@@ -134,7 +134,7 @@ public:
                  * (for example, the connection is broken for distributed query processing)
                  * - then do not care.
                  */
-                LOG_ERROR(log, "Exception while cancelling " << input->getName());
+                LOG_ERROR_FORMATTED(log, "Exception while cancelling {}", input->getName());
             }
         }
     }
diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp
index aed9f38c2ea..e5c8c5cf287 100644
--- a/src/Databases/DatabaseOnDisk.cpp
+++ b/src/Databases/DatabaseOnDisk.cpp
@@ -381,7 +381,7 @@ void DatabaseOnDisk::iterateMetadataFiles(const Context & context, const Iterati
         }
         else
         {
-            LOG_INFO(log, "Removing file " << getMetadataPath() + file_name);
+            LOG_INFO_FORMATTED(log, "Removing file {}", getMetadataPath() + file_name);
             Poco::File(getMetadataPath() + file_name).remove();
         }
     };
@@ -406,7 +406,7 @@ void DatabaseOnDisk::iterateMetadataFiles(const Context & context, const Iterati
         else if (endsWith(dir_it.name(), ".sql.tmp"))
         {
             /// There are files .sql.tmp - delete
-            LOG_INFO(log, "Removing file " << dir_it->path());
+            LOG_INFO_FORMATTED(log, "Removing file {}", dir_it->path());
             Poco::File(dir_it->path()).remove();
         }
         else if (endsWith(dir_it.name(), ".sql"))
diff --git a/src/Dictionaries/Embedded/RegionsHierarchies.cpp b/src/Dictionaries/Embedded/RegionsHierarchies.cpp
index 906ae381a45..eaab1fac214 100644
--- a/src/Dictionaries/Embedded/RegionsHierarchies.cpp
+++ b/src/Dictionaries/Embedded/RegionsHierarchies.cpp
@@ -13,7 +13,7 @@ RegionsHierarchies::RegionsHierarchies(IRegionsHierarchiesDataProviderPtr data_p
 
     for (const auto & name : data_provider->listCustomHierarchies())
     {
-        LOG_DEBUG(log, "Adding regions hierarchy for " << name);
+        LOG_DEBUG_FORMATTED(log, "Adding regions hierarchy for {}", name);
         data.emplace(name, data_provider->getHierarchySource(name));
     }
 
diff --git a/src/Dictionaries/Embedded/RegionsNames.cpp b/src/Dictionaries/Embedded/RegionsNames.cpp
index d696b35c5d0..12362c7e50e 100644
--- a/src/Dictionaries/Embedded/RegionsNames.cpp
+++ b/src/Dictionaries/Embedded/RegionsNames.cpp
@@ -55,7 +55,7 @@ void RegionsNames::reload()
         if (!names_source || !names_source->isModified())
             continue;
 
-        LOG_DEBUG(log, "Reloading regions names for language: " << language);
+        LOG_DEBUG_FORMATTED(log, "Reloading regions names for language: {}", language);
 
         auto names_reader = names_source->createReader();
 
diff --git a/src/Dictionaries/MySQLDictionarySource.cpp b/src/Dictionaries/MySQLDictionarySource.cpp
index 632951613ac..884a88c1c3e 100644
--- a/src/Dictionaries/MySQLDictionarySource.cpp
+++ b/src/Dictionaries/MySQLDictionarySource.cpp
@@ -227,7 +227,7 @@ LocalDateTime MySQLDictionarySource::getLastModification(mysqlxx::Pool::Entry &
             if (!update_time_value.isNull())
             {
                 modification_time = update_time_value.getDateTime();
-                LOG_TRACE(log, "Got modification time: " << modification_time);
+                LOG_TRACE_FORMATTED(log, "Got modification time: {}", modification_time);
             }
 
             /// fetch remaining rows to avoid "commands out of sync" error
diff --git a/src/Disks/DiskLocal.cpp b/src/Disks/DiskLocal.cpp
index 7e089ba8b51..3b3dbf155f8 100644
--- a/src/Disks/DiskLocal.cpp
+++ b/src/Disks/DiskLocal.cpp
@@ -91,7 +91,7 @@ bool DiskLocal::tryReserve(UInt64 bytes)
     std::lock_guard lock(DiskLocal::reservation_mutex);
     if (bytes == 0)
     {
-        LOG_DEBUG(&Logger::get("DiskLocal"), "Reserving 0 bytes on disk " << backQuote(name));
+        LOG_DEBUG_FORMATTED(&Logger::get("DiskLocal"), "Reserving 0 bytes on disk {}", backQuote(name));
         ++reservation_count;
         return true;
     }
diff --git a/src/Disks/S3/DiskS3.cpp b/src/Disks/S3/DiskS3.cpp
index 1b058354545..af83fdbfe10 100644
--- a/src/Disks/S3/DiskS3.cpp
+++ b/src/Disks/S3/DiskS3.cpp
@@ -556,7 +556,7 @@ std::unique_ptr<WriteBufferFromFileBase> DiskS3::writeFile(const String & path,
 
 void DiskS3::remove(const String & path)
 {
-    LOG_DEBUG(&Logger::get("DiskS3"), "Remove file by path: " << backQuote(metadata_path + path));
+    LOG_DEBUG_FORMATTED(&Logger::get("DiskS3"), "Remove file by path: {}", backQuote(metadata_path + path));
 
     Poco::File file(metadata_path + path);
     if (file.isFile())
@@ -610,7 +610,7 @@ bool DiskS3::tryReserve(UInt64 bytes)
     std::lock_guard lock(reservation_mutex);
     if (bytes == 0)
     {
-        LOG_DEBUG(&Logger::get("DiskS3"), "Reserving 0 bytes on s3 disk " << backQuote(name));
+        LOG_DEBUG_FORMATTED(&Logger::get("DiskS3"), "Reserving 0 bytes on s3 disk {}", backQuote(name));
         ++reservation_count;
         return true;
     }
diff --git a/src/Disks/S3/ProxyListConfiguration.cpp b/src/Disks/S3/ProxyListConfiguration.cpp
index bc68635fa7b..3842fc727fd 100644
--- a/src/Disks/S3/ProxyListConfiguration.cpp
+++ b/src/Disks/S3/ProxyListConfiguration.cpp
@@ -20,7 +20,7 @@ Aws::Client::ClientConfigurationPerRequest ProxyListConfiguration::getConfigurat
     cfg.proxyHost = proxies[index].getHost();
     cfg.proxyPort = proxies[index].getPort();
 
-    LOG_DEBUG(&Logger::get("AWSClient"), "Use proxy: " << proxies[index].toString());
+    LOG_DEBUG_FORMATTED(&Logger::get("AWSClient"), "Use proxy: {}", proxies[index].toString());
 
     return cfg;
 }
diff --git a/src/Disks/S3/ProxyResolverConfiguration.cpp b/src/Disks/S3/ProxyResolverConfiguration.cpp
index c36432d933e..202c10aa7e4 100644
--- a/src/Disks/S3/ProxyResolverConfiguration.cpp
+++ b/src/Disks/S3/ProxyResolverConfiguration.cpp
@@ -21,7 +21,7 @@ ProxyResolverConfiguration::ProxyResolverConfiguration(const Poco::URI & endpoin
 
 Aws::Client::ClientConfigurationPerRequest ProxyResolverConfiguration::getConfiguration(const Aws::Http::HttpRequest &)
 {
-    LOG_DEBUG(&Logger::get("AWSClient"), "Obtain proxy using resolver: " << endpoint.toString());
+    LOG_DEBUG_FORMATTED(&Logger::get("AWSClient"), "Obtain proxy using resolver: {}", endpoint.toString());
 
     /// 1 second is enough for now.
     /// TODO: Make timeouts configurable.
diff --git a/src/Disks/S3/registerDiskS3.cpp b/src/Disks/S3/registerDiskS3.cpp
index 1912579275e..5da407bf9f0 100644
--- a/src/Disks/S3/registerDiskS3.cpp
+++ b/src/Disks/S3/registerDiskS3.cpp
@@ -71,7 +71,7 @@ namespace
 
             proxies.push_back(proxy_uri);
 
-            LOG_DEBUG(&Logger::get("DiskS3"), "Configured proxy: " << proxy_uri.toString());
+            LOG_DEBUG_FORMATTED(&Logger::get("DiskS3"), "Configured proxy: {}", proxy_uri.toString());
         }
 
         if (!proxies.empty())
diff --git a/src/IO/AIOContextPool.cpp b/src/IO/AIOContextPool.cpp
index 31d0948fedc..43ffea00512 100644
--- a/src/IO/AIOContextPool.cpp
+++ b/src/IO/AIOContextPool.cpp
@@ -98,7 +98,7 @@ void AIOContextPool::fulfillPromises(const io_event events[], const int num_even
         const auto it = promises.find(completed_id);
         if (it == std::end(promises))
         {
-            LOG_ERROR(&Poco::Logger::get("AIOcontextPool"), "Found io_event with unknown id " << completed_id);
+            LOG_ERROR_FORMATTED(&Poco::Logger::get("AIOcontextPool"), "Found io_event with unknown id {}", completed_id);
             continue;
         }
 
diff --git a/src/IO/ReadWriteBufferFromHTTP.h b/src/IO/ReadWriteBufferFromHTTP.h
index 4b4e4453360..51402a1f062 100644
--- a/src/IO/ReadWriteBufferFromHTTP.h
+++ b/src/IO/ReadWriteBufferFromHTTP.h
@@ -127,7 +127,7 @@ namespace detail
             if (!credentials.getUsername().empty())
                 credentials.authenticate(request);
 
-            LOG_TRACE((&Logger::get("ReadWriteBufferFromHTTP")), "Sending request to " << uri.toString());
+            LOG_TRACE_FORMATTED((&Logger::get("ReadWriteBufferFromHTTP")), "Sending request to {}", uri.toString());
 
             auto sess = session->getSession();
 
diff --git a/src/IO/WriteBufferFromHTTP.cpp b/src/IO/WriteBufferFromHTTP.cpp
index 0a8095b960f..79add0d90c0 100644
--- a/src/IO/WriteBufferFromHTTP.cpp
+++ b/src/IO/WriteBufferFromHTTP.cpp
@@ -15,7 +15,7 @@ WriteBufferFromHTTP::WriteBufferFromHTTP(
     request.setHost(uri.getHost());
     request.setChunkedTransferEncoding(true);
 
-    LOG_TRACE((&Logger::get("WriteBufferToHTTP")), "Sending request to " << uri.toString());
+    LOG_TRACE_FORMATTED((&Logger::get("WriteBufferToHTTP")), "Sending request to {}", uri.toString());
 
     ostr = &session->sendRequest(request);
 }
diff --git a/src/IO/WriteBufferFromS3.cpp b/src/IO/WriteBufferFromS3.cpp
index 34b1c6b07ae..699db5f92fe 100644
--- a/src/IO/WriteBufferFromS3.cpp
+++ b/src/IO/WriteBufferFromS3.cpp
@@ -100,7 +100,7 @@ void WriteBufferFromS3::initiate()
     if (outcome.IsSuccess())
     {
         upload_id = outcome.GetResult().GetUploadId();
-        LOG_DEBUG(log, "Multipart upload initiated. Upload id: " << upload_id);
+        LOG_DEBUG_FORMATTED(log, "Multipart upload initiated. Upload id: {}", upload_id);
     }
     else
         throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
@@ -170,7 +170,7 @@ void WriteBufferFromS3::complete()
     auto outcome = client_ptr->CompleteMultipartUpload(req);
 
     if (outcome.IsSuccess())
-        LOG_DEBUG(log, "Multipart upload completed. Upload_id: " << upload_id);
+        LOG_DEBUG_FORMATTED(log, "Multipart upload completed. Upload_id: {}", upload_id);
     else
         throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
 }
diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp
index d1b9b8637d3..5c4455f3281 100644
--- a/src/Interpreters/Aggregator.cpp
+++ b/src/Interpreters/Aggregator.cpp
@@ -551,7 +551,7 @@ bool Aggregator::executeOnBlock(Columns columns, UInt64 num_rows, AggregatedData
         result.init(method_chosen);
         result.keys_size = params.keys_size;
         result.key_sizes = key_sizes;
-        LOG_TRACE(log, "Aggregation method: " << result.getMethodName());
+        LOG_TRACE_FORMATTED(log, "Aggregation method: {}", result.getMethodName());
     }
 
     if (isCancelled())
diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp
index 45eeb47c035..aa983196d02 100644
--- a/src/Interpreters/Context.cpp
+++ b/src/Interpreters/Context.cpp
@@ -1761,7 +1761,7 @@ void Context::updateStorageConfiguration(const Poco::Util::AbstractConfiguration
         }
         catch (Exception & e)
         {
-            LOG_ERROR(shared->log, "An error has occured while reloading storage policies, storage policies were not applied: " << e.message());
+            LOG_ERROR_FORMATTED(shared->log, "An error has occurred while reloading storage policies, storage policies were not applied: {}", e.message());
         }
     }
 }
diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp
index c982ddfd83e..6fa2b171981 100644
--- a/src/Interpreters/DDLWorker.cpp
+++ b/src/Interpreters/DDLWorker.cpp
@@ -579,7 +579,7 @@ bool DDLWorker::tryExecuteQuery(const String & query, const DDLTask & task, Exec
     }
 
     status = ExecutionStatus(0);
-    LOG_DEBUG(log, "Executed query: " << query);
+    LOG_DEBUG_FORMATTED(log, "Executed query: {}", query);
 
     return true;
 }
@@ -631,7 +631,7 @@ void DDLWorker::processTask(DDLTask & task, const ZooKeeperPtr & zookeeper)
 
     ASTPtr rewritten_ast = task.query_on_cluster->getRewrittenASTWithoutOnCluster(task.address_in_cluster.default_database);
     String rewritten_query = queryToString(rewritten_ast);
-    LOG_DEBUG(log, "Executing query: " << rewritten_query);
+    LOG_DEBUG_FORMATTED(log, "Executing query: {}", rewritten_query);
 
     if (auto * query_with_table = dynamic_cast<ASTQueryWithTableAndOutput *>(rewritten_ast.get()); query_with_table)
     {
@@ -1003,7 +1003,7 @@ void DDLWorker::runMainThread()
         {
             if (Coordination::isHardwareError(e.code))
             {
-                LOG_DEBUG(log, "Recovering ZooKeeper session after: " << getCurrentExceptionMessage(false));
+                LOG_DEBUG_FORMATTED(log, "Recovering ZooKeeper session after: {}", getCurrentExceptionMessage(false));
 
                 while (!stop_flag)
                 {
@@ -1023,7 +1023,7 @@ void DDLWorker::runMainThread()
             }
             else if (e.code == Coordination::ZNONODE)
            {
-                LOG_ERROR(log, "ZooKeeper error: " << getCurrentExceptionMessage(true));
+                LOG_ERROR_FORMATTED(log, "ZooKeeper error: {}", getCurrentExceptionMessage(true));
             }
             else
             {
diff --git a/src/Interpreters/HashJoin.cpp b/src/Interpreters/HashJoin.cpp
index 0e463a84dc4..776d06e645d 100644
--- a/src/Interpreters/HashJoin.cpp
+++ b/src/Interpreters/HashJoin.cpp
@@ -422,7 +422,7 @@ void HashJoin::setSampleBlock(const Block & block)
     /// You have to restore this lock if you call the function outside of ctor.
    //std::unique_lock lock(rwlock);
 
-    LOG_DEBUG(log, "setSampleBlock: " << block.dumpStructure());
+    LOG_DEBUG_FORMATTED(log, "setSampleBlock: {}", block.dumpStructure());
 
     if (!empty())
         return;
diff --git a/src/Interpreters/ThreadStatusExt.cpp b/src/Interpreters/ThreadStatusExt.cpp
index 18fe2735c49..a27508303c4 100644
--- a/src/Interpreters/ThreadStatusExt.cpp
+++ b/src/Interpreters/ThreadStatusExt.cpp
@@ -95,7 +95,7 @@ void ThreadStatus::setupState(const ThreadGroupStatusPtr & thread_group_)
         Int32 new_os_thread_priority = settings.os_thread_priority;
         if (new_os_thread_priority && hasLinuxCapability(CAP_SYS_NICE))
         {
-            LOG_TRACE(log, "Setting nice to " << new_os_thread_priority);
+            LOG_TRACE_FORMATTED(log, "Setting nice to {}", new_os_thread_priority);
 
             if (0 != setpriority(PRIO_PROCESS, thread_id, new_os_thread_priority))
                 throwFromErrno("Cannot 'setpriority'", ErrorCodes::CANNOT_SET_THREAD_PRIORITY);
@@ -221,7 +221,7 @@ void ThreadStatus::detachQuery(bool exit_if_already_detached, bool thread_exits)
         LOG_TRACE_FORMATTED(log, "Resetting nice");
 
         if (0 != setpriority(PRIO_PROCESS, thread_id, 0))
-            LOG_ERROR(log, "Cannot 'setpriority' back to zero: " << errnoToString(ErrorCodes::CANNOT_SET_THREAD_PRIORITY, errno));
+            LOG_ERROR_FORMATTED(log, "Cannot 'setpriority' back to zero: {}", errnoToString(ErrorCodes::CANNOT_SET_THREAD_PRIORITY, errno));
 
         os_thread_priority = 0;
     }
diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp
index edb03bcfb68..00f8c1bfdc5 100644
--- a/src/Interpreters/executeQuery.cpp
+++ b/src/Interpreters/executeQuery.cpp
@@ -105,7 +105,7 @@ static void logQuery(const String & query, const Context & context, bool interna
 {
     if (internal)
     {
-        LOG_DEBUG(&Logger::get("executeQuery"), "(internal) " << joinLines(query));
+        LOG_DEBUG_FORMATTED(&Logger::get("executeQuery"), "(internal) {}", joinLines(query));
     }
     else
     {
diff --git a/src/Processors/Executors/PipelineExecutor.cpp b/src/Processors/Executors/PipelineExecutor.cpp
index 4552262040b..5bf7ec0ff15 100644
--- a/src/Processors/Executors/PipelineExecutor.cpp
+++ b/src/Processors/Executors/PipelineExecutor.cpp
@@ -486,7 +486,7 @@ void PipelineExecutor::execute(size_t num_threads)
     catch (...)
     {
 #ifndef NDEBUG
-        LOG_TRACE(log, "Exception while executing query. Current state:\n" << dumpPipeline());
+        LOG_TRACE_FORMATTED(log, "Exception while executing query. Current state:\n{}", dumpPipeline());
 #endif
         throw;
     }
diff --git a/src/Processors/Formats/Impl/AvroRowInputFormat.cpp b/src/Processors/Formats/Impl/AvroRowInputFormat.cpp
index d92cefbde7d..d0272e6f277 100644
--- a/src/Processors/Formats/Impl/AvroRowInputFormat.cpp
+++ b/src/Processors/Formats/Impl/AvroRowInputFormat.cpp
@@ -612,7 +612,7 @@ private:
         try
         {
             Poco::URI url(base_url, "/schemas/ids/" + std::to_string(id));
-            LOG_TRACE((&Logger::get("AvroConfluentRowInputFormat")), "Fetching schema id = " << id);
+            LOG_TRACE_FORMATTED((&Logger::get("AvroConfluentRowInputFormat")), "Fetching schema id = {}", id);
 
             /// One second for connect/send/receive. Just in case.
             ConnectionTimeouts timeouts({1, 0}, {1, 0}, {1, 0});
diff --git a/src/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp b/src/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp
index a118fdf23ca..873e03580bc 100644
--- a/src/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp
+++ b/src/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp
@@ -38,7 +38,7 @@ ReadBufferFromKafkaConsumer::ReadBufferFromKafkaConsumer(
     // called (synchroniously, during poll) when we enter the consumer group
     consumer->set_assignment_callback([this](const cppkafka::TopicPartitionList & topic_partitions)
     {
-        LOG_TRACE(log, "Topics/partitions assigned: " << topic_partitions);
+        LOG_TRACE_FORMATTED(log, "Topics/partitions assigned: {}", topic_partitions);
         assignment = topic_partitions;
     });
 
@@ -47,7 +47,7 @@ ReadBufferFromKafkaConsumer::ReadBufferFromKafkaConsumer(
     {
         // Rebalance is happening now, and now we have a chance to finish the work
         // with topics/partitions we were working with before rebalance
-        LOG_TRACE(log, "Rebalance initiated. Revoking partitions: " << topic_partitions);
+        LOG_TRACE_FORMATTED(log, "Rebalance initiated. Revoking partitions: {}", topic_partitions);
 
         // we can not flush data to target from that point (it is pulled, not pushed)
         // so the best we can now it to
@@ -70,13 +70,13 @@ ReadBufferFromKafkaConsumer::ReadBufferFromKafkaConsumer(
         // }
         // catch (cppkafka::HandleException & e)
         // {
-        //     LOG_WARNING(log, "Commit error: " << e.what());
+        //     LOG_WARNING_FORMATTED(log, "Commit error: {}", e.what());
         // }
     });
 
     consumer->set_rebalance_error_callback([this](cppkafka::Error err)
     {
-        LOG_ERROR(log, "Rebalance error: " << err);
+        LOG_ERROR_FORMATTED(log, "Rebalance error: {}", err);
     });
 }
 
@@ -150,7 +150,7 @@ void ReadBufferFromKafkaConsumer::commit()
         }
         catch (const cppkafka::HandleException & e)
         {
-            LOG_ERROR(log, "Exception during commit attempt: " << e.what());
+            LOG_ERROR_FORMATTED(log, "Exception during commit attempt: {}", e.what());
         }
         --max_retries;
     }
@@ -176,7 +176,7 @@ void ReadBufferFromKafkaConsumer::subscribe()
         << boost::algorithm::join(consumer->get_subscription(), ", ")
         << " ]");
 
-    LOG_TRACE(log, "Already assigned to : " << assignment);
+    LOG_TRACE_FORMATTED(log, "Already assigned to: {}", assignment);
 
     size_t max_retries = 5;
 
@@ -223,7 +223,7 @@ void ReadBufferFromKafkaConsumer::unsubscribe()
     }
     catch (const cppkafka::HandleException & e)
     {
-        LOG_ERROR(log, "Exception from ReadBufferFromKafkaConsumer::unsubscribe: " << e.what());
+        LOG_ERROR_FORMATTED(log, "Exception from ReadBufferFromKafkaConsumer::unsubscribe: {}", e.what());
     }
 }
 
@@ -340,7 +340,7 @@ bool ReadBufferFromKafkaConsumer::nextImpl()
             ++current;
 
             // TODO: should throw exception instead
-            LOG_ERROR(log, "Consumer error: " << err);
+            LOG_ERROR_FORMATTED(log, "Consumer error: {}", err);
             return false;
         }
 
diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp
index 2b491f9ede6..1fe26eb06da 100644
--- a/src/Storages/MergeTree/DataPartsExchange.cpp
+++ b/src/Storages/MergeTree/DataPartsExchange.cpp
@@ -82,7 +82,7 @@ void Service::processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & /*bo
     ++data.current_table_sends;
     SCOPE_EXIT({--data.current_table_sends;});
 
-    LOG_TRACE(log, "Sending part " << part_name);
+    LOG_TRACE_FORMATTED(log, "Sending part {}", part_name);
 
     try
     {
diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp
index 44ad4af626e..882c1226273 100644
--- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp
+++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp
@@ -267,7 +267,7 @@ void IMergeTreeDataPart::removeIfNeeded()
 
             if (state == State::DeleteOnDestroy)
             {
-                LOG_TRACE(storage.log, "Removed part from old location " << path);
+                LOG_TRACE_FORMATTED(storage.log, "Removed part from old location {}", path);
             }
         }
         catch (...)
@@ -812,7 +812,7 @@ void IMergeTreeDataPart::renameToDetached(const String & prefix) const
 void IMergeTreeDataPart::makeCloneInDetached(const String & prefix) const
 {
     assertOnDisk();
-    LOG_INFO(storage.log, "Detaching " << relative_path);
+    LOG_INFO_FORMATTED(storage.log, "Detaching {}", relative_path);
 
     String destination_path = storage.relative_data_path + getRelativePathForDetachedPart(prefix);
 
diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp
index 12a6a5dbde4..0d29f0808b2 100644
--- a/src/Storages/MergeTree/MergeTreeData.cpp
+++ b/src/Storages/MergeTree/MergeTreeData.cpp
@@ -1144,7 +1144,7 @@ void MergeTreeData::clearOldTemporaryDirectories(ssize_t custom_directories_life
         {
             if (disk->isDirectory(it->path()) && isOldPartDirectory(disk, it->path(), deadline))
             {
-                LOG_WARNING(log, "Removing temporary directory " << fullPath(disk, it->path()));
+                LOG_WARNING_FORMATTED(log, "Removing temporary directory {}", fullPath(disk, it->path()));
                 disk->removeRecursive(it->path());
             }
         }
@@ -1281,7 +1281,7 @@ void MergeTreeData::clearPartsFromFilesystem(const DataPartsVector & parts_to_re
         {
             pool.scheduleOrThrowOnError([&]
             {
-                LOG_DEBUG(log, "Removing part from filesystem " << part->name);
+                LOG_DEBUG_FORMATTED(log, "Removing part from filesystem {}", part->name);
                 part->remove();
             });
         }
@@ -1292,7 +1292,7 @@ void MergeTreeData::clearPartsFromFilesystem(const DataPartsVector & parts_to_re
     {
         for (const DataPartPtr & part : parts_to_remove)
         {
-            LOG_DEBUG(log, "Removing part from filesystem " << part->name);
+            LOG_DEBUG_FORMATTED(log, "Removing part from filesystem {}", part->name);
             part->remove();
         }
     }
@@ -1684,7 +1684,7 @@ void MergeTreeData::PartsTemporaryRename::tryRenameAll()
         catch (...)
        {
            old_and_new_names.resize(i);
-            LOG_WARNING(storage.log, "Cannot rename parts to perform operation on them: " << getCurrentExceptionMessage(false));
+            LOG_WARNING_FORMATTED(storage.log, "Cannot rename parts to perform operation on them: {}", getCurrentExceptionMessage(false));
            throw;
        }
    }
@@ -2004,7 +2004,7 @@ restore_covered)
 
     if (restore_covered && part->info.level == 0)
     {
-        LOG_WARNING(log, "Will not recover parts covered by zero-level part " << part->name);
+        LOG_WARNING_FORMATTED(log, "Will not recover parts covered by zero-level part {}", part->name);
         return;
     }
 
@@ -2085,7 +2085,7 @@ restore_covered)
 
         for (const String & name : restored)
         {
-            LOG_INFO(log, "Activated part " << name);
+            LOG_INFO_FORMATTED(log, "Activated part {}", name);
         }
 
         if (error)
@@ -2104,7 +2104,7 @@ void MergeTreeData::tryRemovePartImmediately(DataPartPtr && part)
     {
         auto lock = lockParts();
 
-        LOG_TRACE(log, "Trying to immediately remove part " << part->getNameWithState());
+        LOG_TRACE_FORMATTED(log, "Trying to immediately remove part {}", part->getNameWithState());
 
         auto it = data_parts_by_info.find(part->info);
         if (it == data_parts_by_info.end() || (*it).get() != part.get())
@@ -2130,7 +2130,7 @@ void MergeTreeData::tryRemovePartImmediately(DataPartPtr && part)
     }
 
     removePartsFinally({part_to_delete});
-    LOG_TRACE(log, "Removed part " << part_to_delete->name);
+    LOG_TRACE_FORMATTED(log, "Removed part {}", part_to_delete->name);
 }
 
 
@@ -2779,7 +2779,7 @@ void MergeTreeData::dropDetached(const ASTPtr & partition, bool part, const Cont
     {
         const auto & [path, disk] = renamed_parts.old_part_name_to_path_and_disk[old_name];
         disk->removeRecursive(path + "detached/" + new_name + "/");
-        LOG_DEBUG(log, "Dropped detached part " << old_name);
+        LOG_DEBUG_FORMATTED(log, "Dropped detached part {}", old_name);
         old_name.clear();
     }
 }
@@ -2819,7 +2819,7 @@ MergeTreeData::MutableDataPartsVector MergeTreeData::tryLoadPartsToAttach(const
             {
                 continue;
             }
-            LOG_DEBUG(log, "Found part " << name);
+            LOG_DEBUG_FORMATTED(log, "Found part {}", name);
             active_parts.add(name);
             name_to_disk[name] = disk;
         }
@@ -2849,7 +2849,7 @@ MergeTreeData::MutableDataPartsVector MergeTreeData::tryLoadPartsToAttach(const
     loaded_parts.reserve(renamed_parts.old_and_new_names.size());
     for (const auto & part_names : renamed_parts.old_and_new_names)
     {
-        LOG_DEBUG(log, "Checking part " << part_names.second);
+        LOG_DEBUG_FORMATTED(log, "Checking part {}", part_names.second);
         auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + part_names.first, name_to_disk[part_names.first]);
         MutableDataPartPtr part = createPart(part_names.first, single_disk_volume, source_dir + part_names.second);
         loadPartAndFixMetadataImpl(part);
@@ -3042,7 +3042,7 @@ void MergeTreeData::Transaction::rollback()
         for (const auto & part : precommitted_parts)
             ss << " " << part->relative_path;
         ss << ".";
-        LOG_DEBUG(data.log, "Undoing transaction." << ss.str());
+        LOG_DEBUG_FORMATTED(data.log, "Undoing transaction.{}", ss.str());
 
         data.removePartsFromWorkingSet(
             DataPartsVector(precommitted_parts.begin(), precommitted_parts.end()),
diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp
index 4165e0a4551..2d482e676da 100644
--- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp
+++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp
@@ -640,7 +640,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor
     size_t sum_input_rows_upper_bound = merge_entry->total_rows_count;
 
     MergeAlgorithm merge_alg = chooseMergeAlgorithm(parts, sum_input_rows_upper_bound, gathering_columns, deduplicate, need_remove_expired_values);
-    LOG_DEBUG(log, "Selected MergeAlgorithm: " << ((merge_alg == MergeAlgorithm::Vertical) ? "Vertical" : "Horizontal"));
+    LOG_DEBUG_FORMATTED(log, "Selected MergeAlgorithm: {}", ((merge_alg == MergeAlgorithm::Vertical) ? "Vertical" : "Horizontal"));
 
     /// Note: this is done before creating input streams, because otherwise data.data_parts_mutex
     /// (which is locked in data.getTotalActiveSizeInBytes())
diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp
index dc2da4ff03b..f51e56f7735 100644
--- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp
+++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp
@@ -107,7 +107,7 @@ size_t MergeTreeDataSelectExecutor::getApproximateTotalRowsToRead(
     size_t rows_count = 0;
 
     /// We will find out how many rows we would have read without sampling.
-    LOG_DEBUG(log, "Preliminary index scan with condition: " << key_condition.toString());
+    LOG_DEBUG_FORMATTED(log, "Preliminary index scan with condition: {}", key_condition.toString());
 
     for (const auto & part : parts)
     {
@@ -329,7 +329,7 @@ Pipes MergeTreeDataSelectExecutor::readFromParts(
         if (relative_sample_size > 1)
         {
             relative_sample_size = convertAbsoluteSampleSizeToRelative(select_sample_size, approx_total_rows);
-            LOG_DEBUG(log, "Selected relative sample size: " << toString(relative_sample_size));
+            LOG_DEBUG_FORMATTED(log, "Selected relative sample size: {}", toString(relative_sample_size));
         }
 
         /// SAMPLE 1 is the same as the absence of SAMPLE.
@@ -342,7 +342,7 @@ Pipes MergeTreeDataSelectExecutor::readFromParts(
         if (relative_sample_offset > 1)
         {
             relative_sample_offset = convertAbsoluteSampleSizeToRelative(select_sample_offset, approx_total_rows);
-            LOG_DEBUG(log, "Selected relative sample offset: " << toString(relative_sample_offset));
+            LOG_DEBUG_FORMATTED(log, "Selected relative sample offset: {}", toString(relative_sample_offset));
         }
     }
 
@@ -534,9 +534,9 @@ Pipes MergeTreeDataSelectExecutor::readFromParts(
         return {};
     }
 
-    LOG_DEBUG(log, "Key condition: " << key_condition.toString());
+    LOG_DEBUG_FORMATTED(log, "Key condition: {}", key_condition.toString());
     if (minmax_idx_condition)
-        LOG_DEBUG(log, "MinMax index condition: " << minmax_idx_condition->toString());
+        LOG_DEBUG_FORMATTED(log, "MinMax index condition: {}", minmax_idx_condition->toString());
 
     /// PREWHERE
     String prewhere_column;
diff --git a/src/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp
index 1609e1e517e..d9c7492fb41 100644
--- a/src/Storages/MergeTree/MergeTreePartsMover.cpp
+++ b/src/Storages/MergeTree/MergeTreePartsMover.cpp
@@ -193,7 +193,7 @@ MergeTreeData::DataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEnt
     if (moves_blocker.isCancelled())
         throw Exception("Cancelled moving parts.", ErrorCodes::ABORTED);
 
-    LOG_TRACE(log, "Cloning part " << moving_part.part->name);
+    LOG_TRACE_FORMATTED(log, "Cloning part {}", moving_part.part->name);
 
     moving_part.part->makeCloneOnDiskDetached(moving_part.reserved_space);
     auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + moving_part.part->name, moving_part.reserved_space->getDisk());
diff --git a/src/Storages/MergeTree/MergeTreeReadPool.cpp b/src/Storages/MergeTree/MergeTreeReadPool.cpp
index 8efa7bd31fe..8c8f240203b 100644
--- a/src/Storages/MergeTree/MergeTreeReadPool.cpp
+++ b/src/Storages/MergeTree/MergeTreeReadPool.cpp
@@ -178,7 +178,7 @@ void MergeTreeReadPool::profileFeedback(const ReadBufferFromFileBase::ProfileInf
     --backoff_state.current_threads;
 
     ProfileEvents::increment(ProfileEvents::ReadBackoff);
-    LOG_DEBUG(log, "Will lower number of threads to " << backoff_state.current_threads);
+    LOG_DEBUG_FORMATTED(log, "Will lower number of threads to {}", backoff_state.current_threads);
 }
 
 
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp
index 5f405189fd3..4bb7d4cff52 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp
+++ b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp
@@ -184,7 +184,7 @@ void ReplicatedMergeTreePartCheckThread::searchForMissingPart(const String & par
 
 CheckResult ReplicatedMergeTreePartCheckThread::checkPart(const String & part_name)
 {
-    LOG_WARNING(log, "Checking part " << part_name);
+    LOG_WARNING_FORMATTED(log, "Checking part {}", part_name);
     ProfileEvents::increment(ProfileEvents::ReplicatedPartChecks);
 
     /// If the part is still in the PreCommitted -> Committed transition, it is not lost
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
index 78f981f39d9..acc10624df8 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
@@ -49,7 +49,7 @@ bool ReplicatedMergeTreeQueue::isVirtualPart(const MergeTreeData::DataPartPtr &
 bool ReplicatedMergeTreeQueue::load(zkutil::ZooKeeperPtr zookeeper)
 {
     auto queue_path = replica_path + "/queue";
-    LOG_DEBUG(log, "Loading queue from " << queue_path);
+ LOG_DEBUG_FORMATTED(log, "Loading queue from {}", queue_path); bool updated = false; std::optional min_unprocessed_insert_time_changed; @@ -234,7 +234,7 @@ void ReplicatedMergeTreeQueue::updateStateOnQueueEntryRemoval( if (entry->type == LogEntry::ALTER_METADATA) { - LOG_TRACE(log, "Finishing metadata alter with version " << entry->alter_version); + LOG_TRACE_FORMATTED(log, "Finishing metadata alter with version {}", entry->alter_version); alter_sequence.finishMetadataAlter(entry->alter_version, state_lock); } } diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 54eccea5a83..840408e467f 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -587,7 +587,7 @@ void StorageDistributed::startup() if (inc > file_names_increment.value) file_names_increment.value.store(inc); } - LOG_DEBUG(log, "Auto-increment is " << file_names_increment.value); + LOG_DEBUG_FORMATTED(log, "Auto-increment is {}", file_names_increment.value); } @@ -816,7 +816,7 @@ void StorageDistributed::renameOnDisk(const String & new_path_to_table_data) auto new_path = path + new_path_to_table_data; Poco::File(path + relative_data_path).renameTo(new_path); - LOG_DEBUG(log, "Updating path to " << new_path); + LOG_DEBUG_FORMATTED(log, "Updating path to {}", new_path); std::lock_guard lock(cluster_nodes_mutex); for (auto & node : cluster_nodes_data) diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index fbe5b6b4d4d..678bf510954 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -384,14 +384,14 @@ Int64 StorageMergeTree::startMutation(const MutationCommands & commands, String auto insertion = current_mutations_by_id.emplace(mutation_file_name, std::move(entry)); current_mutations_by_version.emplace(version, insertion.first->second); - LOG_INFO(log, "Added mutation: " << mutation_file_name); + LOG_INFO_FORMATTED(log, "Added mutation: {}", mutation_file_name); merging_mutating_task_handle->wake(); return version; } void StorageMergeTree::waitForMutation(Int64 version, const String & file_name) { - LOG_INFO(log, "Waiting mutation: " << file_name); + LOG_INFO_FORMATTED(log, "Waiting mutation: {}", file_name); auto check = [version, this]() { return shutdown_called || isMutationDone(version); }; std::unique_lock lock(mutation_wait_mutex); mutation_wait_event.wait(lock, check); @@ -492,7 +492,7 @@ std::vector StorageMergeTree::getMutationsStatus() cons CancellationCode StorageMergeTree::killMutation(const String & mutation_id) { - LOG_TRACE(log, "Killing mutation " << mutation_id); + LOG_TRACE_FORMATTED(log, "Killing mutation {}", mutation_id); std::optional to_kill; { @@ -511,7 +511,7 @@ CancellationCode StorageMergeTree::killMutation(const String & mutation_id) global_context.getMergeList().cancelPartMutations({}, to_kill->block_number); to_kill->removeFile(); - LOG_TRACE(log, "Cancelled part mutations and removed mutation file " << mutation_id); + LOG_TRACE_FORMATTED(log, "Cancelled part mutations and removed mutation file {}", mutation_id); { std::lock_guard lock(mutation_wait_mutex); mutation_wait_event.notify_all(); @@ -896,7 +896,7 @@ void StorageMergeTree::clearOldMutations(bool truncate) for (auto & mutation : mutations_to_delete) { - LOG_TRACE(log, "Removing mutation: " << mutation.file_name); + LOG_TRACE_FORMATTED(log, "Removing mutation: {}", mutation.file_name); mutation.removeFile(); } } @@ -1050,7 +1050,7 @@ void StorageMergeTree::dropPartition(const ASTPtr & partition, 
bool detach, cons /// If DETACH clone parts to detached/ directory for (const auto & part : parts_to_remove) { - LOG_INFO(log, "Detaching " << part->relative_path); + LOG_INFO_FORMATTED(log, "Detaching {}", part->relative_path); part->makeCloneInDetached(""); } } diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 8bae12e2a19..b8319c45dca 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -412,7 +412,7 @@ void StorageReplicatedMergeTree::createTableIfNotExists() if (zookeeper->exists(zookeeper_path)) return; - LOG_DEBUG(log, "Creating table " << zookeeper_path); + LOG_DEBUG_FORMATTED(log, "Creating table {}", zookeeper_path); zookeeper->createAncestors(zookeeper_path); @@ -548,7 +548,7 @@ void StorageReplicatedMergeTree::createReplica() { auto zookeeper = getZooKeeper(); - LOG_DEBUG(log, "Creating replica " << replica_path); + LOG_DEBUG_FORMATTED(log, "Creating replica {}", replica_path); int32_t code; @@ -643,7 +643,7 @@ void StorageReplicatedMergeTree::checkParts(bool skip_sanity_checks) if (MergeTreePartInfo::tryParsePartName(part_name, &part_info, format_version)) return part_info.getBlocksCount(); - LOG_ERROR(log, "Unexpected part name: " << part_name); + LOG_ERROR_FORMATTED(log, "Unexpected part name: {}", part_name); return 0; }; @@ -701,7 +701,7 @@ void StorageReplicatedMergeTree::checkParts(bool skip_sanity_checks) for (size_t i = 0; i < parts_to_fetch.size(); ++i) { const String & part_name = parts_to_fetch[i]; - LOG_ERROR(log, "Removing locally missing part from ZooKeeper and queueing a fetch: " << part_name); + LOG_ERROR_FORMATTED(log, "Removing locally missing part from ZooKeeper and queueing a fetch: {}", part_name); Coordination::Requests ops; @@ -1483,7 +1483,7 @@ void StorageReplicatedMergeTree::executeDropRange(const LogEntry & entry) /// If DETACH clone parts to detached/ directory for (const auto & part : parts_to_remove) { - LOG_INFO(log, "Detaching " << part->relative_path); + LOG_INFO_FORMATTED(log, "Detaching {}", part->relative_path); part->makeCloneInDetached(""); } } @@ -1824,7 +1824,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) void StorageReplicatedMergeTree::cloneReplica(const String & source_replica, Coordination::Stat source_is_lost_stat, zkutil::ZooKeeperPtr & zookeeper) { - LOG_INFO(log, "Will mimic " << source_replica); + LOG_INFO_FORMATTED(log, "Will mimic {}", source_replica); String source_path = zookeeper_path + "/replicas/" + source_replica; @@ -3283,7 +3283,7 @@ bool StorageReplicatedMergeTree::executeMetadataAlter(const StorageReplicatedMer setTableStructure(std::move(columns_from_entry), metadata_diff); metadata_version = entry.alter_version; - LOG_INFO(log, "Applied changes to the metadata of the table. Current metadata version: " << metadata_version); + LOG_INFO_FORMATTED(log, "Applied changes to the metadata of the table. 
Current metadata version: {}", metadata_version); } /// This transaction may not happen, but it's OK, because on the next retry we will eventually create/update this node @@ -3722,7 +3722,7 @@ void StorageReplicatedMergeTree::drop() if (zookeeper->expired()) throw Exception("Table was not dropped because ZooKeeper session has expired.", ErrorCodes::TABLE_WAS_NOT_DROPPED); - LOG_INFO(log, "Removing replica " << replica_path); + LOG_INFO_FORMATTED(log, "Removing replica {}", replica_path); replica_is_active_node = nullptr; /// It may left some garbage if replica_path subtree are concurently modified zookeeper->tryRemoveRecursive(replica_path); @@ -3833,7 +3833,7 @@ StorageReplicatedMergeTree::allocateBlockNumber( Strings StorageReplicatedMergeTree::waitForAllReplicasToProcessLogEntry(const ReplicatedMergeTreeLogEntryData & entry, bool wait_for_non_active) { - LOG_DEBUG(log, "Waiting for all replicas to process " << entry.znode_name); + LOG_DEBUG_FORMATTED(log, "Waiting for all replicas to process {}", entry.znode_name); auto zookeeper = getZooKeeper(); Strings replicas = zookeeper->getChildren(zookeeper_path + "/replicas"); @@ -3851,7 +3851,7 @@ Strings StorageReplicatedMergeTree::waitForAllReplicasToProcessLogEntry(const Re } } - LOG_DEBUG(log, "Finished waiting for all replicas to process " << entry.znode_name); + LOG_DEBUG_FORMATTED(log, "Finished waiting for all replicas to process {}", entry.znode_name); return unwaited; } @@ -4397,7 +4397,7 @@ void StorageReplicatedMergeTree::fetchPartition(const ASTPtr & partition, const } } - LOG_INFO(log, "Parts to fetch: " << parts_to_fetch.size()); + LOG_INFO_FORMATTED(log, "Parts to fetch: {}", parts_to_fetch.size()); missing_parts.clear(); for (const String & part : parts_to_fetch) @@ -4514,7 +4514,7 @@ void StorageReplicatedMergeTree::mutate(const MutationCommands & commands, const const String & path_created = dynamic_cast(responses[1].get())->path_created; entry.znode_name = path_created.substr(path_created.find_last_of('/') + 1); - LOG_TRACE(log, "Created mutation with ID " << entry.znode_name); + LOG_TRACE_FORMATTED(log, "Created mutation with ID {}", entry.znode_name); break; } else if (rc == Coordination::ZBADVERSION) @@ -4556,7 +4556,7 @@ CancellationCode StorageReplicatedMergeTree::killMutation(const String & mutatio zkutil::ZooKeeperPtr zookeeper = getZooKeeper(); - LOG_TRACE(log, "Killing mutation " << mutation_id); + LOG_TRACE_FORMATTED(log, "Killing mutation {}", mutation_id); auto mutation_entry = queue.removeMutation(zookeeper, mutation_id); if (!mutation_entry) @@ -4639,7 +4639,7 @@ void StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZK() } catch (...) { - LOG_ERROR(log, "There is a problem with deleting parts from ZooKeeper: " << getCurrentExceptionMessage(true)); + LOG_ERROR_FORMATTED(log, "There is a problem with deleting parts from ZooKeeper: {}", getCurrentExceptionMessage(true)); } /// Part names that were reliably deleted from ZooKeeper should be deleted from filesystem @@ -5386,7 +5386,7 @@ bool StorageReplicatedMergeTree::dropPartsInPartition( queue.disableMergesInBlockRange(drop_range_fake_part_name); } - LOG_DEBUG(log, "Disabled merges covered by range " << drop_range_fake_part_name); + LOG_DEBUG_FORMATTED(log, "Disabled merges covered by range {}", drop_range_fake_part_name); /// Finally, having achieved the necessary invariants, you can put an entry in the log. entry.type = LogEntry::DROP_RANGE;
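
For reference, a minimal, self-contained sketch of the two call shapes this mechanical rewrite converts between. The macro definitions below are illustrative assumptions only, not the real ClickHouse logging macros; the formatted variant assumes the {fmt} library.

// Hypothetical demo macros: only the call-site shape matches this patch.
#include <fmt/format.h>   // assumption: the {fmt} library is available

#include <iostream>
#include <sstream>
#include <string>

struct DemoLogger
{
    void trace(const std::string & message) { std::cout << message << '\n'; }
};

// Old shape: the message is assembled with ostream operator<< chains.
#define LOG_TRACE(logger, message) \
    do { std::ostringstream oss; oss << message; (logger).trace(oss.str()); } while (false)

// New shape: a format string plus arguments; this is what the sed expression
// in the subject line rewrites single-argument call sites into. Note that a
// literal '{' or '}' inside an old message would need escaping as "{{" / "}}".
#define LOG_TRACE_FORMATTED(logger, ...) \
    (logger).trace(fmt::format(__VA_ARGS__))

int main()
{
    DemoLogger log;
    int sig = 15;
    LOG_TRACE(log, "Received signal " << sig);            // before this patch
    LOG_TRACE_FORMATTED(log, "Received signal {}", sig);  // after this patch
}

The regex in the subject only matches calls whose message is a single string literal followed by one streamed argument, which is why multi-argument calls, such as the two-argument LOG_DEBUG left in ClusterCopier.cpp above, are untouched.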