diff --git a/base/daemon/BaseDaemon.cpp b/base/daemon/BaseDaemon.cpp
index c48e92a1b7b..b091d01dea0 100644
--- a/base/daemon/BaseDaemon.cpp
+++ b/base/daemon/BaseDaemon.cpp
@@ -278,7 +278,7 @@ private:
         }
 
         /// Write symbolized stack trace line by line for better grep-ability.
-        stack_trace.toStringEveryLine([&](const std::string & s) { LOG_FATAL(log, s); });
+        stack_trace.toStringEveryLine([&](const std::string & s) { LOG_FATAL_FORMATTED(log, s); });
     }
 };
 
@@ -318,7 +318,7 @@ static void sanitizerDeathCallback()
     }
 
     /// Write symbolized stack trace line by line for better grep-ability.
-    stack_trace.toStringEveryLine([&](const std::string & s) { LOG_FATAL(log, s); });
+    stack_trace.toStringEveryLine([&](const std::string & s) { LOG_FATAL_FORMATTED(log, s); });
 }
 #endif
 
diff --git a/programs/server/InterserverIOHTTPHandler.cpp b/programs/server/InterserverIOHTTPHandler.cpp
index d86876800b8..71b6010f87a 100644
--- a/programs/server/InterserverIOHTTPHandler.cpp
+++ b/programs/server/InterserverIOHTTPHandler.cpp
@@ -121,9 +121,9 @@ void InterserverIOHTTPHandler::handleRequest(Poco::Net::HTTPServerRequest & requ
             writeString(message, *used_output.out);
 
         if (is_real_error)
-            LOG_ERROR(log, message);
+            LOG_ERROR_FORMATTED(log, message);
         else
-            LOG_INFO(log, message);
+            LOG_INFO_FORMATTED(log, message);
     }
     catch (...)
     {
@@ -132,7 +132,7 @@ void InterserverIOHTTPHandler::handleRequest(Poco::Net::HTTPServerRequest & requ
         if (!response.sent())
             writeString(message, *used_output.out);
 
-        LOG_ERROR(log, message);
+        LOG_ERROR_FORMATTED(log, message);
     }
 }
 
diff --git a/src/Dictionaries/MySQLDictionarySource.cpp b/src/Dictionaries/MySQLDictionarySource.cpp
index 884a88c1c3e..0c4ce686f62 100644
--- a/src/Dictionaries/MySQLDictionarySource.cpp
+++ b/src/Dictionaries/MySQLDictionarySource.cpp
@@ -118,7 +118,7 @@ BlockInputStreamPtr MySQLDictionarySource::loadAll()
     auto connection = pool.get();
     last_modification = getLastModification(connection, false);
 
-    LOG_TRACE(log, load_all_query);
+    LOG_TRACE_FORMATTED(log, load_all_query);
     return std::make_shared<MySQLBlockInputStream>(connection, load_all_query, sample_block, max_block_size, close_connection);
 }
 
@@ -128,7 +128,7 @@ BlockInputStreamPtr MySQLDictionarySource::loadUpdatedAll()
     last_modification = getLastModification(connection, false);
 
     std::string load_update_query = getUpdateFieldAndDate();
-    LOG_TRACE(log, load_update_query);
+    LOG_TRACE_FORMATTED(log, load_update_query);
     return std::make_shared<MySQLBlockInputStream>(connection, load_update_query, sample_block, max_block_size, close_connection);
 }
 
diff --git a/src/Dictionaries/XDBCDictionarySource.cpp b/src/Dictionaries/XDBCDictionarySource.cpp
index 03d31fa001f..27839678c6d 100644
--- a/src/Dictionaries/XDBCDictionarySource.cpp
+++ b/src/Dictionaries/XDBCDictionarySource.cpp
@@ -134,7 +134,7 @@ std::string XDBCDictionarySource::getUpdateFieldAndDate()
 
 BlockInputStreamPtr XDBCDictionarySource::loadAll()
 {
-    LOG_TRACE(log, load_all_query);
+    LOG_TRACE_FORMATTED(log, load_all_query);
     return loadBase(load_all_query);
 }
 
@@ -142,7 +142,7 @@ BlockInputStreamPtr XDBCDictionarySource::loadUpdatedAll()
 {
     std::string load_query_update = getUpdateFieldAndDate();
 
-    LOG_TRACE(log, load_query_update);
+    LOG_TRACE_FORMATTED(log, load_query_update);
     return loadBase(load_query_update);
 }
 
diff --git a/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp
index ae09706ea5d..4e8cb916c38 100644
--- a/src/Storages/Kafka/StorageKafka.cpp
+++ b/src/Storages/Kafka/StorageKafka.cpp
@@ -325,7 +325,7 @@ void StorageKafka::updateConfiguration(cppkafka::Configuration & conf)
     conf.set_log_callback([this](cppkafka::KafkaHandleBase &, int level, const std::string & /* facility */, const std::string & message)
     {
         auto [poco_level, client_logs_level] = parseSyslogLevel(level);
-        LOG_IMPL(log, client_logs_level, poco_level, message);
+        LOG_IMPL_FORMATTED(log, client_logs_level, poco_level, message);
     });
 
     // Configure interceptor to change thread name
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp
index 345f6a37cf8..ade201c04de 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp
+++ b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp
@@ -260,7 +260,7 @@ CheckResult ReplicatedMergeTreePartCheckThread::checkPart(const String & part_na
             tryLogCurrentException(log, __PRETTY_FUNCTION__);
 
             String message = "Part " + part_name + " looks broken. Removing it and queueing a fetch.";
-            LOG_ERROR(log, message);
+            LOG_ERROR_FORMATTED(log, message);
 
             ProfileEvents::increment(ProfileEvents::ReplicatedPartChecksFailed);
             storage.removePartAndEnqueueFetch(part_name);
@@ -278,7 +278,7 @@ CheckResult ReplicatedMergeTreePartCheckThread::checkPart(const String & part_na
         ProfileEvents::increment(ProfileEvents::ReplicatedPartChecksFailed);
 
         String message = "Unexpected part " + part_name + " in filesystem. Removing.";
-        LOG_ERROR(log, message);
+        LOG_ERROR_FORMATTED(log, message);
         storage.forgetPartAndMoveToDetached(part, "unexpected");
         return {part_name, false, message};
     }
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
index 2a95e939cd8..3d04a51daf9 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
@@ -987,7 +987,7 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
             if (!isNotCoveredByFuturePartsImpl(new_part_name, out_postpone_reason, state_lock))
             {
                 if (!out_postpone_reason.empty())
-                    LOG_DEBUG(log, out_postpone_reason);
+                    LOG_DEBUG_FORMATTED(log, out_postpone_reason);
                 return false;
             }
         }
@@ -1007,7 +1007,7 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
             {
                 String reason = "Not merging into part " + entry.new_part_name
                     + " because part " + name + " is not ready yet (log entry for that part is being processed).";
-                LOG_TRACE(log, reason);
+                LOG_TRACE_FORMATTED(log, reason);
                 out_postpone_reason = reason;
                 return false;
             }
@@ -1020,7 +1020,7 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
         if (merger_mutator.merges_blocker.isCancelled())
         {
             String reason = "Not executing log entry for part " + entry.new_part_name + " because merges and mutations are cancelled now.";
-            LOG_DEBUG(log, reason);
+            LOG_DEBUG_FORMATTED(log, reason);
             out_postpone_reason = reason;
             return false;
         }
@@ -1041,7 +1041,7 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
                 String reason = "Not executing log entry " + entry.typeToString() + " for part " + entry.new_part_name
                     + " because source parts size (" + formatReadableSizeWithBinarySuffix(sum_parts_size_in_bytes)
                     + ") is greater than the current maximum (" + formatReadableSizeWithBinarySuffix(max_source_parts_size) + ").";
-                LOG_DEBUG(log, reason);
+                LOG_DEBUG_FORMATTED(log, reason);
                 out_postpone_reason = reason;
                 return false;
             }
@@ -1056,7 +1056,7 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
 
         if (0 != getConflictsCountForRange(range, entry, &conflicts_description, state_lock))
         {
-            LOG_DEBUG(log, conflicts_description);
+            LOG_DEBUG_FORMATTED(log, conflicts_description);
             return false;
         }
     }
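
Note on the pattern above: every touched call site passes a string that is already fully built at run time (a query text, an exception message, a postpone reason), and the change only swaps the plain LOG_* macro for its *_FORMATTED counterpart. The sketch below is a hypothetical illustration of why such a split is useful under fmt-style logging; log_sketch and log_sketch_formatted are made-up names, and the real ClickHouse macros in logger_useful.h differ in detail.

    #include <iostream>
    #include <string>
    #include <utility>

    #include <fmt/core.h>

    /// Formatting path, analogous to the plain LOG_* macros after the fmt migration:
    /// the first argument is parsed as an fmt format string.
    template <typename... Args>
    void log_sketch(fmt::format_string<Args...> format, Args &&... args)
    {
        std::cout << fmt::format(format, std::forward<Args>(args)...) << '\n';
    }

    /// Verbatim path, analogous to the *_FORMATTED variants used in this patch:
    /// the message is already a finished string and is written as-is.
    void log_sketch_formatted(const std::string & message)
    {
        std::cout << message << '\n';
    }

    int main()
    {
        /// A message assembled at run time that happens to contain braces.
        std::string query = "SELECT map('{key}', 1) FROM system.one";

        log_sketch("Executing query: {}", query);  /// fine: braces arrive as an argument
        log_sketch_formatted(query);               /// fine: no format-string parsing at all

        /// log_sketch(query); would be rejected: "{key}" would be treated as an fmt
        /// replacement field (a compile error with modern fmt, or fmt::format_error
        /// at run time where runtime format strings are allowed).
    }

Under that reading, runtime-built text with literal braces can only go safely through the verbatim path, which is what the *_FORMATTED calls in this diff provide.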