find {base,src,programs} -name '*.h' -or -name '*.cpp' | xargs grep -l -P 'LOG_[^\W_]+\([^,]+, [^<+)"]+\);' | xargs sed -i -r -e 's/(LOG_[^\W_]+)\(([^,]+), ([^<+)"]+)\);/\1_FORMATTED(\2, \3);/'

This commit is contained in:
Alexey Milovidov 2020-05-23 20:29:56 +03:00
parent b3f4fe12dd
commit 3a09f9ca1c
7 changed files with 17 additions and 17 deletions

View File

@ -278,7 +278,7 @@ private:
}
/// Write symbolized stack trace line by line for better grep-ability.
-stack_trace.toStringEveryLine([&](const std::string & s) { LOG_FATAL(log, s); });
+stack_trace.toStringEveryLine([&](const std::string & s) { LOG_FATAL_FORMATTED(log, s); });
}
};
@ -318,7 +318,7 @@ static void sanitizerDeathCallback()
}
/// Write symbolized stack trace line by line for better grep-ability.
-stack_trace.toStringEveryLine([&](const std::string & s) { LOG_FATAL(log, s); });
+stack_trace.toStringEveryLine([&](const std::string & s) { LOG_FATAL_FORMATTED(log, s); });
}
#endif

View File

@ -121,9 +121,9 @@ void InterserverIOHTTPHandler::handleRequest(Poco::Net::HTTPServerRequest & requ
writeString(message, *used_output.out);
if (is_real_error)
-    LOG_ERROR(log, message);
+    LOG_ERROR_FORMATTED(log, message);
else
-    LOG_INFO(log, message);
+    LOG_INFO_FORMATTED(log, message);
}
catch (...)
{
@ -132,7 +132,7 @@ void InterserverIOHTTPHandler::handleRequest(Poco::Net::HTTPServerRequest & requ
if (!response.sent())
writeString(message, *used_output.out);
-LOG_ERROR(log, message);
+LOG_ERROR_FORMATTED(log, message);
}
}

View File

@ -118,7 +118,7 @@ BlockInputStreamPtr MySQLDictionarySource::loadAll()
auto connection = pool.get();
last_modification = getLastModification(connection, false);
-LOG_TRACE(log, load_all_query);
+LOG_TRACE_FORMATTED(log, load_all_query);
return std::make_shared<MySQLBlockInputStream>(connection, load_all_query, sample_block, max_block_size, close_connection);
}
@ -128,7 +128,7 @@ BlockInputStreamPtr MySQLDictionarySource::loadUpdatedAll()
last_modification = getLastModification(connection, false);
std::string load_update_query = getUpdateFieldAndDate();
-LOG_TRACE(log, load_update_query);
+LOG_TRACE_FORMATTED(log, load_update_query);
return std::make_shared<MySQLBlockInputStream>(connection, load_update_query, sample_block, max_block_size, close_connection);
}

View File

@ -134,7 +134,7 @@ std::string XDBCDictionarySource::getUpdateFieldAndDate()
BlockInputStreamPtr XDBCDictionarySource::loadAll()
{
-LOG_TRACE(log, load_all_query);
+LOG_TRACE_FORMATTED(log, load_all_query);
return loadBase(load_all_query);
}
@ -142,7 +142,7 @@ BlockInputStreamPtr XDBCDictionarySource::loadUpdatedAll()
{
std::string load_query_update = getUpdateFieldAndDate();
-LOG_TRACE(log, load_query_update);
+LOG_TRACE_FORMATTED(log, load_query_update);
return loadBase(load_query_update);
}

View File

@ -325,7 +325,7 @@ void StorageKafka::updateConfiguration(cppkafka::Configuration & conf)
conf.set_log_callback([this](cppkafka::KafkaHandleBase &, int level, const std::string & /* facility */, const std::string & message)
{
auto [poco_level, client_logs_level] = parseSyslogLevel(level);
-    LOG_IMPL(log, client_logs_level, poco_level, message);
+    LOG_IMPL_FORMATTED(log, client_logs_level, poco_level, message);
});
// Configure interceptor to change thread name

View File

@ -260,7 +260,7 @@ CheckResult ReplicatedMergeTreePartCheckThread::checkPart(const String & part_na
tryLogCurrentException(log, __PRETTY_FUNCTION__);
String message = "Part " + part_name + " looks broken. Removing it and queueing a fetch.";
-LOG_ERROR(log, message);
+LOG_ERROR_FORMATTED(log, message);
ProfileEvents::increment(ProfileEvents::ReplicatedPartChecksFailed);
storage.removePartAndEnqueueFetch(part_name);
@ -278,7 +278,7 @@ CheckResult ReplicatedMergeTreePartCheckThread::checkPart(const String & part_na
ProfileEvents::increment(ProfileEvents::ReplicatedPartChecksFailed);
String message = "Unexpected part " + part_name + " in filesystem. Removing.";
-LOG_ERROR(log, message);
+LOG_ERROR_FORMATTED(log, message);
storage.forgetPartAndMoveToDetached(part, "unexpected");
return {part_name, false, message};
}

View File

@ -987,7 +987,7 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
if (!isNotCoveredByFuturePartsImpl(new_part_name, out_postpone_reason, state_lock))
{
if (!out_postpone_reason.empty())
-    LOG_DEBUG(log, out_postpone_reason);
+    LOG_DEBUG_FORMATTED(log, out_postpone_reason);
return false;
}
}
@ -1007,7 +1007,7 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
{
String reason = "Not merging into part " + entry.new_part_name
+ " because part " + name + " is not ready yet (log entry for that part is being processed).";
-LOG_TRACE(log, reason);
+LOG_TRACE_FORMATTED(log, reason);
out_postpone_reason = reason;
return false;
}
@ -1020,7 +1020,7 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
if (merger_mutator.merges_blocker.isCancelled())
{
String reason = "Not executing log entry for part " + entry.new_part_name + " because merges and mutations are cancelled now.";
-LOG_DEBUG(log, reason);
+LOG_DEBUG_FORMATTED(log, reason);
out_postpone_reason = reason;
return false;
}
@ -1041,7 +1041,7 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
String reason = "Not executing log entry " + entry.typeToString() + " for part " + entry.new_part_name
+ " because source parts size (" + formatReadableSizeWithBinarySuffix(sum_parts_size_in_bytes)
+ ") is greater than the current maximum (" + formatReadableSizeWithBinarySuffix(max_source_parts_size) + ").";
-LOG_DEBUG(log, reason);
+LOG_DEBUG_FORMATTED(log, reason);
out_postpone_reason = reason;
return false;
}
@ -1056,7 +1056,7 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
if (0 != getConflictsCountForRange(range, entry, &conflicts_description, state_lock))
{
-LOG_DEBUG(log, conflicts_description);
+LOG_DEBUG_FORMATTED(log, conflicts_description);
return false;
}
}