Merge pull request #34223 from azat/bump-fmt

Bump fmtlib from 7.0.0 to 8.1.1
Sergei Trifonov 2022-02-02 00:03:25 +03:00 committed by GitHub
commit 68bc456830
48 changed files with 178 additions and 157 deletions
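
Context for reviewers: fmt 8.x validates format strings at compile time (under C++20 the format-string parameter of fmt::format is consteval-checked), so any pattern that is not a string literal must now be wrapped in fmt::runtime(), which defers validation to run time. That accounts for most of this diff; the rest fixes latent format-string bugs that the new checks surfaced and migrates a few Poco::format call sites. A minimal sketch of the behavior change (illustrative only, not code from this patch):

#include <fmt/core.h>
#include <string>

std::string render(const std::string & pattern, int code)
{
    // fmt::format(pattern, code) no longer compiles in fmt 8 under C++20,
    // because `pattern` is not a constant expression.
    // fmt::runtime() opts this call out of compile-time checking; a malformed
    // pattern then throws fmt::format_error at run time instead.
    return fmt::format(fmt::runtime(pattern), code);
}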


@@ -12,6 +12,8 @@ namespace
{
template <typename... Ts> constexpr size_t numArgs(Ts &&...) { return sizeof...(Ts); }
template <typename T, typename... Ts> constexpr auto firstArg(T && x, Ts &&...) { return std::forward<T>(x); }
/// For implicit conversion of fmt::basic_runtime<> to char* for std::string ctor
template <typename T, typename... Ts> constexpr auto firstArg(fmt::basic_runtime<T> && data, Ts &&...) { return data.str.data(); }
}
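
Note on the hunk above: the LOG_* macros use numArgs/firstArg so that a call with a single argument is logged verbatim rather than routed through fmtlib. Under fmt 8 that single argument may now be the fmt::basic_runtime<char> struct returned by fmt::runtime(), which carries the pattern as a string view in its `str` member; the new overload unwraps it back to a char pointer so the macro's std::string construction keeps working. Rough illustration (assumed usage, not from the patch):

// static_assert(numArgs("a", 1, 2) == 3);
// const char * msg = firstArg(fmt::runtime("plain text"));  // -> "plain text"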


@@ -317,7 +317,7 @@ private:
else
error_message = "Sanitizer trap.";
LOG_FATAL(log, error_message);
LOG_FATAL(log, fmt::runtime(error_message));
if (stack_trace.getSize())
{
@@ -330,11 +330,11 @@ private:
for (size_t i = stack_trace.getOffset(); i < stack_trace.getSize(); ++i)
bare_stacktrace << ' ' << stack_trace.getFramePointers()[i];
LOG_FATAL(log, bare_stacktrace.str());
LOG_FATAL(log, fmt::runtime(bare_stacktrace.str()));
}
/// Write symbolized stack trace line by line for better grep-ability.
stack_trace.toStringEveryLine([&](const std::string & s) { LOG_FATAL(log, s); });
stack_trace.toStringEveryLine([&](const std::string & s) { LOG_FATAL(log, fmt::runtime(s)); });
#if defined(OS_LINUX)
/// Write information about binary checksum. It can be difficult to calculate, so do it only after printing stack trace.

contrib/fmtlib vendored

@@ -1 +1 @@
Subproject commit c108ee1d590089ccf642fc85652b845924067af2
Subproject commit b6f4ceaed0a0a24ccf575fab6c56dd50ccf6f1a9


@@ -1,7 +1,10 @@
set (SRCS
# NOTE: do not build module for now:
# ../fmtlib/src/fmt.cc
../fmtlib/src/format.cc
../fmtlib/src/os.cc
../fmtlib/include/fmt/args.h
../fmtlib/include/fmt/chrono.h
../fmtlib/include/fmt/color.h
../fmtlib/include/fmt/compile.h
@@ -11,9 +14,9 @@ set (SRCS
../fmtlib/include/fmt/locale.h
../fmtlib/include/fmt/os.h
../fmtlib/include/fmt/ostream.h
../fmtlib/include/fmt/posix.h
../fmtlib/include/fmt/printf.h
../fmtlib/include/fmt/ranges.h
../fmtlib/include/fmt/xchar.h
)
add_library(_fmt ${SRCS})


@@ -324,7 +324,7 @@ int Keeper::main(const std::vector<std::string> & /*args*/)
}
else
{
LOG_WARNING(log, message);
LOG_WARNING(log, fmt::runtime(message));
}
}


@@ -37,7 +37,7 @@ namespace
if (!response.sent())
*response.send() << message << std::endl;
LOG_WARNING(&Poco::Logger::get("LibraryBridge"), message);
LOG_WARNING(&Poco::Logger::get("LibraryBridge"), fmt::runtime(message));
}
std::shared_ptr<Block> parseColumns(std::string && column_string)
@@ -123,7 +123,7 @@ void LibraryRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServe
}
else
{
LOG_TRACE(log, "Cannot clone from dictionary with id: {}, will call libNew instead");
LOG_TRACE(log, "Cannot clone from dictionary with id: {}, will call libNew instead", from_dictionary_id);
lib_new = true;
}
}
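
The hunk above fixes one of several genuine bugs that the upgrade surfaced: the message contained a {} placeholder but no argument was passed for it. With fmt 8's compile-time checks such calls fail to build (illustration, assuming C++20 checks):

// fmt::format("Cannot clone from dictionary with id: {}");                     // compile error: argument not found
// fmt::format("Cannot clone from dictionary with id: {}", from_dictionary_id); // OK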
@@ -178,7 +178,7 @@ void LibraryRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServe
catch (const Exception & ex)
{
processError(response, "Invalid 'sample_block' parameter in request body '" + ex.message() + "'");
LOG_WARNING(log, ex.getStackTraceString());
LOG_WARNING(log, fmt::runtime(ex.getStackTraceString()));
return;
}
@@ -278,7 +278,7 @@ void LibraryRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServe
catch (const Exception & ex)
{
processError(response, "Invalid 'requested_block' parameter in request body '" + ex.message() + "'");
LOG_WARNING(log, ex.getStackTraceString());
LOG_WARNING(log, fmt::runtime(ex.getStackTraceString()));
return;
}


@@ -77,7 +77,7 @@ void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServ
response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);
if (!response.sent())
*response.send() << message << std::endl;
LOG_WARNING(log, message);
LOG_WARNING(log, fmt::runtime(message));
};
if (!params.has("table"))


@@ -29,7 +29,7 @@ void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServ
response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);
if (!response.sent())
*response.send() << message << std::endl;
LOG_WARNING(log, message);
LOG_WARNING(log, fmt::runtime(message));
};
if (!params.has("connection_string"))


@@ -46,7 +46,7 @@ void ODBCHandler::processError(HTTPServerResponse & response, const std::string
response.setStatusAndReason(HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);
if (!response.sent())
*response.send() << message << std::endl;
LOG_WARNING(log, message);
LOG_WARNING(log, fmt::runtime(message));
}
@@ -102,7 +102,7 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse
catch (const Exception & ex)
{
processError(response, "Invalid 'sample_block' parameter in request body '" + ex.message() + "'");
LOG_ERROR(log, ex.getStackTraceString());
LOG_ERROR(log, fmt::runtime(ex.getStackTraceString()));
return;
}


@@ -37,7 +37,7 @@ void SchemaAllowedHandler::handleRequest(HTTPServerRequest & request, HTTPServer
response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);
if (!response.sent())
*response.send() << message << std::endl;
LOG_WARNING(log, message);
LOG_WARNING(log, fmt::runtime(message));
};
if (!params.has("connection_string"))


@@ -1261,7 +1261,7 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
for (const auto & query_id_format : query_id_formats)
{
writeString(query_id_format.first, std_out);
writeString(fmt::format(query_id_format.second, fmt::arg("query_id", global_context->getCurrentQueryId())), std_out);
writeString(fmt::format(fmt::runtime(query_id_format.second), fmt::arg("query_id", global_context->getCurrentQueryId())), std_out);
writeChar('\n', std_out);
std_out.next();
}
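
Here the pattern comes from user configuration, so it cannot be a compile-time constant; fmt::runtime() combined with fmt::arg keeps named-placeholder substitution working. Standalone sketch of the same call shape (helper name is illustrative):

#include <fmt/format.h>
#include <string>

std::string formatQueryIdLine(const std::string & user_pattern, const std::string & query_id)
{
    // e.g. user_pattern == "query id: {query_id}"
    return fmt::format(fmt::runtime(user_pattern), fmt::arg("query_id", query_id));
}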


@@ -405,7 +405,7 @@ bool Connection::ping()
}
catch (const Poco::Exception & e)
{
LOG_TRACE(log_wrapper.get(), e.displayText());
LOG_TRACE(log_wrapper.get(), fmt::runtime(e.displayText()));
return false;
}


@@ -58,9 +58,9 @@ void ConnectionEstablisher::run(ConnectionEstablisher::TryResult & result, std::
auto table_status_it = status_response.table_states_by_id.find(*table_to_check);
if (table_status_it == status_response.table_states_by_id.end())
{
const char * message_pattern = "There is no table {}.{} on server: {}";
fail_message = fmt::format(message_pattern, backQuote(table_to_check->database), backQuote(table_to_check->table), result.entry->getDescription());
LOG_WARNING(log, fail_message);
fail_message = fmt::format("There is no table {}.{} on server: {}",
backQuote(table_to_check->database), backQuote(table_to_check->table), result.entry->getDescription());
LOG_WARNING(log, fmt::runtime(fail_message));
ProfileEvents::increment(ProfileEvents::DistributedConnectionMissingTable);
return;
}


@@ -272,7 +272,7 @@ bool DNSResolver::updateCacheImpl(UpdateF && update_func, ElemsT && elems, const
}
if (!lost_elems.empty())
LOG_INFO(log, log_msg, lost_elems);
LOG_INFO(log, fmt::runtime(log_msg), lost_elems);
return updated;
}
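
As elsewhere in this diff: fmt::runtime() composes with ordinary format arguments; it only moves placeholder validation from compile time to run time (sketch, values assumed):

// std::string log_msg = "Cached hosts dropped: {}";  // assembled at run time
// LOG_INFO(log, fmt::runtime(log_msg), lost_elems);  // "{}" validated while logging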


@@ -37,7 +37,7 @@ public:
// Format message with fmt::format, like the logging functions.
template <typename ...Args>
Exception(int code, const std::string & fmt, Args&&... args)
: Exception(fmt::format(fmt, std::forward<Args>(args)...), code)
: Exception(fmt::format(fmt::runtime(fmt), std::forward<Args>(args)...), code)
{}
struct CreateFromPocoTag {};
@@ -55,7 +55,7 @@ public:
template <typename ...Args>
void addMessage(const std::string& format, Args&&... args)
{
extendedMessage(fmt::format(format, std::forward<Args>(args)...));
extendedMessage(fmt::format(fmt::runtime(format), std::forward<Args>(args)...));
}
void addMessage(const std::string& message)
@@ -119,7 +119,7 @@ public:
// Format message with fmt::format, like the logging functions.
template <typename ...Args>
ParsingException(int code, const std::string & fmt, Args&&... args)
: Exception(fmt::format(fmt, std::forward<Args>(args)...), code)
: Exception(fmt::format(fmt::runtime(fmt), std::forward<Args>(args)...), code)
{}
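
Because the pattern reaches these constructors as a runtime std::string, the Exception hierarchy opts out of compile-time checking wholesale. A self-contained sketch of the same technique (hypothetical class, not ClickHouse's):

#include <fmt/core.h>
#include <stdexcept>
#include <string>
#include <utility>

struct FormattedError : std::runtime_error
{
    template <typename... Args>
    FormattedError(const std::string & pattern, Args &&... args)
        : std::runtime_error(fmt::format(fmt::runtime(pattern), std::forward<Args>(args)...))
    {}
};

// throw FormattedError("expected {} rows, got {}", 10, 7);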


@@ -1145,7 +1145,7 @@ std::string normalizeZooKeeperPath(std::string zookeeper_path, bool check_starts
if (check_starts_with_slash)
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "ZooKeeper path must starts with '/', got '{}'", zookeeper_path);
if (log)
LOG_WARNING(log, "ZooKeeper path ('{}') does not start with '/'. It will not be supported in future releases");
LOG_WARNING(log, "ZooKeeper path ('{}') does not start with '/'. It will not be supported in future releases", zookeeper_path);
zookeeper_path = "/" + zookeeper_path;
}


@@ -17,7 +17,7 @@ TEST(Logger, Log)
Poco::Logger * log = &Poco::Logger::get("Log");
/// This test checks that we don't pass this string to fmtlib, because it is the only argument.
EXPECT_NO_THROW(LOG_INFO(log, "Hello {} World"));
EXPECT_NO_THROW(LOG_INFO(log, fmt::runtime("Hello {} World")));
}
TEST(Logger, TestLog)
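
Why this still passes: with a single argument the LOG_* macros (see the firstArg/numArgs helpers at the top of this diff) use the message verbatim and never hand it to fmtlib, so the stray "{}" is not interpreted; the fmt::runtime() wrapper is only needed to satisfy the compile-time check. Calling fmtlib directly would behave differently (illustration):

// fmt::format("Hello {} World");                // compile error under C++20 checks
// fmt::format(fmt::runtime("Hello {} World"));  // throws fmt::format_error at run time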


@@ -39,7 +39,7 @@ public:
const std::string & msg) override
{
LogsLevel db_level = static_cast<LogsLevel>(level_);
LOG_IMPL(log, db_level, LEVELS.at(db_level), msg);
LOG_IMPL(log, db_level, LEVELS.at(db_level), fmt::runtime(msg));
}
void set_level(int level_) override


@@ -80,7 +80,7 @@ void DatabaseAtomic::drop(ContextPtr)
}
catch (...)
{
LOG_WARNING(log, getCurrentExceptionMessage(true));
LOG_WARNING(log, fmt::runtime(getCurrentExceptionMessage(true)));
}
fs::remove_all(getMetadataPath());
}
@@ -469,7 +469,7 @@ void DatabaseAtomic::tryCreateSymlink(const String & table_name, const String &
}
catch (...)
{
LOG_WARNING(log, getCurrentExceptionMessage(true));
LOG_WARNING(log, fmt::runtime(getCurrentExceptionMessage(true)));
}
}
@@ -482,7 +482,7 @@ void DatabaseAtomic::tryRemoveSymlink(const String & table_name)
}
catch (...)
{
LOG_WARNING(log, getCurrentExceptionMessage(true));
LOG_WARNING(log, fmt::runtime(getCurrentExceptionMessage(true)));
}
}
@@ -527,7 +527,7 @@ void DatabaseAtomic::renameDatabase(ContextPtr query_context, const String & new
}
catch (...)
{
LOG_WARNING(log, getCurrentExceptionMessage(true));
LOG_WARNING(log, fmt::runtime(getCurrentExceptionMessage(true)));
}
auto new_name_escaped = escapeForFileName(new_name);


@@ -316,7 +316,7 @@ void DatabaseOnDisk::dropTable(ContextPtr local_context, const String & table_na
}
catch (...)
{
LOG_WARNING(log, getCurrentExceptionMessage(__PRETTY_FUNCTION__));
LOG_WARNING(log, fmt::runtime(getCurrentExceptionMessage(__PRETTY_FUNCTION__)));
attachTable(local_context, table_name, table, table_data_path_relative);
if (renamed)
fs::rename(table_metadata_path_drop, table_metadata_path);


@@ -94,7 +94,7 @@ bool DatabaseSQLite::checkSQLiteTable(const String & table_name) const
if (!sqlite_db)
sqlite_db = openSQLiteDB(database_path, getContext(), /* throw_on_error */true);
const String query = fmt::format("SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';", table_name);
const String query = fmt::format("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';", table_name);
auto callback_get_data = [](void * res, int, char **, char **) -> int
{


@@ -20,7 +20,7 @@ void processSQLiteError(const String & message, bool throw_on_error)
if (throw_on_error)
throw Exception(ErrorCodes::PATH_ACCESS_DENIED, message);
else
LOG_ERROR(&Poco::Logger::get("SQLiteEngine"), message);
LOG_ERROR(&Poco::Logger::get("SQLiteEngine"), fmt::runtime(message));
}


@@ -58,15 +58,15 @@ void cassandraLogCallback(const CassLogMessage * message, void * data)
{
Poco::Logger * logger = static_cast<Poco::Logger *>(data);
if (message->severity == CASS_LOG_CRITICAL || message->severity == CASS_LOG_ERROR)
LOG_ERROR(logger, message->message);
LOG_ERROR(logger, fmt::runtime(message->message));
else if (message->severity == CASS_LOG_WARN)
LOG_WARNING(logger, message->message);
LOG_WARNING(logger, fmt::runtime(message->message));
else if (message->severity == CASS_LOG_INFO)
LOG_INFO(logger, message->message);
LOG_INFO(logger, fmt::runtime(message->message));
else if (message->severity == CASS_LOG_DEBUG)
LOG_DEBUG(logger, message->message);
LOG_DEBUG(logger, fmt::runtime(message->message));
else if (message->severity == CASS_LOG_TRACE)
LOG_TRACE(logger, message->message);
LOG_TRACE(logger, fmt::runtime(message->message));
}
}


@@ -193,7 +193,7 @@ Pipe MySQLDictionarySource::loadAll()
auto connection = pool->get();
last_modification = getLastModification(connection, false);
LOG_TRACE(log, load_all_query);
LOG_TRACE(log, fmt::runtime(load_all_query));
return loadFromQuery(load_all_query);
}
@@ -203,7 +203,7 @@ Pipe MySQLDictionarySource::loadUpdatedAll()
last_modification = getLastModification(connection, false);
std::string load_update_query = getUpdateFieldAndDate();
LOG_TRACE(log, load_update_query);
LOG_TRACE(log, fmt::runtime(load_update_query));
return loadFromQuery(load_update_query);
}
@@ -289,7 +289,7 @@ LocalDateTime MySQLDictionarySource::getLastModification(mysqlxx::Pool::Entry &
{
auto query = connection->query("SHOW TABLE STATUS LIKE " + quoteForLike(configuration.table));
LOG_TRACE(log, query.str());
LOG_TRACE(log, fmt::runtime(query.str()));
auto result = query.use();


@@ -80,7 +80,7 @@ PostgreSQLDictionarySource::PostgreSQLDictionarySource(const PostgreSQLDictionar
Pipe PostgreSQLDictionarySource::loadAll()
{
LOG_TRACE(log, load_all_query);
LOG_TRACE(log, fmt::runtime(load_all_query));
return loadBase(load_all_query);
}
@@ -88,7 +88,7 @@ Pipe PostgreSQLDictionarySource::loadAll()
Pipe PostgreSQLDictionarySource::loadUpdatedAll()
{
auto load_update_query = getUpdateFieldAndDate();
LOG_TRACE(log, load_update_query);
LOG_TRACE(log, fmt::runtime(load_update_query));
return loadBase(load_update_query);
}


@@ -121,7 +121,7 @@ std::string XDBCDictionarySource::getUpdateFieldAndDate()
Pipe XDBCDictionarySource::loadAll()
{
LOG_TRACE(log, load_all_query);
LOG_TRACE(log, fmt::runtime(load_all_query));
return loadFromQuery(bridge_url, sample_block, load_all_query);
}
@@ -130,7 +130,7 @@ Pipe XDBCDictionarySource::loadUpdatedAll()
{
std::string load_query_update = getUpdateFieldAndDate();
LOG_TRACE(log, load_query_update);
LOG_TRACE(log, fmt::runtime(load_query_update));
return loadFromQuery(bridge_url, sample_block, load_query_update);
}


@@ -101,7 +101,7 @@ DiskSelectorPtr DiskSelector::updateFromConfig(
}
writeString(" disappeared from configuration, this change will be applied after restart of ClickHouse", warning);
LOG_WARNING(&Poco::Logger::get("DiskSelector"), warning.str());
LOG_WARNING(&Poco::Logger::get("DiskSelector"), fmt::runtime(warning.str()));
}
return result;


@@ -48,7 +48,7 @@ namespace
"First argument for function " + getName() + " must be Constant string", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
static auto * log = &Poco::Logger::get("FunctionLogTrace");
LOG_TRACE(log, message);
LOG_TRACE(log, fmt::runtime(message));
return DataTypeUInt8().createColumnConst(input_rows_count, 0);
}


@@ -317,7 +317,7 @@ public:
, load_frequency_ms(Aws::Auth::REFRESH_THRESHOLD)
, logger(&Poco::Logger::get("AWSInstanceProfileCredentialsProvider"))
{
LOG_INFO(logger, "Creating Instance with injected EC2MetadataClient and refresh rate {}.");
LOG_INFO(logger, "Creating Instance with injected EC2MetadataClient and refresh rate.");
}
Aws::Auth::AWSCredentials GetAWSCredentials() override


@@ -325,7 +325,7 @@ Chunk DDLQueryStatusSource::generate()
return {};
}
LOG_INFO(log, msg_format, node_path, timeout_seconds, num_unfinished_hosts, num_active_hosts);
LOG_INFO(log, fmt::runtime(msg_format), node_path, timeout_seconds, num_unfinished_hosts, num_active_hosts);
NameSet unfinished_hosts = waiting_hosts;
for (const auto & host_id : finished_hosts)


@@ -102,7 +102,7 @@ public:
~DebugASTLog()
{
if constexpr (_enable)
LOG_DEBUG(log, buf.str());
LOG_DEBUG(log, fmt::runtime(buf.str()));
}
WriteBuffer * stream() { return (_enable ? &buf : nullptr); }


@@ -1262,7 +1262,7 @@ namespace
{
io.onException();
LOG_ERROR(log, getExceptionMessage(exception, true));
LOG_ERROR(log, fmt::runtime(getExceptionMessage(exception, true)));
if (responder && !responder_finished)
{


@@ -915,7 +915,10 @@ void HTTPHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse
}
processQuery(request, params, response, used_output, query_scope);
LOG_DEBUG(log, (request_credentials ? "Authentication in progress..." : "Done processing query"));
if (request_credentials)
LOG_DEBUG(log, "Authentication in progress...");
else
LOG_DEBUG(log, "Done processing query");
}
catch (...)
{
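
The ternary had to be split because (cond ? "A" : "B") is not a constant expression, so it cannot satisfy fmt 8's compile-time format-string check even though both alternatives are literals. One literal per call keeps every message statically checkable without resorting to fmt::runtime() (illustration):

// LOG_DEBUG(log, request_credentials ? "Authentication in progress..." : "Done processing query");  // rejected by fmt 8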


@@ -138,9 +138,9 @@ void InterserverIOHTTPHandler::handleRequest(HTTPServerRequest & request, HTTPSe
write_response(message);
if (is_real_error)
LOG_ERROR(log, message);
LOG_ERROR(log, fmt::runtime(message));
else
LOG_INFO(log, message);
LOG_INFO(log, fmt::runtime(message));
}
catch (...)
{
@@ -148,7 +148,7 @@ void InterserverIOHTTPHandler::handleRequest(HTTPServerRequest & request, HTTPSe
std::string message = getCurrentExceptionMessage(false);
write_response(message);
LOG_ERROR(log, message);
LOG_ERROR(log, fmt::runtime(message));
}
}


@@ -105,7 +105,7 @@ void PostgreSQLHandler::run()
"0A000",
"Command is not supported"),
true);
LOG_ERROR(log, Poco::format("Command is not supported. Command code %d", static_cast<Int32>(message_type)));
LOG_ERROR(log, "Command is not supported. Command code {:d}", static_cast<Int32>(message_type));
message_transport->dropMessage();
}
}
@@ -222,7 +222,7 @@ void PostgreSQLHandler::cancelRequest()
std::unique_ptr<PostgreSQLProtocol::Messaging::CancelRequest> msg =
message_transport->receiveWithPayloadSize<PostgreSQLProtocol::Messaging::CancelRequest>(8);
String query = Poco::format("KILL QUERY WHERE query_id = 'postgres:%d:%d'", msg->process_id, msg->secret_key);
String query = fmt::format("KILL QUERY WHERE query_id = 'postgres:{:d}:{:d}'", msg->process_id, msg->secret_key);
ReadBufferFromString replacement(query);
auto query_context = session->makeQueryContext();
@@ -287,7 +287,7 @@ void PostgreSQLHandler::processQuery()
{
secret_key = dis(gen);
auto query_context = session->makeQueryContext();
query_context->setCurrentQueryId(Poco::format("postgres:%d:%d", connection_id, secret_key));
query_context->setCurrentQueryId(fmt::format("postgres:{:d}:{:d}", connection_id, secret_key));
CurrentThread::QueryScope query_scope{query_context};
ReadBufferFromString read_buf(spl_query);
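
Placeholder mapping for the Poco::format -> fmt::format migration above: Poco takes printf-style specifiers, fmtlib takes braces. With integer arguments a plain {} would work as well; {:d} preserves the explicit decimal intent of the original %d (sketch):

// Poco::format("postgres:%d:%d", connection_id, secret_key)
// fmt::format("postgres:{:d}:{:d}", connection_id, secret_key)  // same output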


@@ -466,7 +466,7 @@ void TCPHandler::runImpl()
}
const auto & e = *exception;
LOG_ERROR(log, getExceptionMessage(e, true));
LOG_ERROR(log, fmt::runtime(getExceptionMessage(e, true)));
sendException(*exception, send_exception_with_stack_trace);
}
}


@@ -222,7 +222,7 @@ void Service::sendPartFromMemory(
auto projection_sample_block = metadata_snapshot->projections.get(name).sample_block;
auto part_in_memory = asInMemoryPart(projection);
if (!part_in_memory)
throw Exception("Projection " + name + " of part " + part->name + " is not stored in memory", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Projection {} of part {} is not stored in memory", name, part->name);
writeStringBinary(name, out);
projection->checksums.write(out);
@@ -232,7 +232,7 @@ void Service::sendPartFromMemory(
auto part_in_memory = asInMemoryPart(part);
if (!part_in_memory)
throw Exception("Part " + part->name + " is not stored in memory", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Part {} is not stored in memory", part->name);
NativeWriter block_out(out, 0, metadata_snapshot->getSampleBlock());
part->checksums.write(out);
@@ -300,7 +300,7 @@ MergeTreeData::DataPart::Checksums Service::sendPartFromDisk(
throw Exception("Transferring part to replica was cancelled", ErrorCodes::ABORTED);
if (hashing_out.count() != size)
throw Exception("Unexpected size of file " + path, ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART);
throw Exception(ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART, "Unexpected size of file {}", path);
writePODBinary(hashing_out.getHash(), out);
@@ -323,7 +323,7 @@ void Service::sendPartFromDiskRemoteMeta(const MergeTreeData::DataPartPtr & part
auto disk = part->volume->getDisk();
if (!disk->supportZeroCopyReplication())
throw Exception(fmt::format("disk {} doesn't support zero-copy replication", disk->getName()), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "disk {} doesn't support zero-copy replication", disk->getName());
part->storage.lockSharedData(*part);
@@ -340,9 +340,9 @@ void Service::sendPartFromDiskRemoteMeta(const MergeTreeData::DataPartPtr & part
fs::path metadata(metadata_file);
if (!fs::exists(metadata))
throw Exception("Remote metadata '" + file_name + "' is not exists", ErrorCodes::CORRUPTED_DATA);
throw Exception(ErrorCodes::CORRUPTED_DATA, "Remote metadata '{}' does not exist", file_name);
if (!fs::is_regular_file(metadata))
throw Exception("Remote metadata '" + file_name + "' is not a file", ErrorCodes::CORRUPTED_DATA);
throw Exception(ErrorCodes::CORRUPTED_DATA, "Remote metadata '{}' is not a file", file_name);
UInt64 file_size = fs::file_size(metadata);
writeStringBinary(it.first, out);
@@ -355,7 +355,7 @@ void Service::sendPartFromDiskRemoteMeta(const MergeTreeData::DataPartPtr & part
throw Exception("Transferring part to replica was cancelled", ErrorCodes::ABORTED);
if (hashing_out.count() != file_size)
throw Exception("Unexpected size of file " + metadata_file, ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART);
throw Exception(ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART, "Unexpected size of file {}", metadata_file);
writePODBinary(hashing_out.getHash(), out);
}
@@ -370,7 +370,7 @@ MergeTreeData::DataPartPtr Service::findPart(const String & name)
if (part)
return part;
throw Exception("No part " + name + " in table", ErrorCodes::NO_SUCH_DATA_PART);
throw Exception(ErrorCodes::NO_SUCH_DATA_PART, "No part {} in table", name);
}
MergeTreeData::MutableDataPartPtr Fetcher::fetchPart(
@@ -511,9 +511,9 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart(
if (!try_zero_copy)
throw Exception("Got unexpected 'remote_fs_metadata' cookie", ErrorCodes::LOGICAL_ERROR);
if (std::find(capability.begin(), capability.end(), remote_fs_metadata) == capability.end())
throw Exception(fmt::format("Got 'remote_fs_metadata' cookie {}, expect one from {}", remote_fs_metadata, fmt::join(capability, ", ")), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Got 'remote_fs_metadata' cookie {}, expect one from {}", remote_fs_metadata, fmt::join(capability, ", "));
if (server_protocol_version < REPLICATION_PROTOCOL_VERSION_WITH_PARTS_ZERO_COPY)
throw Exception(fmt::format("Got 'remote_fs_metadata' cookie with old protocol version {}", server_protocol_version), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Got 'remote_fs_metadata' cookie with old protocol version {}", server_protocol_version);
if (part_type == "InMemory")
throw Exception("Got 'remote_fs_metadata' cookie for in-memory part", ErrorCodes::INCORRECT_PART_TYPE);
@@ -525,7 +525,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart(
{
if (e.code() != ErrorCodes::S3_ERROR && e.code() != ErrorCodes::ZERO_COPY_REPLICATION_ERROR)
throw;
LOG_WARNING(log, e.message() + " Will retry fetching part without zero-copy.");
LOG_WARNING(log, fmt::runtime(e.message() + " Will retry fetching part without zero-copy."));
/// Try again but without zero-copy
return fetchPart(metadata_snapshot, context, part_name, replica_path, host, port, timeouts,
user, password, interserver_scheme, throttler, to_detached, tmp_prefix_, nullptr, false, disk);
@@ -649,9 +649,10 @@ void Fetcher::downloadBaseOrProjectionPartToDisk(
/// Otherwise malicious ClickHouse replica may force us to write to arbitrary path.
String absolute_file_path = fs::weakly_canonical(fs::path(part_download_path) / file_name);
if (!startsWith(absolute_file_path, fs::weakly_canonical(part_download_path).string()))
throw Exception("File path (" + absolute_file_path + ") doesn't appear to be inside part path (" + part_download_path + ")."
" This may happen if we are trying to download part from malicious replica or logical error.",
ErrorCodes::INSECURE_PATH);
throw Exception(ErrorCodes::INSECURE_PATH,
"File path ({}) doesn't appear to be inside part path ({}). "
"This may happen if we are trying to download a part from a malicious replica, or there is a logical error.",
absolute_file_path, part_download_path);
auto file_out = disk->writeFile(fs::path(part_download_path) / file_name);
HashingWriteBuffer hashing_out(*file_out);
@@ -670,8 +671,10 @@ void Fetcher::downloadBaseOrProjectionPartToDisk(
readPODBinary(expected_hash, in);
if (expected_hash != hashing_out.getHash())
throw Exception("Checksum mismatch for file " + fullPath(disk, (fs::path(part_download_path) / file_name).string()) + " transferred from " + replica_path,
ErrorCodes::CHECKSUM_DOESNT_MATCH);
throw Exception(ErrorCodes::CHECKSUM_DOESNT_MATCH,
"Checksum mismatch for file {} transferred from {}",
fullPath(disk, (fs::path(part_download_path) / file_name).string()),
replica_path);
if (file_name != "checksums.txt" &&
file_name != "columns.txt" &&
@@ -762,7 +765,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDiskRemoteMeta(
if (!disk->supportZeroCopyReplication() || !disk->checkUniqueId(part_id))
{
throw Exception(fmt::format("Part {} unique id {} doesn't exist on {}.", part_name, part_id, disk->getName()), ErrorCodes::ZERO_COPY_REPLICATION_ERROR);
throw Exception(ErrorCodes::ZERO_COPY_REPLICATION_ERROR, "Part {} unique id {} doesn't exist on {}.", part_name, part_id, disk->getName());
}
LOG_DEBUG(log, "Downloading Part {} unique id {} metadata onto disk {}.",
part_name, part_id, disk->getName());
@@ -774,7 +777,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDiskRemoteMeta(
String part_download_path = fs::path(data.getRelativeDataPath()) / part_relative_path / "";
if (disk->exists(part_download_path))
throw Exception("Directory " + fullPath(disk, part_download_path) + " already exists.", ErrorCodes::DIRECTORY_ALREADY_EXISTS);
throw Exception(ErrorCodes::DIRECTORY_ALREADY_EXISTS, "Directory {} already exists.", fullPath(disk, part_download_path));
CurrentMetrics::Increment metric_increment{CurrentMetrics::ReplicatedFetch};
@@ -817,8 +820,9 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDiskRemoteMeta(
if (expected_hash != hashing_out.getHash())
{
throw Exception("Checksum mismatch for file " + metadata_file + " transferred from " + replica_path,
ErrorCodes::CHECKSUM_DOESNT_MATCH);
throw Exception(ErrorCodes::CHECKSUM_DOESNT_MATCH,
"Checksum mismatch for file {} transferred from {}",
metadata_file, replica_path);
}
}
}
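
Shape of the conversion applied throughout this file (sketch):

// before: throw Exception("No part " + name + " in table", ErrorCodes::NO_SUCH_DATA_PART);
// after:  throw Exception(ErrorCodes::NO_SUCH_DATA_PART, "No part {} in table", name);

Note that the variadic Exception constructor (see Exception.h earlier in this diff) formats via fmt::format(fmt::runtime(pattern), ...), so this is a consistency and readability change rather than a new compile-time guarantee.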


@@ -1428,7 +1428,7 @@ void IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & disk, const String & di
if (disk->exists(fs::path(path_to_clone) / relative_path))
{
LOG_WARNING(storage.log, "Path " + fullPath(disk, path_to_clone + relative_path) + " already exists. Will remove it and clone again.");
LOG_WARNING(storage.log, "Path {} already exists. Will remove it and clone again.", fullPath(disk, path_to_clone + relative_path));
disk->removeRecursive(fs::path(path_to_clone) / relative_path / "");
}
disk->createDirectories(path_to_clone);


@@ -84,7 +84,7 @@ std::pair<bool, ReplicatedMergeMutateTaskBase::PartLogWriter> MergeFromLogEntryT
/// 3. We have two intersecting parts, both cover source_part_name. It's logical error.
/// TODO Why 1 and 2 can happen? Do we need more assertions here or somewhere else?
constexpr const char * message = "Part {} is covered by {} but should be merged into {}. This shouldn't happen often.";
LOG_WARNING(log, message, source_part_name, source_part_or_covering->name, entry.new_part_name);
LOG_WARNING(log, fmt::runtime(message), source_part_name, source_part_or_covering->name, entry.new_part_name);
if (!source_part_or_covering->info.contains(MergeTreePartInfo::fromPartName(entry.new_part_name, storage.format_version)))
throw Exception(ErrorCodes::LOGICAL_ERROR, message, source_part_name, source_part_or_covering->name, entry.new_part_name);
return {false, {}};


@@ -1677,7 +1677,7 @@ size_t MergeTreeData::clearOldWriteAheadLogs()
auto min_max_block_number = MergeTreeWriteAheadLog::tryParseMinMaxBlockNumber(it->name());
if (min_max_block_number && is_range_on_disk(min_max_block_number->first, min_max_block_number->second))
{
LOG_DEBUG(log, "Removing from filesystem the outdated WAL file " + it->name());
LOG_DEBUG(log, "Removing from filesystem the outdated WAL file {}", it->name());
disk_ptr->removeFile(relative_data_path + it->name());
++cleared_count;
}
@@ -5918,7 +5918,7 @@ ReservationPtr MergeTreeData::balancedReservation(
writeCString("\nbalancer: \n", log_str);
for (const auto & [disk_name, per_disk_parts] : disk_parts_for_logging)
writeString(fmt::format(" {}: [{}]\n", disk_name, fmt::join(per_disk_parts, ", ")), log_str);
LOG_DEBUG(log, log_str.str());
LOG_DEBUG(log, fmt::runtime(log_str.str()));
if (ttl_infos)
reserved_space = tryReserveSpacePreferringTTLRules(


@@ -211,7 +211,7 @@ MergeTreeData::DataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEnt
String relative_path = part->relative_path;
if (disk->exists(path_to_clone + relative_path))
{
LOG_WARNING(log, "Path " + fullPath(disk, path_to_clone + relative_path) + " already exists. Will remove it and clone again.");
LOG_WARNING(log, "Path {} already exists. Will remove it and clone again.", fullPath(disk, path_to_clone + relative_path));
disk->removeRecursive(fs::path(path_to_clone) / relative_path / "");
}
disk->createDirectories(path_to_clone);


@@ -28,9 +28,11 @@ std::pair<bool, ReplicatedMergeMutateTaskBase::PartLogWriter> MutateFromLogEntry
if (source_part->name != source_part_name)
{
LOG_WARNING(log, "Part " + source_part_name + " is covered by " + source_part->name
+ " but should be mutated to " + entry.new_part_name + ". "
+ "Possibly the mutation of this part is not needed and will be skipped. This shouldn't happen often.");
LOG_WARNING(log,
"Part {} is covered by {} but should be mutated to {}. "
"Possibly the mutation of this part is not needed and will be skipped. "
"This shouldn't happen often.",
source_part_name, source_part->name, entry.new_part_name);
return {false, {}};
}


@@ -45,17 +45,17 @@ bool ReplicatedMergeMutateTaskBase::executeStep()
if (e.code() == ErrorCodes::NO_REPLICA_HAS_PART)
{
/// If no one has the right part, probably not all replicas work; We will not write to log with Error level.
LOG_INFO(log, e.displayText());
LOG_INFO(log, fmt::runtime(e.displayText()));
}
else if (e.code() == ErrorCodes::ABORTED)
{
/// Interrupted merge or downloading a part is not an error.
LOG_INFO(log, e.message());
LOG_INFO(log, fmt::runtime(e.message()));
}
else if (e.code() == ErrorCodes::PART_IS_TEMPORARILY_LOCKED)
{
/// Part cannot be added temporarily
LOG_INFO(log, e.displayText());
LOG_INFO(log, fmt::runtime(e.displayText()));
storage.cleanup_thread.wakeup();
}
else


@@ -359,7 +359,7 @@ CheckResult ReplicatedMergeTreePartCheckThread::checkPart(const String & part_na
tryLogCurrentException(log, __PRETTY_FUNCTION__);
String message = "Part " + part_name + " looks broken. Removing it and will try to fetch.";
LOG_ERROR(log, message);
LOG_ERROR(log, fmt::runtime(message));
/// Delete part locally.
storage.forgetPartAndMoveToDetached(part, "broken");
@@ -378,7 +378,7 @@ CheckResult ReplicatedMergeTreePartCheckThread::checkPart(const String & part_na
ProfileEvents::increment(ProfileEvents::ReplicatedPartChecksFailed);
String message = "Unexpected part " + part_name + " in filesystem. Removing.";
LOG_ERROR(log, message);
LOG_ERROR(log, fmt::runtime(message));
storage.forgetPartAndMoveToDetached(part, "unexpected");
return {part_name, false, message};
}


@@ -1060,12 +1060,12 @@ bool ReplicatedMergeTreeQueue::isNotCoveredByFuturePartsImpl(const LogEntry & en
if (entry_for_same_part_it != future_parts.end())
{
const LogEntry & another_entry = *entry_for_same_part_it->second;
const char * format_str = "Not executing log entry {} of type {} for part {} "
"because another log entry {} of type {} for the same part ({}) is being processed. This shouldn't happen often.";
LOG_INFO(log, format_str, entry.znode_name, entry.type, entry.new_part_name,
another_entry.znode_name, another_entry.type, another_entry.new_part_name);
out_reason = fmt::format(format_str, entry.znode_name, entry.type, entry.new_part_name,
another_entry.znode_name, another_entry.type, another_entry.new_part_name);
out_reason = fmt::format(
"Not executing log entry {} of type {} for part {} "
"because another log entry {} of type {} for the same part ({}) is being processed. This shouldn't happen often.",
entry.znode_name, entry.type, entry.new_part_name,
another_entry.znode_name, another_entry.type, another_entry.new_part_name);
LOG_INFO(log, fmt::runtime(out_reason));
return false;
/** When the corresponding action is completed, then `isNotCoveredByFuturePart` next time, will succeed,
@@ -1086,10 +1086,11 @@ bool ReplicatedMergeTreeQueue::isNotCoveredByFuturePartsImpl(const LogEntry & en
if (future_part.contains(result_part))
{
const char * format_str = "Not executing log entry {} for part {} "
"because it is covered by part {} that is currently executing.";
LOG_TRACE(log, format_str, entry.znode_name, new_part_name, future_part_elem.first);
out_reason = fmt::format(format_str, entry.znode_name, new_part_name, future_part_elem.first);
out_reason = fmt::format(
"Not executing log entry {} for part {} "
"because it is covered by part {} that is currently executing.",
entry.znode_name, new_part_name, future_part_elem.first);
LOG_TRACE(log, fmt::runtime(out_reason));
return false;
}
}
@@ -1171,11 +1172,11 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
{
if (future_parts.count(name))
{
const char * format_str = "Not executing log entry {} of type {} for part {} "
"because part {} is not ready yet (log entry for that part is being processed).";
LOG_TRACE(log, format_str, entry.znode_name, entry.typeToString(), entry.new_part_name, name);
/// Copy-paste of above because we need structured logging (instead of already formatted message).
out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.typeToString(), entry.new_part_name, name);
out_postpone_reason = fmt::format(
"Not executing log entry {} of type {} for part {} "
"because part {} is not ready yet (log entry for that part is being processed).",
entry.znode_name, entry.typeToString(), entry.new_part_name, name);
LOG_TRACE(log, fmt::runtime(out_postpone_reason));
return false;
}
@@ -1191,9 +1192,10 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
if (merger_mutator.merges_blocker.isCancelled())
{
const char * format_str = "Not executing log entry {} of type {} for part {} because merges and mutations are cancelled now.";
LOG_DEBUG(log, format_str, entry.znode_name, entry.typeToString(), entry.new_part_name);
out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.typeToString(), entry.new_part_name);
out_postpone_reason = fmt::format(
"Not executing log entry {} of type {} for part {} because merges and mutations are cancelled now.",
entry.znode_name, entry.typeToString(), entry.new_part_name);
LOG_DEBUG(log, fmt::runtime(out_postpone_reason));
return false;
}
@@ -1228,24 +1230,20 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
{
if (merger_mutator.ttl_merges_blocker.isCancelled())
{
const char * format_str = "Not executing log entry {} for part {} because merges with TTL are cancelled now.";
LOG_DEBUG(log, format_str,
entry.znode_name, entry.new_part_name);
out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.new_part_name);
out_postpone_reason = fmt::format(
"Not executing log entry {} for part {} because merges with TTL are cancelled now.",
entry.znode_name, entry.new_part_name);
LOG_DEBUG(log, fmt::runtime(out_postpone_reason));
return false;
}
size_t total_merges_with_ttl = data.getTotalMergesWithTTLInMergeList();
if (total_merges_with_ttl >= data_settings->max_number_of_merges_with_ttl_in_pool)
{
const char * format_str = "Not executing log entry {} for part {}"
" because {} merges with TTL already executing, maximum {}.";
LOG_DEBUG(log, format_str,
entry.znode_name, entry.new_part_name, total_merges_with_ttl,
data_settings->max_number_of_merges_with_ttl_in_pool);
out_postpone_reason = fmt::format(format_str,
out_postpone_reason = fmt::format(
"Not executing log entry {} for part {} because {} merges with TTL already executing, maximum {}.",
entry.znode_name, entry.new_part_name, total_merges_with_ttl,
data_settings->max_number_of_merges_with_ttl_in_pool);
LOG_DEBUG(log, fmt::runtime(out_postpone_reason));
return false;
}
}
@@ -1258,7 +1256,7 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
entry.znode_name, entry.typeToString(), entry.new_part_name,
ReadableSize(sum_parts_size_in_bytes), ReadableSize(max_source_parts_size));
LOG_DEBUG(log, out_postpone_reason);
LOG_DEBUG(log, fmt::runtime(out_postpone_reason));
return false;
}
@@ -1271,9 +1269,10 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
if (!alter_sequence.canExecuteMetaAlter(entry.alter_version, state_lock))
{
int head_alter = alter_sequence.getHeadAlterVersion(state_lock);
const char * format_str = "Cannot execute alter metadata {} with version {} because another alter {} must be executed before";
LOG_TRACE(log, format_str, entry.znode_name, entry.alter_version, head_alter);
out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.alter_version, head_alter);
out_postpone_reason = fmt::format(
"Cannot execute alter metadata {} with version {} because another alter {} must be executed before",
entry.znode_name, entry.alter_version, head_alter);
LOG_TRACE(log, fmt::runtime(out_postpone_reason));
return false;
}
}
@@ -1286,15 +1285,17 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
int head_alter = alter_sequence.getHeadAlterVersion(state_lock);
if (head_alter == entry.alter_version)
{
const char * format_str = "Cannot execute alter data {} with version {} because metadata still not altered";
LOG_TRACE(log, format_str, entry.znode_name, entry.alter_version);
out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.alter_version);
out_postpone_reason = fmt::format(
"Cannot execute alter data {} with version {} because metadata still not altered",
entry.znode_name, entry.alter_version);
LOG_TRACE(log, fmt::runtime(out_postpone_reason));
}
else
{
const char * format_str = "Cannot execute alter data {} with version {} because another alter {} must be executed before";
LOG_TRACE(log, format_str, entry.znode_name, entry.alter_version, head_alter);
out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.alter_version, head_alter);
out_postpone_reason = fmt::format(
"Cannot execute alter data {} with version {} because another alter {} must be executed before",
entry.znode_name, entry.alter_version, head_alter);
LOG_TRACE(log, fmt::runtime(out_postpone_reason));
}
return false;
@@ -1309,11 +1310,11 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
/// See also removePartProducingOpsInRange(...) and ReplicatedMergeTreeQueue::CurrentlyExecuting.
if (currently_executing_drop_or_replace_range)
{
const char * format_str = "Not executing log entry {} of type {} for part {} "
"because another DROP_RANGE or REPLACE_RANGE entry are currently executing.";
LOG_TRACE(log, format_str, entry.znode_name, entry.typeToString(), entry.new_part_name);
out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.typeToString(), entry.new_part_name);
out_postpone_reason = fmt::format(
"Not executing log entry {} of type {} for part {} "
"because another DROP_RANGE or REPLACE_RANGE entry are currently executing.",
entry.znode_name, entry.typeToString(), entry.new_part_name);
LOG_TRACE(log, fmt::runtime(out_postpone_reason));
return false;
}
@@ -1338,10 +1339,11 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
auto new_part_info = MergeTreePartInfo::fromPartName(new_part_name, format_version);
if (!new_part_info.isDisjoint(drop_part_info))
{
const char * format_str = "Not executing log entry {} of type {} for part {} "
"because it probably depends on {} (REPLACE_RANGE).";
LOG_TRACE(log, format_str, entry.znode_name, entry.typeToString(), entry.new_part_name, replace_entry->znode_name);
out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.typeToString(), entry.new_part_name, replace_entry->znode_name);
out_postpone_reason = fmt::format(
"Not executing log entry {} of type {} for part {} "
"because it probably depends on {} (REPLACE_RANGE).",
entry.znode_name, entry.typeToString(), entry.new_part_name, replace_entry->znode_name);
LOG_TRACE(log, fmt::runtime(out_postpone_reason));
return false;
}
}
@@ -1616,7 +1618,11 @@ MutationCommands ReplicatedMergeTreeQueue::getMutationCommands(
auto end = in_partition->second.lower_bound(desired_mutation_version);
if (end == in_partition->second.end() || end->first != desired_mutation_version)
LOG_WARNING(log, "Mutation with version {} not found in partition ID {} (trying to mutate part {}", desired_mutation_version, part->info.partition_id, part->name + ")");
LOG_WARNING(log,
"Mutation with version {} not found in partition ID {} (trying to mutate part {})",
desired_mutation_version,
part->info.partition_id,
part->name);
else
++end;
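
The recurring refactor in this file: previously the same format string was expanded twice, once by the logging macro and once by fmt::format for the out-parameter. Now the message is formatted exactly once into the out-parameter and the finished string is logged through fmt::runtime(). Before/after sketch (names assumed):

// before:
//   LOG_TRACE(log, format_str, a, b);
//   out_reason = fmt::format(format_str, a, b);
// after:
//   out_reason = fmt::format("... {} ... {} ...", a, b);
//   LOG_TRACE(log, fmt::runtime(out_reason));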


@@ -450,7 +450,8 @@ void MaterializedPostgreSQLConsumer::processReplicationMessage(const char * repl
if (replica_identity != 'd' && replica_identity != 'i')
{
LOG_WARNING(log,
"Table has replica identity {} - not supported. A table must have a primary key or a replica identity index");
"Table has replica identity {} - not supported. A table must have a primary key or a replica identity index",
replica_identity);
markTableAsSkipped(relation_id, table_name);
return;
}


@@ -1230,7 +1230,7 @@ bool StorageMergeTree::optimize(
constexpr const char * message = "Cannot OPTIMIZE table: {}";
if (disable_reason.empty())
disable_reason = "unknown reason";
LOG_INFO(log, message, disable_reason);
LOG_INFO(log, fmt::runtime(message), disable_reason);
if (local_context->getSettingsRef().optimize_throw_if_noop)
throw Exception(ErrorCodes::CANNOT_ASSIGN_OPTIMIZE, message, disable_reason);
@@ -1256,7 +1256,7 @@ bool StorageMergeTree::optimize(
constexpr const char * message = "Cannot OPTIMIZE table: {}";
if (disable_reason.empty())
disable_reason = "unknown reason";
LOG_INFO(log, message, disable_reason);
LOG_INFO(log, fmt::runtime(message), disable_reason);
if (local_context->getSettingsRef().optimize_throw_if_noop)
throw Exception(ErrorCodes::CANNOT_ASSIGN_OPTIMIZE, message, disable_reason);


@@ -1211,7 +1211,7 @@ void StorageReplicatedMergeTree::checkParts(bool skip_sanity_checks)
if (unexpected_parts_nonnew_rows > 0)
{
LOG_WARNING(log, sanity_report_fmt, getStorageID().getNameForLogs(),
LOG_WARNING(log, fmt::runtime(sanity_report_fmt), getStorageID().getNameForLogs(),
formatReadableQuantity(unexpected_parts_rows), formatReadableQuantity(total_rows_on_filesystem),
unexpected_parts.size(), unexpected_parts_rows, unexpected_parts_nonnew, unexpected_parts_nonnew_rows,
parts_to_fetch.size(), parts_to_fetch_blocks);
@@ -2203,7 +2203,7 @@ void StorageReplicatedMergeTree::executeClonePartFromShard(const LogEntry & entr
if (replica.empty())
throw Exception(ErrorCodes::NO_REPLICA_HAS_PART, "Not found active replica on shard {} to clone part {}", entry.source_shard, entry.new_part_name);
LOG_INFO(log, "Will clone part from shard " + entry.source_shard + " and replica " + replica);
LOG_INFO(log, "Will clone part from shard {} and replica {}", entry.source_shard, replica);
MutableDataPartPtr part;
@@ -2861,17 +2861,17 @@ bool StorageReplicatedMergeTree::processQueueEntry(ReplicatedMergeTreeQueue::Sel
if (e.code() == ErrorCodes::NO_REPLICA_HAS_PART)
{
/// If no one has the right part, probably not all replicas work; We will not write to log with Error level.
LOG_INFO(log, e.displayText());
LOG_INFO(log, fmt::runtime(e.displayText()));
}
else if (e.code() == ErrorCodes::ABORTED)
{
/// Interrupted merge or downloading a part is not an error.
LOG_INFO(log, e.message());
LOG_INFO(log, fmt::runtime(e.message()));
}
else if (e.code() == ErrorCodes::PART_IS_TEMPORARILY_LOCKED)
{
/// Part cannot be added temporarily
LOG_INFO(log, e.displayText());
LOG_INFO(log, fmt::runtime(e.displayText()));
cleanup_thread.wakeup();
}
else
@@ -4383,7 +4383,7 @@ bool StorageReplicatedMergeTree::optimize(
if (!partition_id.empty())
disable_reason += fmt::format(" (in partition {})", partition_id);
String message = fmt::format(message_fmt, disable_reason);
LOG_INFO(log, message);
LOG_INFO(log, fmt::runtime(message));
return handle_noop(message);
}
@@ -4397,7 +4397,7 @@ bool StorageReplicatedMergeTree::optimize(
if (create_result == CreateMergeEntryResult::MissingPart)
{
String message = "Can't create merge queue node in ZooKeeper, because some parts are missing";
LOG_TRACE(log, message);
LOG_TRACE(log, fmt::runtime(message));
return handle_noop(message);
}
@@ -4410,7 +4410,7 @@ bool StorageReplicatedMergeTree::optimize(
assert(try_no == max_retries);
String message = fmt::format("Can't create merge queue node in ZooKeeper, because log was updated in every of {} tries", try_no);
LOG_TRACE(log, message);
LOG_TRACE(log, fmt::runtime(message));
return handle_noop(message);
};
@@ -5569,7 +5569,7 @@ void StorageReplicatedMergeTree::fetchPartition(
&& e.code() != ErrorCodes::CANNOT_READ_ALL_DATA)
throw;
LOG_INFO(log, e.displayText());
LOG_INFO(log, fmt::runtime(e.displayText()));
}
return;
}
@@ -5706,7 +5706,7 @@ void StorageReplicatedMergeTree::fetchPartition(
&& e.code() != ErrorCodes::CANNOT_READ_ALL_DATA)
throw;
LOG_INFO(log, e.displayText());
LOG_INFO(log, fmt::runtime(e.displayText()));
}
if (!fetched)
@@ -6685,7 +6685,7 @@ void StorageReplicatedMergeTree::movePartitionToShard(
zkutil::KeeperMultiException::check(rc, ops, responses);
String task_znode_path = dynamic_cast<const Coordination::CreateResponse &>(*responses.back()).path_created;
LOG_DEBUG(log, "Created task for part movement between shards at " + task_znode_path);
LOG_DEBUG(log, "Created task for part movement between shards at {}", task_znode_path);
/// TODO(nv): Nice to have support for `replication_alter_partitions_sync`.
/// For now use the system.part_moves_between_shards table for status.
@@ -6824,7 +6824,7 @@ bool StorageReplicatedMergeTree::waitForShrinkingQueueSize(size_t queue_size, UI
bool StorageReplicatedMergeTree::dropPartImpl(
zkutil::ZooKeeperPtr & zookeeper, String part_name, LogEntry & entry, bool detach, bool throw_if_noop)
{
LOG_TRACE(log, "Will try to insert a log entry to DROP_RANGE for part: " + part_name);
LOG_TRACE(log, "Will try to insert a log entry to DROP_RANGE for part {}", part_name);
auto part_info = MergeTreePartInfo::fromPartName(part_name, format_version);