find {base,src,programs} -name '*.h' -or -name '*.cpp' | xargs grep -l -P 'LOG_\w+\([^,]+, "[^"]+" << [^<]+\);' | xargs sed -i -r -e 's/(LOG_\w+)\(([^,]+), "([^"]+)" << ([^<]+)\);/\1_FORMATTED(\2, "\3{}", \4);/'

Alexey Milovidov 2020-05-23 19:47:56 +03:00
parent 8d2e80a5e2
commit ee4ffbc332
51 changed files with 124 additions and 124 deletions
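
As a quick illustration of what the one-shot regex above does (a minimal sketch, assuming GNU sed, which the -r flag and \w in the command itself already imply), running the same substitution over one statement touched by this commit reproduces the corresponding replacement:

echo 'LOG_TRACE(log, "Received signal " << sig);' \
    | sed -r -e 's/(LOG_\w+)\(([^,]+), "([^"]+)" << ([^<]+)\);/\1_FORMATTED(\2, "\3{}", \4);/'
# prints: LOG_TRACE_FORMATTED(log, "Received signal {}", sig);

Note that the pattern only rewrites statements with a single trailing stream argument ("literal" << expr), which is why messages built from several << segments, such as the "DROP PARTITION query was successfully executed on ..." line further down, appear only as unchanged context in this diff.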


@ -180,7 +180,7 @@ public:
// levels and more info, but for completeness we log all signals
// here at trace level.
// Don't use strsignal here, because it's not thread-safe.
LOG_TRACE(log, "Received signal " << sig);
LOG_TRACE_FORMATTED(log, "Received signal {}", sig);
if (sig == Signals::StopThread)
{


@ -26,7 +26,7 @@ void ClusterCopier::init()
if (response.error != Coordination::ZOK)
return;
UInt64 version = ++task_description_version;
LOG_DEBUG(log, "Task description should be updated, local version " << version);
LOG_DEBUG_FORMATTED(log, "Task description should be updated, local version {}", version);
};
task_description_path = task_zookeeper_path + "/description";
@ -85,7 +85,7 @@ void ClusterCopier::discoverShardPartitions(const ConnectionTimeouts & timeouts,
{
TaskTable & task_table = task_shard->task_table;
LOG_INFO(log, "Discover partitions of shard " << task_shard->getDescription());
LOG_INFO_FORMATTED(log, "Discover partitions of shard {}", task_shard->getDescription());
auto get_partitions = [&] () { return getShardPartitions(timeouts, *task_shard); };
auto existing_partitions_names = retry(get_partitions, 60);
@ -221,7 +221,7 @@ void ClusterCopier::reloadTaskDescription()
if (code)
throw Exception("Can't get description node " + task_description_path, ErrorCodes::BAD_ARGUMENTS);
LOG_DEBUG(log, "Loading description, zxid=" << task_description_current_stat.czxid);
LOG_DEBUG_FORMATTED(log, "Loading description, zxid={}", task_description_current_stat.czxid);
auto config = getConfigurationFromXMLString(task_config_str);
/// Setup settings
@ -548,7 +548,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
{
if (e.code == Coordination::ZNODEEXISTS)
{
LOG_DEBUG(log, "Someone is already moving pieces " << current_partition_attach_is_active);
LOG_DEBUG_FORMATTED(log, "Someone is already moving pieces {}", current_partition_attach_is_active);
return TaskStatus::Active;
}
@ -614,7 +614,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
" ATTACH PARTITION " + partition_name +
" FROM " + getQuotedTable(helping_table);
LOG_DEBUG(log, "Executing ALTER query: " << query_alter_ast_string);
LOG_DEBUG_FORMATTED(log, "Executing ALTER query: {}", query_alter_ast_string);
try
{
@ -626,7 +626,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
PoolMode::GET_MANY,
ClusterExecutionMode::ON_EACH_NODE);
LOG_INFO(log, "Number of nodes that executed ALTER query successfully : " << toString(num_nodes));
LOG_INFO_FORMATTED(log, "Number of nodes that executed ALTER query successfully : {}", toString(num_nodes));
}
catch (...)
{
@ -647,7 +647,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
query_deduplicate_ast_string += " OPTIMIZE TABLE " + getQuotedTable(original_table) +
" PARTITION " + partition_name + " DEDUPLICATE;";
LOG_DEBUG(log, "Executing OPTIMIZE DEDUPLICATE query: " << query_alter_ast_string);
LOG_DEBUG_FORMATTED(log, "Executing OPTIMIZE DEDUPLICATE query: {}", query_alter_ast_string);
UInt64 num_nodes = executeQueryOnCluster(
task_table.cluster_push,
@ -832,7 +832,7 @@ bool ClusterCopier::tryDropPartitionPiece(
/// It is important, DROP PARTITION must be done synchronously
settings_push.replication_alter_partitions_sync = 2;
LOG_DEBUG(log, "Execute distributed DROP PARTITION: " << query);
LOG_DEBUG_FORMATTED(log, "Execute distributed DROP PARTITION: {}", query);
/// We have to drop partition_piece on each replica
size_t num_shards = executeQueryOnCluster(
cluster_push, query,
@ -1210,7 +1210,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
/// Load balancing
auto worker_node_holder = createTaskWorkerNodeAndWaitIfNeed(zookeeper, current_task_piece_status_path, is_unprioritized_task);
LOG_DEBUG(log, "Processing " << current_task_piece_status_path);
LOG_DEBUG_FORMATTED(log, "Processing {}", current_task_piece_status_path);
const String piece_status_path = partition_piece.getPartitionPieceShardsPath();
@ -1253,7 +1253,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
{
if (e.code == Coordination::ZNODEEXISTS)
{
LOG_DEBUG(log, "Someone is already processing " << current_task_piece_is_active_path);
LOG_DEBUG_FORMATTED(log, "Someone is already processing {}", current_task_piece_is_active_path);
return TaskStatus::Active;
}
@ -1387,7 +1387,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
create_query_push_ast->as<ASTCreateQuery &>().if_not_exists = true;
String query = queryToString(create_query_push_ast);
LOG_DEBUG(log, "Create destination tables. Query: " << query);
LOG_DEBUG_FORMATTED(log, "Create destination tables. Query: {}", query);
UInt64 shards = executeQueryOnCluster(task_table.cluster_push, query,
create_query_push_ast, &task_cluster->settings_push,
PoolMode::GET_MANY);
@ -1419,7 +1419,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
const auto & settings = context.getSettingsRef();
query_insert_ast = parseQuery(p_query, query, settings.max_query_size, settings.max_parser_depth);
LOG_DEBUG(log, "Executing INSERT query: " << query);
LOG_DEBUG_FORMATTED(log, "Executing INSERT query: {}", query);
}
try
@ -1513,7 +1513,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
create_query_push_ast->as<ASTCreateQuery &>().if_not_exists = true;
String query = queryToString(create_query_push_ast);
LOG_DEBUG(log, "Create destination tables. Query: " << query);
LOG_DEBUG_FORMATTED(log, "Create destination tables. Query: {}", query);
UInt64 shards = executeQueryOnCluster(task_table.cluster_push, query,
create_query_push_ast, &task_cluster->settings_push,
PoolMode::GET_MANY);
@ -1582,7 +1582,7 @@ void ClusterCopier::dropHelpingTables(const TaskTable & task_table)
const ClusterPtr & cluster_push = task_table.cluster_push;
Settings settings_push = task_cluster->settings_push;
LOG_DEBUG(log, "Execute distributed DROP TABLE: " << query);
LOG_DEBUG_FORMATTED(log, "Execute distributed DROP TABLE: {}", query);
/// We have to drop partition_piece on each replica
UInt64 num_nodes = executeQueryOnCluster(
cluster_push, query,
@ -1609,7 +1609,7 @@ void ClusterCopier::dropParticularPartitionPieceFromAllHelpingTables(const TaskT
const ClusterPtr & cluster_push = task_table.cluster_push;
Settings settings_push = task_cluster->settings_push;
LOG_DEBUG(log, "Execute distributed DROP PARTITION: " << query);
LOG_DEBUG_FORMATTED(log, "Execute distributed DROP PARTITION: {}", query);
/// We have to drop partition_piece on each replica
UInt64 num_nodes = executeQueryOnCluster(
cluster_push, query,
@ -1620,7 +1620,7 @@ void ClusterCopier::dropParticularPartitionPieceFromAllHelpingTables(const TaskT
LOG_DEBUG(log, "DROP PARTITION query was successfully executed on " << toString(num_nodes) << " nodes.");
}
LOG_DEBUG(log, "All helping tables dropped partition " << partition_name);
LOG_DEBUG_FORMATTED(log, "All helping tables dropped partition {}", partition_name);
}
String ClusterCopier::getRemoteCreateTable(const DatabaseAndTableName & table, Connection & connection, const Settings * settings)
@ -1724,7 +1724,7 @@ std::set<String> ClusterCopier::getShardPartitions(const ConnectionTimeouts & ti
const auto & settings = context.getSettingsRef();
ASTPtr query_ast = parseQuery(parser_query, query, settings.max_query_size, settings.max_parser_depth);
LOG_DEBUG(log, "Computing destination partition set, executing query: " << query);
LOG_DEBUG_FORMATTED(log, "Computing destination partition set, executing query: {}", query);
Context local_context = context;
local_context.setSettings(task_cluster->settings_pull);


@ -183,11 +183,11 @@ public:
switch (rsp.type)
{
case Coordination::CREATED:
LOG_DEBUG(logger, "CleanStateClock change: CREATED, at " << rsp.path);
LOG_DEBUG_FORMATTED(logger, "CleanStateClock change: CREATED, at {}", rsp.path);
stale->store(true);
break;
case Coordination::CHANGED:
LOG_DEBUG(logger, "CleanStateClock change: CHANGED, at" << rsp.path);
LOG_DEBUG_FORMATTED(logger, "CleanStateClock change: CHANGED, at{}", rsp.path);
stale->store(true);
}
}


@ -211,7 +211,7 @@ try
/// Lock path directory before read
status.emplace(context->getPath() + "status");
LOG_DEBUG(log, "Loading metadata from " << context->getPath());
LOG_DEBUG_FORMATTED(log, "Loading metadata from {}", context->getPath());
loadMetadataSystem(*context);
attachSystemTables();
loadMetadata(*context);


@ -171,7 +171,7 @@ void ODBCHandler::handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Ne
else
{
std::string query = params.get("query");
LOG_TRACE(log, "Query: " << query);
LOG_TRACE_FORMATTED(log, "Query: {}", query);
BlockOutputStreamPtr writer = FormatFactory::instance().getOutput(format, out, *sample_block, context);
auto pool = getPool(connection_string);


@ -241,7 +241,7 @@ void HTTPHandler::processQuery(
CurrentThread::QueryScope query_scope(context);
LOG_TRACE(log, "Request URI: " << request.getURI());
LOG_TRACE_FORMATTED(log, "Request URI: {}", request.getURI());
std::istream & istr = request.stream();


@ -53,7 +53,7 @@ void InterserverIOHTTPHandler::processQuery(Poco::Net::HTTPServerRequest & reque
{
HTMLForm params(request);
LOG_TRACE(log, "Request URI: " << request.getURI());
LOG_TRACE_FORMATTED(log, "Request URI: {}", request.getURI());
String endpoint_name = params.get("endpoint");
bool compress = params.get("compress") == "true";


@ -197,7 +197,7 @@ void MySQLHandler::finishHandshake(MySQLProtocol::HandshakeResponse & packet)
read_bytes(3); /// We can find out whether it is SSLRequest of HandshakeResponse by first 3 bytes.
size_t payload_size = unalignedLoad<uint32_t>(buf) & 0xFFFFFFu;
LOG_TRACE(log, "payload size: " << payload_size);
LOG_TRACE_FORMATTED(log, "payload size: {}", payload_size);
if (payload_size == SSL_REQUEST_PAYLOAD_SIZE)
{
@ -245,7 +245,7 @@ void MySQLHandler::comInitDB(ReadBuffer & payload)
{
String database;
readStringUntilEOF(database, payload);
LOG_DEBUG(log, "Setting current database to " << database);
LOG_DEBUG_FORMATTED(log, "Setting current database to {}", database);
connection_context.setCurrentDatabase(database);
packet_sender->sendPacket(OK_Packet(0, client_capability_flags, 0, 0, 1), true);
}


@ -32,7 +32,7 @@ MySQLHandlerFactory::MySQLHandlerFactory(IServer & server_)
}
catch (...)
{
LOG_TRACE(log, "Failed to create SSL context. SSL will be disabled. Error: " << getCurrentExceptionMessage(false));
LOG_TRACE_FORMATTED(log, "Failed to create SSL context. SSL will be disabled. Error: {}", getCurrentExceptionMessage(false));
ssl_enabled = false;
}
@ -43,7 +43,7 @@ MySQLHandlerFactory::MySQLHandlerFactory(IServer & server_)
}
catch (...)
{
LOG_TRACE(log, "Failed to read RSA key pair from server certificate. Error: " << getCurrentExceptionMessage(false));
LOG_TRACE_FORMATTED(log, "Failed to read RSA key pair from server certificate. Error: {}", getCurrentExceptionMessage(false));
generateRSAKeys();
}
#endif


@ -101,11 +101,11 @@ void setupTmpPath(Logger * log, const std::string & path)
{
if (it->isFile() && startsWith(it.name(), "tmp"))
{
LOG_DEBUG(log, "Removing old temporary file " << it->path());
LOG_DEBUG_FORMATTED(log, "Removing old temporary file {}", it->path());
it->remove();
}
else
LOG_DEBUG(log, "Skipped file in temporary path " << it->path());
LOG_DEBUG_FORMATTED(log, "Skipped file in temporary path {}", it->path());
}
}
@ -349,7 +349,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
if (rlim.rlim_cur == rlim.rlim_max)
{
LOG_DEBUG(log, "rlimit on number of file descriptors is " << rlim.rlim_cur);
LOG_DEBUG_FORMATTED(log, "rlimit on number of file descriptors is {}", rlim.rlim_cur);
}
else
{
@ -579,7 +579,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
if (max_server_memory_usage == 0)
{
max_server_memory_usage = default_max_server_memory_usage;
LOG_INFO(log, "Setting max_server_memory_usage was set to " << formatReadableSizeWithBinarySuffix(max_server_memory_usage));
LOG_INFO_FORMATTED(log, "Setting max_server_memory_usage was set to {}", formatReadableSizeWithBinarySuffix(max_server_memory_usage));
}
else if (max_server_memory_usage > default_max_server_memory_usage)
{


@ -35,7 +35,7 @@ public:
{
try
{
LOG_TRACE(log, "TCP Request. Address: " << socket.peerAddress().toString());
LOG_TRACE_FORMATTED(log, "TCP Request. Address: {}", socket.peerAddress().toString());
return new TCPHandler(server, socket);
}
catch (const Poco::Net::NetException &)


@ -202,7 +202,7 @@ bool DNSResolver::updateCache()
}
if (!lost_hosts.empty())
LOG_INFO(&Logger::get("DNSResolver"), "Cached hosts not found: " << lost_hosts);
LOG_INFO_FORMATTED(&Logger::get("DNSResolver"), "Cached hosts not found: {}", lost_hosts);
return updated;
}


@ -102,7 +102,7 @@ void LazyPipeFDs::tryIncreaseSize(int desired_size)
if (-1 == fcntl(fds_rw[1], F_SETPIPE_SZ, pipe_size * 2) && errno != EPERM)
throwFromErrno("Cannot increase pipe capacity to " + std::to_string(pipe_size * 2), ErrorCodes::CANNOT_FCNTL);
LOG_TRACE(log, "Pipe capacity is " << formatReadableSizeWithBinarySuffix(std::min(pipe_size, desired_size)));
LOG_TRACE_FORMATTED(log, "Pipe capacity is {}", formatReadableSizeWithBinarySuffix(std::min(pipe_size, desired_size)));
}
#else
(void)desired_size;


@ -188,7 +188,7 @@ int ShellCommand::tryWait()
{
wait_called = true;
LOG_TRACE(getLogger(), "Will wait for shell command pid " << pid);
LOG_TRACE_FORMATTED(getLogger(), "Will wait for shell command pid {}", pid);
int status = 0;
if (-1 == waitpid(pid, &status, 0))


@ -1068,7 +1068,7 @@ public:
# pragma GCC diagnostic pop
String pem(pem_buf, pem_size);
LOG_TRACE(log, "Key: " << pem);
LOG_TRACE_FORMATTED(log, "Key: {}", pem);
AuthMoreData data(pem);
packet_sender->sendPacket(data, true);


@ -149,7 +149,7 @@ void MergingAggregatedMemoryEfficientBlockInputStream::cancel(bool kill)
* (example: connection reset during distributed query execution)
* - then don't care.
*/
LOG_ERROR(log, "Exception while cancelling " << input.stream->getName());
LOG_ERROR_FORMATTED(log, "Exception while cancelling {}", input.stream->getName());
}
}
}


@ -134,7 +134,7 @@ public:
* (for example, the connection is broken for distributed query processing)
* - then do not care.
*/
LOG_ERROR(log, "Exception while cancelling " << input->getName());
LOG_ERROR_FORMATTED(log, "Exception while cancelling {}", input->getName());
}
}
}


@ -381,7 +381,7 @@ void DatabaseOnDisk::iterateMetadataFiles(const Context & context, const Iterati
}
else
{
LOG_INFO(log, "Removing file " << getMetadataPath() + file_name);
LOG_INFO_FORMATTED(log, "Removing file {}", getMetadataPath() + file_name);
Poco::File(getMetadataPath() + file_name).remove();
}
};
@ -406,7 +406,7 @@ void DatabaseOnDisk::iterateMetadataFiles(const Context & context, const Iterati
else if (endsWith(dir_it.name(), ".sql.tmp"))
{
/// There are files .sql.tmp - delete
LOG_INFO(log, "Removing file " << dir_it->path());
LOG_INFO_FORMATTED(log, "Removing file {}", dir_it->path());
Poco::File(dir_it->path()).remove();
}
else if (endsWith(dir_it.name(), ".sql"))


@ -13,7 +13,7 @@ RegionsHierarchies::RegionsHierarchies(IRegionsHierarchiesDataProviderPtr data_p
for (const auto & name : data_provider->listCustomHierarchies())
{
LOG_DEBUG(log, "Adding regions hierarchy for " << name);
LOG_DEBUG_FORMATTED(log, "Adding regions hierarchy for {}", name);
data.emplace(name, data_provider->getHierarchySource(name));
}


@ -55,7 +55,7 @@ void RegionsNames::reload()
if (!names_source || !names_source->isModified())
continue;
LOG_DEBUG(log, "Reloading regions names for language: " << language);
LOG_DEBUG_FORMATTED(log, "Reloading regions names for language: {}", language);
auto names_reader = names_source->createReader();


@ -227,7 +227,7 @@ LocalDateTime MySQLDictionarySource::getLastModification(mysqlxx::Pool::Entry &
if (!update_time_value.isNull())
{
modification_time = update_time_value.getDateTime();
LOG_TRACE(log, "Got modification time: " << modification_time);
LOG_TRACE_FORMATTED(log, "Got modification time: {}", modification_time);
}
/// fetch remaining rows to avoid "commands out of sync" error


@ -91,7 +91,7 @@ bool DiskLocal::tryReserve(UInt64 bytes)
std::lock_guard lock(DiskLocal::reservation_mutex);
if (bytes == 0)
{
LOG_DEBUG(&Logger::get("DiskLocal"), "Reserving 0 bytes on disk " << backQuote(name));
LOG_DEBUG_FORMATTED(&Logger::get("DiskLocal"), "Reserving 0 bytes on disk {}", backQuote(name));
++reservation_count;
return true;
}


@ -556,7 +556,7 @@ std::unique_ptr<WriteBufferFromFileBase> DiskS3::writeFile(const String & path,
void DiskS3::remove(const String & path)
{
LOG_DEBUG(&Logger::get("DiskS3"), "Remove file by path: " << backQuote(metadata_path + path));
LOG_DEBUG_FORMATTED(&Logger::get("DiskS3"), "Remove file by path: {}", backQuote(metadata_path + path));
Poco::File file(metadata_path + path);
if (file.isFile())
@ -610,7 +610,7 @@ bool DiskS3::tryReserve(UInt64 bytes)
std::lock_guard lock(reservation_mutex);
if (bytes == 0)
{
LOG_DEBUG(&Logger::get("DiskS3"), "Reserving 0 bytes on s3 disk " << backQuote(name));
LOG_DEBUG_FORMATTED(&Logger::get("DiskS3"), "Reserving 0 bytes on s3 disk {}", backQuote(name));
++reservation_count;
return true;
}


@ -20,7 +20,7 @@ Aws::Client::ClientConfigurationPerRequest ProxyListConfiguration::getConfigurat
cfg.proxyHost = proxies[index].getHost();
cfg.proxyPort = proxies[index].getPort();
LOG_DEBUG(&Logger::get("AWSClient"), "Use proxy: " << proxies[index].toString());
LOG_DEBUG_FORMATTED(&Logger::get("AWSClient"), "Use proxy: {}", proxies[index].toString());
return cfg;
}


@ -21,7 +21,7 @@ ProxyResolverConfiguration::ProxyResolverConfiguration(const Poco::URI & endpoin
Aws::Client::ClientConfigurationPerRequest ProxyResolverConfiguration::getConfiguration(const Aws::Http::HttpRequest &)
{
LOG_DEBUG(&Logger::get("AWSClient"), "Obtain proxy using resolver: " << endpoint.toString());
LOG_DEBUG_FORMATTED(&Logger::get("AWSClient"), "Obtain proxy using resolver: {}", endpoint.toString());
/// 1 second is enough for now.
/// TODO: Make timeouts configurable.


@ -71,7 +71,7 @@ namespace
proxies.push_back(proxy_uri);
LOG_DEBUG(&Logger::get("DiskS3"), "Configured proxy: " << proxy_uri.toString());
LOG_DEBUG_FORMATTED(&Logger::get("DiskS3"), "Configured proxy: {}", proxy_uri.toString());
}
if (!proxies.empty())


@ -98,7 +98,7 @@ void AIOContextPool::fulfillPromises(const io_event events[], const int num_even
const auto it = promises.find(completed_id);
if (it == std::end(promises))
{
LOG_ERROR(&Poco::Logger::get("AIOcontextPool"), "Found io_event with unknown id " << completed_id);
LOG_ERROR_FORMATTED(&Poco::Logger::get("AIOcontextPool"), "Found io_event with unknown id {}", completed_id);
continue;
}


@ -127,7 +127,7 @@ namespace detail
if (!credentials.getUsername().empty())
credentials.authenticate(request);
LOG_TRACE((&Logger::get("ReadWriteBufferFromHTTP")), "Sending request to " << uri.toString());
LOG_TRACE_FORMATTED((&Logger::get("ReadWriteBufferFromHTTP")), "Sending request to {}", uri.toString());
auto sess = session->getSession();


@ -15,7 +15,7 @@ WriteBufferFromHTTP::WriteBufferFromHTTP(
request.setHost(uri.getHost());
request.setChunkedTransferEncoding(true);
LOG_TRACE((&Logger::get("WriteBufferToHTTP")), "Sending request to " << uri.toString());
LOG_TRACE_FORMATTED((&Logger::get("WriteBufferToHTTP")), "Sending request to {}", uri.toString());
ostr = &session->sendRequest(request);
}


@ -100,7 +100,7 @@ void WriteBufferFromS3::initiate()
if (outcome.IsSuccess())
{
upload_id = outcome.GetResult().GetUploadId();
LOG_DEBUG(log, "Multipart upload initiated. Upload id: " << upload_id);
LOG_DEBUG_FORMATTED(log, "Multipart upload initiated. Upload id: {}", upload_id);
}
else
throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
@ -170,7 +170,7 @@ void WriteBufferFromS3::complete()
auto outcome = client_ptr->CompleteMultipartUpload(req);
if (outcome.IsSuccess())
LOG_DEBUG(log, "Multipart upload completed. Upload_id: " << upload_id);
LOG_DEBUG_FORMATTED(log, "Multipart upload completed. Upload_id: {}", upload_id);
else
throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
}


@ -551,7 +551,7 @@ bool Aggregator::executeOnBlock(Columns columns, UInt64 num_rows, AggregatedData
result.init(method_chosen);
result.keys_size = params.keys_size;
result.key_sizes = key_sizes;
LOG_TRACE(log, "Aggregation method: " << result.getMethodName());
LOG_TRACE_FORMATTED(log, "Aggregation method: {}", result.getMethodName());
}
if (isCancelled())


@ -1761,7 +1761,7 @@ void Context::updateStorageConfiguration(const Poco::Util::AbstractConfiguration
}
catch (Exception & e)
{
LOG_ERROR(shared->log, "An error has occured while reloading storage policies, storage policies were not applied: " << e.message());
LOG_ERROR_FORMATTED(shared->log, "An error has occured while reloading storage policies, storage policies were not applied: {}", e.message());
}
}
}


@ -579,7 +579,7 @@ bool DDLWorker::tryExecuteQuery(const String & query, const DDLTask & task, Exec
}
status = ExecutionStatus(0);
LOG_DEBUG(log, "Executed query: " << query);
LOG_DEBUG_FORMATTED(log, "Executed query: {}", query);
return true;
}
@ -631,7 +631,7 @@ void DDLWorker::processTask(DDLTask & task, const ZooKeeperPtr & zookeeper)
ASTPtr rewritten_ast = task.query_on_cluster->getRewrittenASTWithoutOnCluster(task.address_in_cluster.default_database);
String rewritten_query = queryToString(rewritten_ast);
LOG_DEBUG(log, "Executing query: " << rewritten_query);
LOG_DEBUG_FORMATTED(log, "Executing query: {}", rewritten_query);
if (auto * query_with_table = dynamic_cast<ASTQueryWithTableAndOutput *>(rewritten_ast.get()); query_with_table)
{
@ -1003,7 +1003,7 @@ void DDLWorker::runMainThread()
{
if (Coordination::isHardwareError(e.code))
{
LOG_DEBUG(log, "Recovering ZooKeeper session after: " << getCurrentExceptionMessage(false));
LOG_DEBUG_FORMATTED(log, "Recovering ZooKeeper session after: {}", getCurrentExceptionMessage(false));
while (!stop_flag)
{
@ -1023,7 +1023,7 @@ void DDLWorker::runMainThread()
}
else if (e.code == Coordination::ZNONODE)
{
LOG_ERROR(log, "ZooKeeper error: " << getCurrentExceptionMessage(true));
LOG_ERROR_FORMATTED(log, "ZooKeeper error: {}", getCurrentExceptionMessage(true));
}
else
{


@ -422,7 +422,7 @@ void HashJoin::setSampleBlock(const Block & block)
/// You have to restore this lock if you call the function outside of ctor.
//std::unique_lock lock(rwlock);
LOG_DEBUG(log, "setSampleBlock: " << block.dumpStructure());
LOG_DEBUG_FORMATTED(log, "setSampleBlock: {}", block.dumpStructure());
if (!empty())
return;


@ -95,7 +95,7 @@ void ThreadStatus::setupState(const ThreadGroupStatusPtr & thread_group_)
Int32 new_os_thread_priority = settings.os_thread_priority;
if (new_os_thread_priority && hasLinuxCapability(CAP_SYS_NICE))
{
LOG_TRACE(log, "Setting nice to " << new_os_thread_priority);
LOG_TRACE_FORMATTED(log, "Setting nice to {}", new_os_thread_priority);
if (0 != setpriority(PRIO_PROCESS, thread_id, new_os_thread_priority))
throwFromErrno("Cannot 'setpriority'", ErrorCodes::CANNOT_SET_THREAD_PRIORITY);
@ -221,7 +221,7 @@ void ThreadStatus::detachQuery(bool exit_if_already_detached, bool thread_exits)
LOG_TRACE_FORMATTED(log, "Resetting nice");
if (0 != setpriority(PRIO_PROCESS, thread_id, 0))
LOG_ERROR(log, "Cannot 'setpriority' back to zero: " << errnoToString(ErrorCodes::CANNOT_SET_THREAD_PRIORITY, errno));
LOG_ERROR_FORMATTED(log, "Cannot 'setpriority' back to zero: {}", errnoToString(ErrorCodes::CANNOT_SET_THREAD_PRIORITY, errno));
os_thread_priority = 0;
}


@ -105,7 +105,7 @@ static void logQuery(const String & query, const Context & context, bool interna
{
if (internal)
{
LOG_DEBUG(&Logger::get("executeQuery"), "(internal) " << joinLines(query));
LOG_DEBUG_FORMATTED(&Logger::get("executeQuery"), "(internal) {}", joinLines(query));
}
else
{


@ -486,7 +486,7 @@ void PipelineExecutor::execute(size_t num_threads)
catch (...)
{
#ifndef NDEBUG
LOG_TRACE(log, "Exception while executing query. Current state:\n" << dumpPipeline());
LOG_TRACE_FORMATTED(log, "Exception while executing query. Current state:\n{}", dumpPipeline());
#endif
throw;
}


@ -612,7 +612,7 @@ private:
try
{
Poco::URI url(base_url, "/schemas/ids/" + std::to_string(id));
LOG_TRACE((&Logger::get("AvroConfluentRowInputFormat")), "Fetching schema id = " << id);
LOG_TRACE_FORMATTED((&Logger::get("AvroConfluentRowInputFormat")), "Fetching schema id = {}", id);
/// One second for connect/send/receive. Just in case.
ConnectionTimeouts timeouts({1, 0}, {1, 0}, {1, 0});


@ -38,7 +38,7 @@ ReadBufferFromKafkaConsumer::ReadBufferFromKafkaConsumer(
// called (synchroniously, during poll) when we enter the consumer group
consumer->set_assignment_callback([this](const cppkafka::TopicPartitionList & topic_partitions)
{
LOG_TRACE(log, "Topics/partitions assigned: " << topic_partitions);
LOG_TRACE_FORMATTED(log, "Topics/partitions assigned: {}", topic_partitions);
assignment = topic_partitions;
});
@ -47,7 +47,7 @@ ReadBufferFromKafkaConsumer::ReadBufferFromKafkaConsumer(
{
// Rebalance is happening now, and now we have a chance to finish the work
// with topics/partitions we were working with before rebalance
LOG_TRACE(log, "Rebalance initiated. Revoking partitions: " << topic_partitions);
LOG_TRACE_FORMATTED(log, "Rebalance initiated. Revoking partitions: {}", topic_partitions);
// we can not flush data to target from that point (it is pulled, not pushed)
// so the best we can now it to
@ -70,13 +70,13 @@ ReadBufferFromKafkaConsumer::ReadBufferFromKafkaConsumer(
// }
// catch (cppkafka::HandleException & e)
// {
// LOG_WARNING(log, "Commit error: " << e.what());
// LOG_WARNING_FORMATTED(log, "Commit error: {}", e.what());
// }
});
consumer->set_rebalance_error_callback([this](cppkafka::Error err)
{
LOG_ERROR(log, "Rebalance error: " << err);
LOG_ERROR_FORMATTED(log, "Rebalance error: {}", err);
});
}
@ -150,7 +150,7 @@ void ReadBufferFromKafkaConsumer::commit()
}
catch (const cppkafka::HandleException & e)
{
LOG_ERROR(log, "Exception during commit attempt: " << e.what());
LOG_ERROR_FORMATTED(log, "Exception during commit attempt: {}", e.what());
}
--max_retries;
}
@ -176,7 +176,7 @@ void ReadBufferFromKafkaConsumer::subscribe()
<< boost::algorithm::join(consumer->get_subscription(), ", ")
<< " ]");
LOG_TRACE(log, "Already assigned to : " << assignment);
LOG_TRACE_FORMATTED(log, "Already assigned to : {}", assignment);
size_t max_retries = 5;
@ -223,7 +223,7 @@ void ReadBufferFromKafkaConsumer::unsubscribe()
}
catch (const cppkafka::HandleException & e)
{
LOG_ERROR(log, "Exception from ReadBufferFromKafkaConsumer::unsubscribe: " << e.what());
LOG_ERROR_FORMATTED(log, "Exception from ReadBufferFromKafkaConsumer::unsubscribe: {}", e.what());
}
}
@ -340,7 +340,7 @@ bool ReadBufferFromKafkaConsumer::nextImpl()
++current;
// TODO: should throw exception instead
LOG_ERROR(log, "Consumer error: " << err);
LOG_ERROR_FORMATTED(log, "Consumer error: {}", err);
return false;
}


@ -82,7 +82,7 @@ void Service::processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & /*bo
++data.current_table_sends;
SCOPE_EXIT({--data.current_table_sends;});
LOG_TRACE(log, "Sending part " << part_name);
LOG_TRACE_FORMATTED(log, "Sending part {}", part_name);
try
{


@ -267,7 +267,7 @@ void IMergeTreeDataPart::removeIfNeeded()
if (state == State::DeleteOnDestroy)
{
LOG_TRACE(storage.log, "Removed part from old location " << path);
LOG_TRACE_FORMATTED(storage.log, "Removed part from old location {}", path);
}
}
catch (...)
@ -812,7 +812,7 @@ void IMergeTreeDataPart::renameToDetached(const String & prefix) const
void IMergeTreeDataPart::makeCloneInDetached(const String & prefix) const
{
assertOnDisk();
LOG_INFO(storage.log, "Detaching " << relative_path);
LOG_INFO_FORMATTED(storage.log, "Detaching {}", relative_path);
String destination_path = storage.relative_data_path + getRelativePathForDetachedPart(prefix);


@ -1144,7 +1144,7 @@ void MergeTreeData::clearOldTemporaryDirectories(ssize_t custom_directories_life
{
if (disk->isDirectory(it->path()) && isOldPartDirectory(disk, it->path(), deadline))
{
LOG_WARNING(log, "Removing temporary directory " << fullPath(disk, it->path()));
LOG_WARNING_FORMATTED(log, "Removing temporary directory {}", fullPath(disk, it->path()));
disk->removeRecursive(it->path());
}
}
@ -1281,7 +1281,7 @@ void MergeTreeData::clearPartsFromFilesystem(const DataPartsVector & parts_to_re
{
pool.scheduleOrThrowOnError([&]
{
LOG_DEBUG(log, "Removing part from filesystem " << part->name);
LOG_DEBUG_FORMATTED(log, "Removing part from filesystem {}", part->name);
part->remove();
});
}
@ -1292,7 +1292,7 @@ void MergeTreeData::clearPartsFromFilesystem(const DataPartsVector & parts_to_re
{
for (const DataPartPtr & part : parts_to_remove)
{
LOG_DEBUG(log, "Removing part from filesystem " << part->name);
LOG_DEBUG_FORMATTED(log, "Removing part from filesystem {}", part->name);
part->remove();
}
}
@ -1684,7 +1684,7 @@ void MergeTreeData::PartsTemporaryRename::tryRenameAll()
catch (...)
{
old_and_new_names.resize(i);
LOG_WARNING(storage.log, "Cannot rename parts to perform operation on them: " << getCurrentExceptionMessage(false));
LOG_WARNING_FORMATTED(storage.log, "Cannot rename parts to perform operation on them: {}", getCurrentExceptionMessage(false));
throw;
}
}
@ -2004,7 +2004,7 @@ restore_covered)
if (restore_covered && part->info.level == 0)
{
LOG_WARNING(log, "Will not recover parts covered by zero-level part " << part->name);
LOG_WARNING_FORMATTED(log, "Will not recover parts covered by zero-level part {}", part->name);
return;
}
@ -2085,7 +2085,7 @@ restore_covered)
for (const String & name : restored)
{
LOG_INFO(log, "Activated part " << name);
LOG_INFO_FORMATTED(log, "Activated part {}", name);
}
if (error)
@ -2104,7 +2104,7 @@ void MergeTreeData::tryRemovePartImmediately(DataPartPtr && part)
{
auto lock = lockParts();
LOG_TRACE(log, "Trying to immediately remove part " << part->getNameWithState());
LOG_TRACE_FORMATTED(log, "Trying to immediately remove part {}", part->getNameWithState());
auto it = data_parts_by_info.find(part->info);
if (it == data_parts_by_info.end() || (*it).get() != part.get())
@ -2130,7 +2130,7 @@ void MergeTreeData::tryRemovePartImmediately(DataPartPtr && part)
}
removePartsFinally({part_to_delete});
LOG_TRACE(log, "Removed part " << part_to_delete->name);
LOG_TRACE_FORMATTED(log, "Removed part {}", part_to_delete->name);
}
@ -2779,7 +2779,7 @@ void MergeTreeData::dropDetached(const ASTPtr & partition, bool part, const Cont
{
const auto & [path, disk] = renamed_parts.old_part_name_to_path_and_disk[old_name];
disk->removeRecursive(path + "detached/" + new_name + "/");
LOG_DEBUG(log, "Dropped detached part " << old_name);
LOG_DEBUG_FORMATTED(log, "Dropped detached part {}", old_name);
old_name.clear();
}
}
@ -2819,7 +2819,7 @@ MergeTreeData::MutableDataPartsVector MergeTreeData::tryLoadPartsToAttach(const
{
continue;
}
LOG_DEBUG(log, "Found part " << name);
LOG_DEBUG_FORMATTED(log, "Found part {}", name);
active_parts.add(name);
name_to_disk[name] = disk;
}
@ -2849,7 +2849,7 @@ MergeTreeData::MutableDataPartsVector MergeTreeData::tryLoadPartsToAttach(const
loaded_parts.reserve(renamed_parts.old_and_new_names.size());
for (const auto & part_names : renamed_parts.old_and_new_names)
{
LOG_DEBUG(log, "Checking part " << part_names.second);
LOG_DEBUG_FORMATTED(log, "Checking part {}", part_names.second);
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + part_names.first, name_to_disk[part_names.first]);
MutableDataPartPtr part = createPart(part_names.first, single_disk_volume, source_dir + part_names.second);
loadPartAndFixMetadataImpl(part);
@ -3042,7 +3042,7 @@ void MergeTreeData::Transaction::rollback()
for (const auto & part : precommitted_parts)
ss << " " << part->relative_path;
ss << ".";
LOG_DEBUG(data.log, "Undoing transaction." << ss.str());
LOG_DEBUG_FORMATTED(data.log, "Undoing transaction.{}", ss.str());
data.removePartsFromWorkingSet(
DataPartsVector(precommitted_parts.begin(), precommitted_parts.end()),


@ -640,7 +640,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor
size_t sum_input_rows_upper_bound = merge_entry->total_rows_count;
MergeAlgorithm merge_alg = chooseMergeAlgorithm(parts, sum_input_rows_upper_bound, gathering_columns, deduplicate, need_remove_expired_values);
LOG_DEBUG(log, "Selected MergeAlgorithm: " << ((merge_alg == MergeAlgorithm::Vertical) ? "Vertical" : "Horizontal"));
LOG_DEBUG_FORMATTED(log, "Selected MergeAlgorithm: {}", ((merge_alg == MergeAlgorithm::Vertical) ? "Vertical" : "Horizontal"));
/// Note: this is done before creating input streams, because otherwise data.data_parts_mutex
/// (which is locked in data.getTotalActiveSizeInBytes())


@ -107,7 +107,7 @@ size_t MergeTreeDataSelectExecutor::getApproximateTotalRowsToRead(
size_t rows_count = 0;
/// We will find out how many rows we would have read without sampling.
LOG_DEBUG(log, "Preliminary index scan with condition: " << key_condition.toString());
LOG_DEBUG_FORMATTED(log, "Preliminary index scan with condition: {}", key_condition.toString());
for (const auto & part : parts)
{
@ -329,7 +329,7 @@ Pipes MergeTreeDataSelectExecutor::readFromParts(
if (relative_sample_size > 1)
{
relative_sample_size = convertAbsoluteSampleSizeToRelative(select_sample_size, approx_total_rows);
LOG_DEBUG(log, "Selected relative sample size: " << toString(relative_sample_size));
LOG_DEBUG_FORMATTED(log, "Selected relative sample size: {}", toString(relative_sample_size));
}
/// SAMPLE 1 is the same as the absence of SAMPLE.
@ -342,7 +342,7 @@ Pipes MergeTreeDataSelectExecutor::readFromParts(
if (relative_sample_offset > 1)
{
relative_sample_offset = convertAbsoluteSampleSizeToRelative(select_sample_offset, approx_total_rows);
LOG_DEBUG(log, "Selected relative sample offset: " << toString(relative_sample_offset));
LOG_DEBUG_FORMATTED(log, "Selected relative sample offset: {}", toString(relative_sample_offset));
}
}
@ -534,9 +534,9 @@ Pipes MergeTreeDataSelectExecutor::readFromParts(
return {};
}
LOG_DEBUG(log, "Key condition: " << key_condition.toString());
LOG_DEBUG_FORMATTED(log, "Key condition: {}", key_condition.toString());
if (minmax_idx_condition)
LOG_DEBUG(log, "MinMax index condition: " << minmax_idx_condition->toString());
LOG_DEBUG_FORMATTED(log, "MinMax index condition: {}", minmax_idx_condition->toString());
/// PREWHERE
String prewhere_column;


@ -193,7 +193,7 @@ MergeTreeData::DataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEnt
if (moves_blocker.isCancelled())
throw Exception("Cancelled moving parts.", ErrorCodes::ABORTED);
LOG_TRACE(log, "Cloning part " << moving_part.part->name);
LOG_TRACE_FORMATTED(log, "Cloning part {}", moving_part.part->name);
moving_part.part->makeCloneOnDiskDetached(moving_part.reserved_space);
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + moving_part.part->name, moving_part.reserved_space->getDisk());


@ -178,7 +178,7 @@ void MergeTreeReadPool::profileFeedback(const ReadBufferFromFileBase::ProfileInf
--backoff_state.current_threads;
ProfileEvents::increment(ProfileEvents::ReadBackoff);
LOG_DEBUG(log, "Will lower number of threads to " << backoff_state.current_threads);
LOG_DEBUG_FORMATTED(log, "Will lower number of threads to {}", backoff_state.current_threads);
}


@ -184,7 +184,7 @@ void ReplicatedMergeTreePartCheckThread::searchForMissingPart(const String & par
CheckResult ReplicatedMergeTreePartCheckThread::checkPart(const String & part_name)
{
LOG_WARNING(log, "Checking part " << part_name);
LOG_WARNING_FORMATTED(log, "Checking part {}", part_name);
ProfileEvents::increment(ProfileEvents::ReplicatedPartChecks);
/// If the part is still in the PreCommitted -> Committed transition, it is not lost


@ -49,7 +49,7 @@ bool ReplicatedMergeTreeQueue::isVirtualPart(const MergeTreeData::DataPartPtr &
bool ReplicatedMergeTreeQueue::load(zkutil::ZooKeeperPtr zookeeper)
{
auto queue_path = replica_path + "/queue";
LOG_DEBUG(log, "Loading queue from " << queue_path);
LOG_DEBUG_FORMATTED(log, "Loading queue from {}", queue_path);
bool updated = false;
std::optional<time_t> min_unprocessed_insert_time_changed;
@ -234,7 +234,7 @@ void ReplicatedMergeTreeQueue::updateStateOnQueueEntryRemoval(
if (entry->type == LogEntry::ALTER_METADATA)
{
LOG_TRACE(log, "Finishing metadata alter with version " << entry->alter_version);
LOG_TRACE_FORMATTED(log, "Finishing metadata alter with version {}", entry->alter_version);
alter_sequence.finishMetadataAlter(entry->alter_version, state_lock);
}
}


@ -587,7 +587,7 @@ void StorageDistributed::startup()
if (inc > file_names_increment.value)
file_names_increment.value.store(inc);
}
LOG_DEBUG(log, "Auto-increment is " << file_names_increment.value);
LOG_DEBUG_FORMATTED(log, "Auto-increment is {}", file_names_increment.value);
}
@ -816,7 +816,7 @@ void StorageDistributed::renameOnDisk(const String & new_path_to_table_data)
auto new_path = path + new_path_to_table_data;
Poco::File(path + relative_data_path).renameTo(new_path);
LOG_DEBUG(log, "Updating path to " << new_path);
LOG_DEBUG_FORMATTED(log, "Updating path to {}", new_path);
std::lock_guard lock(cluster_nodes_mutex);
for (auto & node : cluster_nodes_data)


@ -384,14 +384,14 @@ Int64 StorageMergeTree::startMutation(const MutationCommands & commands, String
auto insertion = current_mutations_by_id.emplace(mutation_file_name, std::move(entry));
current_mutations_by_version.emplace(version, insertion.first->second);
LOG_INFO(log, "Added mutation: " << mutation_file_name);
LOG_INFO_FORMATTED(log, "Added mutation: {}", mutation_file_name);
merging_mutating_task_handle->wake();
return version;
}
void StorageMergeTree::waitForMutation(Int64 version, const String & file_name)
{
LOG_INFO(log, "Waiting mutation: " << file_name);
LOG_INFO_FORMATTED(log, "Waiting mutation: {}", file_name);
auto check = [version, this]() { return shutdown_called || isMutationDone(version); };
std::unique_lock lock(mutation_wait_mutex);
mutation_wait_event.wait(lock, check);
@ -492,7 +492,7 @@ std::vector<MergeTreeMutationStatus> StorageMergeTree::getMutationsStatus() cons
CancellationCode StorageMergeTree::killMutation(const String & mutation_id)
{
LOG_TRACE(log, "Killing mutation " << mutation_id);
LOG_TRACE_FORMATTED(log, "Killing mutation {}", mutation_id);
std::optional<MergeTreeMutationEntry> to_kill;
{
@ -511,7 +511,7 @@ CancellationCode StorageMergeTree::killMutation(const String & mutation_id)
global_context.getMergeList().cancelPartMutations({}, to_kill->block_number);
to_kill->removeFile();
LOG_TRACE(log, "Cancelled part mutations and removed mutation file " << mutation_id);
LOG_TRACE_FORMATTED(log, "Cancelled part mutations and removed mutation file {}", mutation_id);
{
std::lock_guard<std::mutex> lock(mutation_wait_mutex);
mutation_wait_event.notify_all();
@ -896,7 +896,7 @@ void StorageMergeTree::clearOldMutations(bool truncate)
for (auto & mutation : mutations_to_delete)
{
LOG_TRACE(log, "Removing mutation: " << mutation.file_name);
LOG_TRACE_FORMATTED(log, "Removing mutation: {}", mutation.file_name);
mutation.removeFile();
}
}
@ -1050,7 +1050,7 @@ void StorageMergeTree::dropPartition(const ASTPtr & partition, bool detach, cons
/// If DETACH clone parts to detached/ directory
for (const auto & part : parts_to_remove)
{
LOG_INFO(log, "Detaching " << part->relative_path);
LOG_INFO_FORMATTED(log, "Detaching {}", part->relative_path);
part->makeCloneInDetached("");
}
}


@ -412,7 +412,7 @@ void StorageReplicatedMergeTree::createTableIfNotExists()
if (zookeeper->exists(zookeeper_path))
return;
LOG_DEBUG(log, "Creating table " << zookeeper_path);
LOG_DEBUG_FORMATTED(log, "Creating table {}", zookeeper_path);
zookeeper->createAncestors(zookeeper_path);
@ -548,7 +548,7 @@ void StorageReplicatedMergeTree::createReplica()
{
auto zookeeper = getZooKeeper();
LOG_DEBUG(log, "Creating replica " << replica_path);
LOG_DEBUG_FORMATTED(log, "Creating replica {}", replica_path);
int32_t code;
@ -643,7 +643,7 @@ void StorageReplicatedMergeTree::checkParts(bool skip_sanity_checks)
if (MergeTreePartInfo::tryParsePartName(part_name, &part_info, format_version))
return part_info.getBlocksCount();
LOG_ERROR(log, "Unexpected part name: " << part_name);
LOG_ERROR_FORMATTED(log, "Unexpected part name: {}", part_name);
return 0;
};
@ -701,7 +701,7 @@ void StorageReplicatedMergeTree::checkParts(bool skip_sanity_checks)
for (size_t i = 0; i < parts_to_fetch.size(); ++i)
{
const String & part_name = parts_to_fetch[i];
LOG_ERROR(log, "Removing locally missing part from ZooKeeper and queueing a fetch: " << part_name);
LOG_ERROR_FORMATTED(log, "Removing locally missing part from ZooKeeper and queueing a fetch: {}", part_name);
Coordination::Requests ops;
@ -1483,7 +1483,7 @@ void StorageReplicatedMergeTree::executeDropRange(const LogEntry & entry)
/// If DETACH clone parts to detached/ directory
for (const auto & part : parts_to_remove)
{
LOG_INFO(log, "Detaching " << part->relative_path);
LOG_INFO_FORMATTED(log, "Detaching {}", part->relative_path);
part->makeCloneInDetached("");
}
}
@ -1824,7 +1824,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry)
void StorageReplicatedMergeTree::cloneReplica(const String & source_replica, Coordination::Stat source_is_lost_stat, zkutil::ZooKeeperPtr & zookeeper)
{
LOG_INFO(log, "Will mimic " << source_replica);
LOG_INFO_FORMATTED(log, "Will mimic {}", source_replica);
String source_path = zookeeper_path + "/replicas/" + source_replica;
@ -3283,7 +3283,7 @@ bool StorageReplicatedMergeTree::executeMetadataAlter(const StorageReplicatedMer
setTableStructure(std::move(columns_from_entry), metadata_diff);
metadata_version = entry.alter_version;
LOG_INFO(log, "Applied changes to the metadata of the table. Current metadata version: " << metadata_version);
LOG_INFO_FORMATTED(log, "Applied changes to the metadata of the table. Current metadata version: {}", metadata_version);
}
/// This transaction may not happen, but it's OK, because on the next retry we will eventually create/update this node
@ -3722,7 +3722,7 @@ void StorageReplicatedMergeTree::drop()
if (zookeeper->expired())
throw Exception("Table was not dropped because ZooKeeper session has expired.", ErrorCodes::TABLE_WAS_NOT_DROPPED);
LOG_INFO(log, "Removing replica " << replica_path);
LOG_INFO_FORMATTED(log, "Removing replica {}", replica_path);
replica_is_active_node = nullptr;
/// It may left some garbage if replica_path subtree are concurently modified
zookeeper->tryRemoveRecursive(replica_path);
@ -3833,7 +3833,7 @@ StorageReplicatedMergeTree::allocateBlockNumber(
Strings StorageReplicatedMergeTree::waitForAllReplicasToProcessLogEntry(const ReplicatedMergeTreeLogEntryData & entry, bool wait_for_non_active)
{
LOG_DEBUG(log, "Waiting for all replicas to process " << entry.znode_name);
LOG_DEBUG_FORMATTED(log, "Waiting for all replicas to process {}", entry.znode_name);
auto zookeeper = getZooKeeper();
Strings replicas = zookeeper->getChildren(zookeeper_path + "/replicas");
@ -3851,7 +3851,7 @@ Strings StorageReplicatedMergeTree::waitForAllReplicasToProcessLogEntry(const Re
}
}
LOG_DEBUG(log, "Finished waiting for all replicas to process " << entry.znode_name);
LOG_DEBUG_FORMATTED(log, "Finished waiting for all replicas to process {}", entry.znode_name);
return unwaited;
}
@ -4397,7 +4397,7 @@ void StorageReplicatedMergeTree::fetchPartition(const ASTPtr & partition, const
}
}
LOG_INFO(log, "Parts to fetch: " << parts_to_fetch.size());
LOG_INFO_FORMATTED(log, "Parts to fetch: {}", parts_to_fetch.size());
missing_parts.clear();
for (const String & part : parts_to_fetch)
@ -4514,7 +4514,7 @@ void StorageReplicatedMergeTree::mutate(const MutationCommands & commands, const
const String & path_created =
dynamic_cast<const Coordination::CreateResponse *>(responses[1].get())->path_created;
entry.znode_name = path_created.substr(path_created.find_last_of('/') + 1);
LOG_TRACE(log, "Created mutation with ID " << entry.znode_name);
LOG_TRACE_FORMATTED(log, "Created mutation with ID {}", entry.znode_name);
break;
}
else if (rc == Coordination::ZBADVERSION)
@ -4556,7 +4556,7 @@ CancellationCode StorageReplicatedMergeTree::killMutation(const String & mutatio
zkutil::ZooKeeperPtr zookeeper = getZooKeeper();
LOG_TRACE(log, "Killing mutation " << mutation_id);
LOG_TRACE_FORMATTED(log, "Killing mutation {}", mutation_id);
auto mutation_entry = queue.removeMutation(zookeeper, mutation_id);
if (!mutation_entry)
@ -4639,7 +4639,7 @@ void StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZK()
}
catch (...)
{
LOG_ERROR(log, "There is a problem with deleting parts from ZooKeeper: " << getCurrentExceptionMessage(true));
LOG_ERROR_FORMATTED(log, "There is a problem with deleting parts from ZooKeeper: {}", getCurrentExceptionMessage(true));
}
/// Part names that were reliably deleted from ZooKeeper should be deleted from filesystem
@ -5386,7 +5386,7 @@ bool StorageReplicatedMergeTree::dropPartsInPartition(
queue.disableMergesInBlockRange(drop_range_fake_part_name);
}
LOG_DEBUG(log, "Disabled merges covered by range " << drop_range_fake_part_name);
LOG_DEBUG_FORMATTED(log, "Disabled merges covered by range {}", drop_range_fake_part_name);
/// Finally, having achieved the necessary invariants, you can put an entry in the log.
entry.type = LogEntry::DROP_RANGE;