diff --git a/base/common/ErrorHandlers.h b/base/common/ErrorHandlers.h
index b35cdcab93d..67194ee043b 100644
--- a/base/common/ErrorHandlers.h
+++ b/base/common/ErrorHandlers.h
@@ -28,7 +28,7 @@ public:
     void exception() override { logException(); }
 
 private:
-    Logger * log = &Logger::get("ServerErrorHandler");
+    Poco::Logger * log = &Poco::Logger::get("ServerErrorHandler");
 
     void logException()
     {
diff --git a/base/common/logger_useful.h b/base/common/logger_useful.h
index ad56ef7e857..f760d59de45 100644
--- a/base/common/logger_useful.h
+++ b/base/common/logger_useful.h
@@ -9,13 +9,6 @@
 #include
 
-/// TODO Remove this.
-using Poco::Logger;
-using Poco::Message;
-using DB::LogsLevel;
-using DB::CurrentThread;
-
-
 namespace
 {
     template <typename... Ts> constexpr size_t numArgs(Ts &&...) { return sizeof...(Ts); }
@@ -31,8 +24,8 @@ namespace
 
 #define LOG_IMPL(logger, priority, PRIORITY, ...) do \
 { \
-    const bool is_clients_log = (CurrentThread::getGroup() != nullptr) && \
-        (CurrentThread::getGroup()->client_logs_level >= (priority)); \
+    const bool is_clients_log = (DB::CurrentThread::getGroup() != nullptr) && \
+        (DB::CurrentThread::getGroup()->client_logs_level >= (priority)); \
     if ((logger)->is((PRIORITY)) || is_clients_log) \
     { \
         std::string formatted_message = numArgs(__VA_ARGS__) > 1 ? fmt::format(__VA_ARGS__) : firstArg(__VA_ARGS__); \
@@ -42,7 +35,7 @@ namespace
             file_function += __FILE__; \
             file_function += "; "; \
             file_function += __PRETTY_FUNCTION__; \
-            Message poco_message((logger)->name(), formatted_message, \
+            Poco::Message poco_message((logger)->name(), formatted_message, \
                 (PRIORITY), file_function.c_str(), __LINE__); \
             channel->log(poco_message); \
         } \
@@ -50,9 +43,18 @@
 } while (false)
 
 
-#define LOG_TRACE(logger, ...) LOG_IMPL(logger, LogsLevel::trace, Message::PRIO_TRACE, __VA_ARGS__)
-#define LOG_DEBUG(logger, ...) LOG_IMPL(logger, LogsLevel::debug, Message::PRIO_DEBUG, __VA_ARGS__)
-#define LOG_INFO(logger, ...) LOG_IMPL(logger, LogsLevel::information, Message::PRIO_INFORMATION, __VA_ARGS__)
-#define LOG_WARNING(logger, ...) LOG_IMPL(logger, LogsLevel::warning, Message::PRIO_WARNING, __VA_ARGS__)
-#define LOG_ERROR(logger, ...) LOG_IMPL(logger, LogsLevel::error, Message::PRIO_ERROR, __VA_ARGS__)
-#define LOG_FATAL(logger, ...) LOG_IMPL(logger, LogsLevel::error, Message::PRIO_FATAL, __VA_ARGS__)
+#define LOG_TRACE(logger, ...) LOG_IMPL(logger, DB::LogsLevel::trace, Poco::Message::PRIO_TRACE, __VA_ARGS__)
+#define LOG_DEBUG(logger, ...) LOG_IMPL(logger, DB::LogsLevel::debug, Poco::Message::PRIO_DEBUG, __VA_ARGS__)
+#define LOG_INFO(logger, ...) LOG_IMPL(logger, DB::LogsLevel::information, Poco::Message::PRIO_INFORMATION, __VA_ARGS__)
+#define LOG_WARNING(logger, ...) LOG_IMPL(logger, DB::LogsLevel::warning, Poco::Message::PRIO_WARNING, __VA_ARGS__)
+#define LOG_ERROR(logger, ...) LOG_IMPL(logger, DB::LogsLevel::error, Poco::Message::PRIO_ERROR, __VA_ARGS__)
+#define LOG_FATAL(logger, ...) LOG_IMPL(logger, DB::LogsLevel::error, Poco::Message::PRIO_FATAL, __VA_ARGS__)
+
+
+/// Compatibility for external projects.
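+///
+/// With the file-scope aliases gone, every call site below must spell the
+/// namespaces out. A minimal sketch of the post-change spelling (the
+/// "Example" logger name is hypothetical):
+///
+///     Poco::Logger * log = &Poco::Logger::get("Example");
+///     LOG_INFO(log, "Processed {} rows", rows);
+///
+/// Only Arcadia builds keep the old unqualified names, via the guard below.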
+#if defined(ARCADIA_BUILD) + using Poco::Logger; + using Poco::Message; + using DB::LogsLevel; + using DB::CurrentThread; +#endif diff --git a/base/daemon/BaseDaemon.cpp b/base/daemon/BaseDaemon.cpp index 10c7173d5b1..2a36777218e 100644 --- a/base/daemon/BaseDaemon.cpp +++ b/base/daemon/BaseDaemon.cpp @@ -124,7 +124,7 @@ static void signalHandler(int sig, siginfo_t * info, void * context) const ucontext_t signal_context = *reinterpret_cast(context); const StackTrace stack_trace(signal_context); - StringRef query_id = CurrentThread::getQueryId(); /// This is signal safe. + StringRef query_id = DB::CurrentThread::getQueryId(); /// This is signal safe. query_id.size = std::min(query_id.size, max_query_id_size); DB::writeBinary(sig, out); @@ -162,7 +162,7 @@ public: }; explicit SignalListener(BaseDaemon & daemon_) - : log(&Logger::get("BaseDaemon")) + : log(&Poco::Logger::get("BaseDaemon")) , daemon(daemon_) { } @@ -231,7 +231,7 @@ public: } private: - Logger * log; + Poco::Logger * log; BaseDaemon & daemon; void onTerminate(const std::string & message, UInt32 thread_num) const @@ -288,9 +288,9 @@ extern "C" void __sanitizer_set_death_callback(void (*)()); static void sanitizerDeathCallback() { - Logger * log = &Logger::get("BaseDaemon"); + Poco::Logger * log = &Poco::Logger::get("BaseDaemon"); - StringRef query_id = CurrentThread::getQueryId(); /// This is signal safe. + StringRef query_id = DB::CurrentThread::getQueryId(); /// This is signal safe. { std::stringstream message; @@ -498,10 +498,10 @@ void debugIncreaseOOMScore() } catch (const Poco::Exception & e) { - LOG_WARNING(&Logger::root(), "Failed to adjust OOM score: '{}'.", e.displayText()); + LOG_WARNING(&Poco::Logger::root(), "Failed to adjust OOM score: '{}'.", e.displayText()); return; } - LOG_INFO(&Logger::root(), "Set OOM score adjustment to {}", new_score); + LOG_INFO(&Poco::Logger::root(), "Set OOM score adjustment to {}", new_score); } #else void debugIncreaseOOMScore() {} @@ -715,7 +715,7 @@ void BaseDaemon::initializeTerminationAndSignalProcessing() void BaseDaemon::logRevision() const { - Logger::root().information("Starting " + std::string{VERSION_FULL} + Poco::Logger::root().information("Starting " + std::string{VERSION_FULL} + " with revision " + std::to_string(ClickHouseRevision::get()) + ", PID " + std::to_string(getpid())); } @@ -732,7 +732,7 @@ void BaseDaemon::handleNotification(Poco::TaskFailedNotification *_tfn) { task_failed = true; Poco::AutoPtr fn(_tfn); - Logger *lg = &(logger()); + Poco::Logger * lg = &(logger()); LOG_ERROR(lg, "Task '{}' failed. Daemon is shutting down. 
Reason - {}", fn->task()->name(), fn->reason().displayText()); ServerApplication::terminate(); } diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index eb78c049825..8ce03ac0867 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -135,7 +135,7 @@ static void attachSystemTables(const Context & context) int LocalServer::main(const std::vector & /*args*/) try { - Logger * log = &logger(); + Poco::Logger * log = &logger(); ThreadStatus thread_status; UseSSL use_ssl; diff --git a/programs/odbc-bridge/ODBCBlockInputStream.cpp b/programs/odbc-bridge/ODBCBlockInputStream.cpp index 6ada6dd3a2d..1316ff8f4c6 100644 --- a/programs/odbc-bridge/ODBCBlockInputStream.cpp +++ b/programs/odbc-bridge/ODBCBlockInputStream.cpp @@ -25,7 +25,7 @@ ODBCBlockInputStream::ODBCBlockInputStream( , result{statement} , iterator{result.begin()} , max_block_size{max_block_size_} - , log(&Logger::get("ODBCBlockInputStream")) + , log(&Poco::Logger::get("ODBCBlockInputStream")) { if (sample_block.columns() != result.columnCount()) throw Exception{"RecordSet contains " + toString(result.columnCount()) + " columns while " + toString(sample_block.columns()) diff --git a/programs/odbc-bridge/ODBCBlockOutputStream.cpp b/programs/odbc-bridge/ODBCBlockOutputStream.cpp index ab24c008e40..b5bffc58c55 100644 --- a/programs/odbc-bridge/ODBCBlockOutputStream.cpp +++ b/programs/odbc-bridge/ODBCBlockOutputStream.cpp @@ -94,7 +94,7 @@ ODBCBlockOutputStream::ODBCBlockOutputStream(Poco::Data::Session && session_, , table_name(remote_table_name_) , sample_block(sample_block_) , quoting(quoting_) - , log(&Logger::get("ODBCBlockOutputStream")) + , log(&Poco::Logger::get("ODBCBlockOutputStream")) { description.init(sample_block); } diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 29096327a71..e587e134075 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -89,7 +89,7 @@ namespace CurrentMetrics namespace { -void setupTmpPath(Logger * log, const std::string & path) +void setupTmpPath(Poco::Logger * log, const std::string & path) { LOG_DEBUG(log, "Setting up {} to store temporary data in it", path); @@ -212,7 +212,7 @@ void Server::defineOptions(Poco::Util::OptionSet & options) int Server::main(const std::vector & /*args*/) { - Logger * log = &logger(); + Poco::Logger * log = &logger(); UseSSL use_ssl; ThreadStatus thread_status; diff --git a/src/Access/AllowedClientHosts.cpp b/src/Access/AllowedClientHosts.cpp index 9c8a7cc12f5..82372fd8b14 100644 --- a/src/Access/AllowedClientHosts.cpp +++ b/src/Access/AllowedClientHosts.cpp @@ -309,7 +309,7 @@ bool AllowedClientHosts::contains(const IPAddress & client_address) const throw; /// Try to ignore DNS errors: if host cannot be resolved, skip it and try next. LOG_WARNING( - &Logger::get("AddressPatterns"), + &Poco::Logger::get("AddressPatterns"), "Failed to check if the allowed client hosts contain address {}. {}, code = {}", client_address.toString(), e.displayText(), e.code()); return false; @@ -342,7 +342,7 @@ bool AllowedClientHosts::contains(const IPAddress & client_address) const throw; /// Try to ignore DNS errors: if host cannot be resolved, skip it and try next. LOG_WARNING( - &Logger::get("AddressPatterns"), + &Poco::Logger::get("AddressPatterns"), "Failed to check if the allowed client hosts contain address {}. 
{}, code = {}", client_address.toString(), e.displayText(), e.code()); return false; diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index 6c1f7f8e953..367d4bab1dc 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -508,18 +508,18 @@ void Connection::sendScalarsData(Scalars & data) "Sent data for {} scalars, total {} rows in {} sec., {} rows/sec., {} ({}/sec.), compressed {} times to {} ({}/sec.)", data.size(), rows, elapsed, static_cast(rows / watch.elapsedSeconds()), - formatReadableSizeWithBinarySuffix(maybe_compressed_out_bytes), - formatReadableSizeWithBinarySuffix(maybe_compressed_out_bytes / watch.elapsedSeconds()), + ReadableSize(maybe_compressed_out_bytes), + ReadableSize(maybe_compressed_out_bytes / watch.elapsedSeconds()), static_cast(maybe_compressed_out_bytes) / out_bytes, - formatReadableSizeWithBinarySuffix(out_bytes), - formatReadableSizeWithBinarySuffix(out_bytes / watch.elapsedSeconds())); + ReadableSize(out_bytes), + ReadableSize(out_bytes / watch.elapsedSeconds())); else LOG_DEBUG(log_wrapper.get(), "Sent data for {} scalars, total {} rows in {} sec., {} rows/sec., {} ({}/sec.), no compression.", data.size(), rows, elapsed, static_cast(rows / watch.elapsedSeconds()), - formatReadableSizeWithBinarySuffix(maybe_compressed_out_bytes), - formatReadableSizeWithBinarySuffix(maybe_compressed_out_bytes / watch.elapsedSeconds())); + ReadableSize(maybe_compressed_out_bytes), + ReadableSize(maybe_compressed_out_bytes / watch.elapsedSeconds())); } namespace @@ -612,18 +612,18 @@ void Connection::sendExternalTablesData(ExternalTablesData & data) "Sent data for {} external tables, total {} rows in {} sec., {} rows/sec., {} ({}/sec.), compressed {} times to {} ({}/sec.)", data.size(), rows, elapsed, static_cast(rows / watch.elapsedSeconds()), - formatReadableSizeWithBinarySuffix(maybe_compressed_out_bytes), - formatReadableSizeWithBinarySuffix(maybe_compressed_out_bytes / watch.elapsedSeconds()), + ReadableSize(maybe_compressed_out_bytes), + ReadableSize(maybe_compressed_out_bytes / watch.elapsedSeconds()), static_cast(maybe_compressed_out_bytes) / out_bytes, - formatReadableSizeWithBinarySuffix(out_bytes), - formatReadableSizeWithBinarySuffix(out_bytes / watch.elapsedSeconds())); + ReadableSize(out_bytes), + ReadableSize(out_bytes / watch.elapsedSeconds())); else LOG_DEBUG(log_wrapper.get(), "Sent data for {} external tables, total {} rows in {} sec., {} rows/sec., {} ({}/sec.), no compression.", data.size(), rows, elapsed, static_cast(rows / watch.elapsedSeconds()), - formatReadableSizeWithBinarySuffix(maybe_compressed_out_bytes), - formatReadableSizeWithBinarySuffix(maybe_compressed_out_bytes / watch.elapsedSeconds())); + ReadableSize(maybe_compressed_out_bytes), + ReadableSize(maybe_compressed_out_bytes / watch.elapsedSeconds())); } std::optional Connection::getResolvedAddress() const diff --git a/src/Client/Connection.h b/src/Client/Connection.h index e056a4323df..f93fa7871e5 100644 --- a/src/Client/Connection.h +++ b/src/Client/Connection.h @@ -249,16 +249,16 @@ private: { } - Logger * get() + Poco::Logger * get() { if (!log) - log = &Logger::get("Connection (" + parent.getDescription() + ")"); + log = &Poco::Logger::get("Connection (" + parent.getDescription() + ")"); return log; } private: - std::atomic log; + std::atomic log; Connection & parent; }; diff --git a/src/Client/ConnectionPool.h b/src/Client/ConnectionPool.h index be52234b904..bbdcae894e7 100644 --- a/src/Client/ConnectionPool.h +++ b/src/Client/ConnectionPool.h @@ -56,7 
+56,7 @@ public: Protocol::Compression compression_ = Protocol::Compression::Enable, Protocol::Secure secure_ = Protocol::Secure::Disable) : Base(max_connections_, - &Logger::get("ConnectionPool (" + host_ + ":" + toString(port_) + ")")), + &Poco::Logger::get("ConnectionPool (" + host_ + ":" + toString(port_) + ")")), host(host_), port(port_), default_database(default_database_), diff --git a/src/Client/ConnectionPoolWithFailover.cpp b/src/Client/ConnectionPoolWithFailover.cpp index 9933cc6b555..713bb33342f 100644 --- a/src/Client/ConnectionPoolWithFailover.cpp +++ b/src/Client/ConnectionPoolWithFailover.cpp @@ -35,7 +35,7 @@ ConnectionPoolWithFailover::ConnectionPoolWithFailover( LoadBalancing load_balancing, time_t decrease_error_period_, size_t max_error_cap_) - : Base(std::move(nested_pools_), decrease_error_period_, max_error_cap_, &Logger::get("ConnectionPoolWithFailover")) + : Base(std::move(nested_pools_), decrease_error_period_, max_error_cap_, &Poco::Logger::get("ConnectionPoolWithFailover")) , default_load_balancing(load_balancing) { const std::string & local_hostname = getFQDNOrHostName(); diff --git a/src/Client/TimeoutSetter.cpp b/src/Client/TimeoutSetter.cpp index 10cc9fbffef..a512485407e 100644 --- a/src/Client/TimeoutSetter.cpp +++ b/src/Client/TimeoutSetter.cpp @@ -35,7 +35,7 @@ TimeoutSetter::~TimeoutSetter() catch (std::exception & e) { // Sometimes catched on macos - LOG_ERROR(&Logger::get("Client"), "TimeoutSetter: Can't reset timeouts: {}", e.what()); + LOG_ERROR(&Poco::Logger::get("Client"), "TimeoutSetter: Can't reset timeouts: {}", e.what()); } } } diff --git a/src/Common/AlignedBuffer.cpp b/src/Common/AlignedBuffer.cpp index 6d4775ebadf..f1d3f98ff3a 100644 --- a/src/Common/AlignedBuffer.cpp +++ b/src/Common/AlignedBuffer.cpp @@ -18,8 +18,8 @@ void AlignedBuffer::alloc(size_t size, size_t alignment) void * new_buf; int res = ::posix_memalign(&new_buf, std::max(alignment, sizeof(void*)), size); if (0 != res) - throwFromErrno("Cannot allocate memory (posix_memalign), size: " - + formatReadableSizeWithBinarySuffix(size) + ", alignment: " + formatReadableSizeWithBinarySuffix(alignment) + ".", + throwFromErrno(fmt::format("Cannot allocate memory (posix_memalign), size: {}, alignment: {}.", + ReadableSize(size), ReadableSize(alignment)), ErrorCodes::CANNOT_ALLOCATE_MEMORY, res); buf = new_buf; } diff --git a/src/Common/Allocator.h b/src/Common/Allocator.h index 43d7e67c4bb..ead456f935e 100644 --- a/src/Common/Allocator.h +++ b/src/Common/Allocator.h @@ -129,7 +129,7 @@ public: void * new_buf = ::realloc(buf, new_size); if (nullptr == new_buf) - DB::throwFromErrno("Allocator: Cannot realloc from " + formatReadableSizeWithBinarySuffix(old_size) + " to " + formatReadableSizeWithBinarySuffix(new_size) + ".", DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY); + DB::throwFromErrno(fmt::format("Allocator: Cannot realloc from {} to {}.", ReadableSize(old_size), ReadableSize(new_size)), DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY); buf = new_buf; if constexpr (clear_memory) @@ -145,7 +145,8 @@ public: buf = clickhouse_mremap(buf, old_size, new_size, MREMAP_MAYMOVE, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); if (MAP_FAILED == buf) - DB::throwFromErrno("Allocator: Cannot mremap memory chunk from " + formatReadableSizeWithBinarySuffix(old_size) + " to " + formatReadableSizeWithBinarySuffix(new_size) + ".", DB::ErrorCodes::CANNOT_MREMAP); + DB::throwFromErrno(fmt::format("Allocator: Cannot mremap memory chunk from {} to {}.", + ReadableSize(old_size), ReadableSize(new_size)), 
DB::ErrorCodes::CANNOT_MREMAP); /// No need for zero-fill, because mmap guarantees it. } @@ -201,13 +202,13 @@ private: if (size >= MMAP_THRESHOLD) { if (alignment > MMAP_MIN_ALIGNMENT) - throw DB::Exception("Too large alignment " + formatReadableSizeWithBinarySuffix(alignment) + ": more than page size when allocating " - + formatReadableSizeWithBinarySuffix(size) + ".", DB::ErrorCodes::BAD_ARGUMENTS); + throw DB::Exception(fmt::format("Too large alignment {}: more than page size when allocating {}.", + ReadableSize(alignment), ReadableSize(size)), DB::ErrorCodes::BAD_ARGUMENTS); buf = mmap(getMmapHint(), size, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); if (MAP_FAILED == buf) - DB::throwFromErrno("Allocator: Cannot mmap " + formatReadableSizeWithBinarySuffix(size) + ".", DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY); + DB::throwFromErrno(fmt::format("Allocator: Cannot mmap {}.", ReadableSize(size)), DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY); /// No need for zero-fill, because mmap guarantees it. } @@ -221,7 +222,7 @@ private: buf = ::malloc(size); if (nullptr == buf) - DB::throwFromErrno("Allocator: Cannot malloc " + formatReadableSizeWithBinarySuffix(size) + ".", DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY); + DB::throwFromErrno(fmt::format("Allocator: Cannot malloc {}.", ReadableSize(size)), DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY); } else { @@ -229,7 +230,8 @@ private: int res = posix_memalign(&buf, alignment, size); if (0 != res) - DB::throwFromErrno("Cannot allocate memory (posix_memalign) " + formatReadableSizeWithBinarySuffix(size) + ".", DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, res); + DB::throwFromErrno(fmt::format("Cannot allocate memory (posix_memalign) {}.", ReadableSize(size)), + DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, res); if constexpr (clear_memory) memset(buf, 0, size); @@ -243,7 +245,7 @@ private: if (size >= MMAP_THRESHOLD) { if (0 != munmap(buf, size)) - DB::throwFromErrno("Allocator: Cannot munmap " + formatReadableSizeWithBinarySuffix(size) + ".", DB::ErrorCodes::CANNOT_MUNMAP); + DB::throwFromErrno(fmt::format("Allocator: Cannot munmap {}.", ReadableSize(size)), DB::ErrorCodes::CANNOT_MUNMAP); } else { diff --git a/src/Common/ArrayCache.h b/src/Common/ArrayCache.h index 304ab8e03c3..5e3af09a2fd 100644 --- a/src/Common/ArrayCache.h +++ b/src/Common/ArrayCache.h @@ -177,13 +177,13 @@ private: { ptr = mmap(address_hint, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (MAP_FAILED == ptr) - DB::throwFromErrno("Allocator: Cannot mmap " + formatReadableSizeWithBinarySuffix(size) + ".", DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY); + DB::throwFromErrno(fmt::format("Allocator: Cannot mmap {}.", ReadableSize(size)), DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY); } ~Chunk() { if (ptr && 0 != munmap(ptr, size)) - DB::throwFromErrno("Allocator: Cannot munmap " + formatReadableSizeWithBinarySuffix(size) + ".", DB::ErrorCodes::CANNOT_MUNMAP); + DB::throwFromErrno(fmt::format("Allocator: Cannot munmap {}.", ReadableSize(size)), DB::ErrorCodes::CANNOT_MUNMAP); } Chunk(Chunk && other) : ptr(other.ptr), size(other.size) diff --git a/src/Common/AutoArray.h b/src/Common/AutoArray.h index 92e8d45b07b..a39706baa3d 100644 --- a/src/Common/AutoArray.h +++ b/src/Common/AutoArray.h @@ -278,7 +278,7 @@ private: void * new_data = nullptr; int res = posix_memalign(&new_data, alignment, prefix_size + new_size * sizeof(T)); if (0 != res) - throwFromErrno("Cannot allocate memory (posix_memalign) " + formatReadableSizeWithBinarySuffix(new_size) + ".", + throwFromErrno(fmt::format("Cannot allocate memory 
(posix_memalign) {}.", ReadableSize(new_size)), ErrorCodes::CANNOT_ALLOCATE_MEMORY, res); data_ptr = static_cast(new_data); diff --git a/src/Common/Config/ConfigProcessor.cpp b/src/Common/Config/ConfigProcessor.cpp index 7eb7e13a93e..bf65b7028cc 100644 --- a/src/Common/Config/ConfigProcessor.cpp +++ b/src/Common/Config/ConfigProcessor.cpp @@ -66,21 +66,21 @@ ConfigProcessor::ConfigProcessor( , name_pool(new Poco::XML::NamePool(65521)) , dom_parser(name_pool) { - if (log_to_console && !Logger::has("ConfigProcessor")) + if (log_to_console && !Poco::Logger::has("ConfigProcessor")) { channel_ptr = new Poco::ConsoleChannel; - log = &Logger::create("ConfigProcessor", channel_ptr.get(), Poco::Message::PRIO_TRACE); + log = &Poco::Logger::create("ConfigProcessor", channel_ptr.get(), Poco::Message::PRIO_TRACE); } else { - log = &Logger::get("ConfigProcessor"); + log = &Poco::Logger::get("ConfigProcessor"); } } ConfigProcessor::~ConfigProcessor() { if (channel_ptr) /// This means we have created a new console logger in the constructor. - Logger::destroy("ConfigProcessor"); + Poco::Logger::destroy("ConfigProcessor"); } diff --git a/src/Common/Config/ConfigProcessor.h b/src/Common/Config/ConfigProcessor.h index b6f772f8c16..5a942e73bdb 100644 --- a/src/Common/Config/ConfigProcessor.h +++ b/src/Common/Config/ConfigProcessor.h @@ -116,7 +116,7 @@ private: bool throw_on_bad_incl; - Logger * log; + Poco::Logger * log; Poco::AutoPtr channel_ptr; Substitutions substitutions; diff --git a/src/Common/Config/ConfigReloader.h b/src/Common/Config/ConfigReloader.h index c0904422b39..553589fbd37 100644 --- a/src/Common/Config/ConfigReloader.h +++ b/src/Common/Config/ConfigReloader.h @@ -69,7 +69,7 @@ private: static constexpr auto reload_interval = std::chrono::seconds(2); - Poco::Logger * log = &Logger::get("ConfigReloader"); + Poco::Logger * log = &Poco::Logger::get("ConfigReloader"); std::string path; std::string include_from_path; diff --git a/src/Common/DNSResolver.cpp b/src/Common/DNSResolver.cpp index 1d2d89f477c..c5b4fd0c585 100644 --- a/src/Common/DNSResolver.cpp +++ b/src/Common/DNSResolver.cpp @@ -202,7 +202,7 @@ bool DNSResolver::updateCache() } if (!lost_hosts.empty()) - LOG_INFO(&Logger::get("DNSResolver"), "Cached hosts not found: {}", lost_hosts); + LOG_INFO(&Poco::Logger::get("DNSResolver"), "Cached hosts not found: {}", lost_hosts); return updated; } diff --git a/src/Common/Exception.cpp b/src/Common/Exception.cpp index bac30c802c1..b0c897127c6 100644 --- a/src/Common/Exception.cpp +++ b/src/Common/Exception.cpp @@ -122,7 +122,7 @@ void throwFromErrnoWithPath(const std::string & s, const std::string & path, int void tryLogCurrentException(const char * log_name, const std::string & start_of_message) { - tryLogCurrentException(&Logger::get(log_name), start_of_message); + tryLogCurrentException(&Poco::Logger::get(log_name), start_of_message); } void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_message) @@ -148,13 +148,16 @@ static void getNoSpaceLeftInfoMessage(std::filesystem::path path, std::string & path = path.parent_path(); auto fs = getStatVFS(path); - msg += "\nTotal space: " + formatReadableSizeWithBinarySuffix(fs.f_blocks * fs.f_bsize) - + "\nAvailable space: " + formatReadableSizeWithBinarySuffix(fs.f_bavail * fs.f_bsize) - + "\nTotal inodes: " + formatReadableQuantity(fs.f_files) - + "\nAvailable inodes: " + formatReadableQuantity(fs.f_favail); - auto mount_point = getMountPoint(path).string(); - msg += "\nMount point: " + mount_point; + + 
fmt::format_to(std::back_inserter(msg), + "\nTotal space: {}\nAvailable space: {}\nTotal inodes: {}\nAvailable inodes: {}\nMount point: {}", + ReadableSize(fs.f_blocks * fs.f_bsize), + ReadableSize(fs.f_bavail * fs.f_bsize), + formatReadableQuantity(fs.f_files), + formatReadableQuantity(fs.f_favail), + mount_point); + #if defined(__linux__) msg += "\nFilesystem: " + getFilesystemName(mount_point); #endif diff --git a/src/Common/FileChecker.h b/src/Common/FileChecker.h index c4a919e9ec1..83db397e78c 100644 --- a/src/Common/FileChecker.h +++ b/src/Common/FileChecker.h @@ -37,7 +37,7 @@ private: Map map; bool initialized = false; - Logger * log = &Logger::get("FileChecker"); + Poco::Logger * log = &Poco::Logger::get("FileChecker"); }; } diff --git a/src/Common/LRUCache.h b/src/Common/LRUCache.h index 5bcfc8fc2db..76aa9705a91 100644 --- a/src/Common/LRUCache.h +++ b/src/Common/LRUCache.h @@ -306,7 +306,7 @@ private: auto it = cells.find(key); if (it == cells.end()) { - LOG_ERROR(&Logger::get("LRUCache"), "LRUCache became inconsistent. There must be a bug in it."); + LOG_ERROR(&Poco::Logger::get("LRUCache"), "LRUCache became inconsistent. There must be a bug in it."); abort(); } @@ -324,7 +324,7 @@ private: if (current_size > (1ull << 63)) { - LOG_ERROR(&Logger::get("LRUCache"), "LRUCache became inconsistent. There must be a bug in it."); + LOG_ERROR(&Poco::Logger::get("LRUCache"), "LRUCache became inconsistent. There must be a bug in it."); abort(); } } diff --git a/src/Common/MemoryTracker.cpp b/src/Common/MemoryTracker.cpp index 16d166c191b..03bd8be94f3 100644 --- a/src/Common/MemoryTracker.cpp +++ b/src/Common/MemoryTracker.cpp @@ -50,13 +50,13 @@ MemoryTracker::~MemoryTracker() void MemoryTracker::logPeakMemoryUsage() const { const auto * description = description_ptr.load(std::memory_order_relaxed); - LOG_DEBUG(&Logger::get("MemoryTracker"), "Peak memory usage{}: {}.", (description ? " " + std::string(description) : ""), formatReadableSizeWithBinarySuffix(peak)); + LOG_DEBUG(&Poco::Logger::get("MemoryTracker"), "Peak memory usage{}: {}.", (description ? " " + std::string(description) : ""), ReadableSize(peak)); } void MemoryTracker::logMemoryUsage(Int64 current) const { const auto * description = description_ptr.load(std::memory_order_relaxed); - LOG_DEBUG(&Logger::get("MemoryTracker"), "Current memory usage{}: {}.", (description ? " " + std::string(description) : ""), formatReadableSizeWithBinarySuffix(current)); + LOG_DEBUG(&Poco::Logger::get("MemoryTracker"), "Current memory usage{}: {}.", (description ? 
" " + std::string(description) : ""), ReadableSize(current)); } diff --git a/src/Common/PipeFDs.cpp b/src/Common/PipeFDs.cpp index cec8cb1cf29..1f57234534f 100644 --- a/src/Common/PipeFDs.cpp +++ b/src/Common/PipeFDs.cpp @@ -102,7 +102,7 @@ void LazyPipeFDs::tryIncreaseSize(int desired_size) if (-1 == fcntl(fds_rw[1], F_SETPIPE_SZ, pipe_size * 2) && errno != EPERM) throwFromErrno("Cannot increase pipe capacity to " + std::to_string(pipe_size * 2), ErrorCodes::CANNOT_FCNTL); - LOG_TRACE(log, "Pipe capacity is {}", formatReadableSizeWithBinarySuffix(std::min(pipe_size, desired_size))); + LOG_TRACE(log, "Pipe capacity is {}", ReadableSize(std::min(pipe_size, desired_size))); } #else (void)desired_size; diff --git a/src/Common/PoolBase.h b/src/Common/PoolBase.h index 201f9ec1d00..43f4fbff9fe 100644 --- a/src/Common/PoolBase.h +++ b/src/Common/PoolBase.h @@ -152,9 +152,9 @@ private: protected: - Logger * log; + Poco::Logger * log; - PoolBase(unsigned max_items_, Logger * log_) + PoolBase(unsigned max_items_, Poco::Logger * log_) : max_items(max_items_), log(log_) { items.reserve(max_items); diff --git a/src/Common/PoolWithFailoverBase.h b/src/Common/PoolWithFailoverBase.h index e9f3d7d5d23..4d9e52364b9 100644 --- a/src/Common/PoolWithFailoverBase.h +++ b/src/Common/PoolWithFailoverBase.h @@ -57,7 +57,7 @@ public: NestedPools nested_pools_, time_t decrease_error_period_, size_t max_error_cap_, - Logger * log_) + Poco::Logger * log_) : nested_pools(std::move(nested_pools_)) , decrease_error_period(decrease_error_period_) , max_error_cap(max_error_cap_) @@ -134,7 +134,7 @@ protected: /// The time when error counts were last decreased. time_t last_error_decrease_time = 0; - Logger * log; + Poco::Logger * log; }; template diff --git a/src/Common/QueryProfiler.cpp b/src/Common/QueryProfiler.cpp index cd68edd3ba1..a8b7d51a260 100644 --- a/src/Common/QueryProfiler.cpp +++ b/src/Common/QueryProfiler.cpp @@ -79,7 +79,7 @@ namespace ErrorCodes template QueryProfilerBase::QueryProfilerBase(const UInt64 thread_id, const int clock_type, UInt32 period, const int pause_signal_) - : log(&Logger::get("QueryProfiler")) + : log(&Poco::Logger::get("QueryProfiler")) , pause_signal(pause_signal_) { #if USE_UNWIND diff --git a/src/Common/SensitiveDataMasker.cpp b/src/Common/SensitiveDataMasker.cpp index 0d8e155d44a..d3d9991bc90 100644 --- a/src/Common/SensitiveDataMasker.cpp +++ b/src/Common/SensitiveDataMasker.cpp @@ -102,7 +102,7 @@ SensitiveDataMasker::SensitiveDataMasker(const Poco::Util::AbstractConfiguration { Poco::Util::AbstractConfiguration::Keys keys; config.keys(config_prefix, keys); - Logger * logger = &Logger::get("SensitiveDataMaskerConfigRead"); + Poco::Logger * logger = &Poco::Logger::get("SensitiveDataMaskerConfigRead"); std::set used_names; diff --git a/src/Common/StatusFile.cpp b/src/Common/StatusFile.cpp index 79e34d244bc..758f500e9d2 100644 --- a/src/Common/StatusFile.cpp +++ b/src/Common/StatusFile.cpp @@ -43,9 +43,9 @@ StatusFile::StatusFile(const std::string & path_) } if (!contents.empty()) - LOG_INFO(&Logger::get("StatusFile"), "Status file {} already exists - unclean restart. Contents:\n{}", path, contents); + LOG_INFO(&Poco::Logger::get("StatusFile"), "Status file {} already exists - unclean restart. 
Contents:\n{}", path, contents); else - LOG_INFO(&Logger::get("StatusFile"), "Status file {} already exists and is empty - probably unclean hardware restart.", path); + LOG_INFO(&Poco::Logger::get("StatusFile"), "Status file {} already exists and is empty - probably unclean hardware restart.", path); } fd = ::open(path.c_str(), O_WRONLY | O_CREAT | O_CLOEXEC, 0666); @@ -90,10 +90,10 @@ StatusFile::StatusFile(const std::string & path_) StatusFile::~StatusFile() { if (0 != close(fd)) - LOG_ERROR(&Logger::get("StatusFile"), "Cannot close file {}, {}", path, errnoToString(ErrorCodes::CANNOT_CLOSE_FILE)); + LOG_ERROR(&Poco::Logger::get("StatusFile"), "Cannot close file {}, {}", path, errnoToString(ErrorCodes::CANNOT_CLOSE_FILE)); if (0 != unlink(path.c_str())) - LOG_ERROR(&Logger::get("StatusFile"), "Cannot unlink file {}, {}", path, errnoToString(ErrorCodes::CANNOT_CLOSE_FILE)); + LOG_ERROR(&Poco::Logger::get("StatusFile"), "Cannot unlink file {}, {}", path, errnoToString(ErrorCodes::CANNOT_CLOSE_FILE)); } } diff --git a/src/Common/ZooKeeper/Increment.h b/src/Common/ZooKeeper/Increment.h index 26128480635..fa5f550ca9b 100644 --- a/src/Common/ZooKeeper/Increment.h +++ b/src/Common/ZooKeeper/Increment.h @@ -43,7 +43,7 @@ public: private: zkutil::ZooKeeperHolderPtr zookeeper_holder; std::string path; - Logger * log = &Logger::get("zkutil::Increment"); + Poco::Logger * log = &Poco::Logger::get("zkutil::Increment"); }; } diff --git a/src/Common/ZooKeeper/LeaderElection.h b/src/Common/ZooKeeper/LeaderElection.h index fe10a3bf00e..e3b97e7f8ca 100644 --- a/src/Common/ZooKeeper/LeaderElection.h +++ b/src/Common/ZooKeeper/LeaderElection.h @@ -39,7 +39,7 @@ public: LeaderElection(DB::BackgroundSchedulePool & pool_, const std::string & path_, ZooKeeper & zookeeper_, LeadershipHandler handler_, const std::string & identifier_ = "") : pool(pool_), path(path_), zookeeper(zookeeper_), handler(handler_), identifier(identifier_) , log_name("LeaderElection (" + path + ")") - , log(&Logger::get(log_name)) + , log(&Poco::Logger::get(log_name)) { task = pool.createTask(log_name, [this] { threadFunction(); }); createNode(); @@ -67,7 +67,7 @@ private: LeadershipHandler handler; std::string identifier; std::string log_name; - Logger * log; + Poco::Logger * log; EphemeralNodeHolderPtr node; std::string node_name; diff --git a/src/Common/ZooKeeper/Lock.h b/src/Common/ZooKeeper/Lock.h index 683470cf5a5..67116124b4a 100644 --- a/src/Common/ZooKeeper/Lock.h +++ b/src/Common/ZooKeeper/Lock.h @@ -21,7 +21,7 @@ namespace zkutil zookeeper_holder(zookeeper_holder_), lock_path(lock_prefix_ + "/" + lock_name_), lock_message(lock_message_), - log(&Logger::get("zkutil::Lock")) + log(&Poco::Logger::get("zkutil::Lock")) { auto zookeeper = zookeeper_holder->getZooKeeper(); if (create_parent_path_) @@ -72,7 +72,7 @@ namespace zkutil std::string lock_path; std::string lock_message; - Logger * log; + Poco::Logger * log; }; } diff --git a/src/Common/ZooKeeper/ZooKeeper.cpp b/src/Common/ZooKeeper/ZooKeeper.cpp index 6e0cc22a952..115518e2bf9 100644 --- a/src/Common/ZooKeeper/ZooKeeper.cpp +++ b/src/Common/ZooKeeper/ZooKeeper.cpp @@ -48,7 +48,7 @@ static void check(int32_t code, const std::string & path) void ZooKeeper::init(const std::string & implementation_, const std::string & hosts_, const std::string & identity_, int32_t session_timeout_ms_, int32_t operation_timeout_ms_, const std::string & chroot_) { - log = &Logger::get("ZooKeeper"); + log = &Poco::Logger::get("ZooKeeper"); hosts = hosts_; identity = identity_; session_timeout_ms = 
session_timeout_ms_;
diff --git a/src/Common/ZooKeeper/ZooKeeper.h b/src/Common/ZooKeeper/ZooKeeper.h
index e8ab06c2182..3bf9ad3c100 100644
--- a/src/Common/ZooKeeper/ZooKeeper.h
+++ b/src/Common/ZooKeeper/ZooKeeper.h
@@ -269,7 +269,7 @@ private:
 
     std::mutex mutex;
 
-    Logger * log = nullptr;
+    Poco::Logger * log = nullptr;
 };
 
diff --git a/src/Common/ZooKeeper/ZooKeeperHolder.h b/src/Common/ZooKeeper/ZooKeeperHolder.h
index d5792b8fde6..74a0a7fbc34 100644
--- a/src/Common/ZooKeeper/ZooKeeperHolder.h
+++ b/src/Common/ZooKeeper/ZooKeeperHolder.h
@@ -70,7 +70,7 @@ private:
     mutable std::mutex mutex;
     ZooKeeper::Ptr ptr;
 
-    Logger * log = &Logger::get("ZooKeeperHolder");
+    Poco::Logger * log = &Poco::Logger::get("ZooKeeperHolder");
 
     static std::string nullptr_exception_message;
 };
diff --git a/src/Common/ZooKeeper/tests/zkutil_expiration_test.cpp b/src/Common/ZooKeeper/tests/zkutil_expiration_test.cpp
index 1c74985d5f9..d245428db8e 100644
--- a/src/Common/ZooKeeper/tests/zkutil_expiration_test.cpp
+++ b/src/Common/ZooKeeper/tests/zkutil_expiration_test.cpp
@@ -20,8 +20,8 @@ int main(int argc, char ** argv)
     }
 
     Poco::AutoPtr<Poco::ConsoleChannel> channel = new Poco::ConsoleChannel(std::cerr);
-    Logger::root().setChannel(channel);
-    Logger::root().setLevel("trace");
+    Poco::Logger::root().setChannel(channel);
+    Poco::Logger::root().setLevel("trace");
 
     zkutil::ZooKeeper zk(argv[1]);
     std::string unused;
diff --git a/src/Common/formatReadable.h b/src/Common/formatReadable.h
index 87d24b75c20..1c46bd8af09 100644
--- a/src/Common/formatReadable.h
+++ b/src/Common/formatReadable.h
@@ -1,6 +1,8 @@
 #pragma once
 
 #include <string>
+#include <fmt/format.h>
+
 
 namespace DB
 {
@@ -20,3 +22,35 @@ std::string formatReadableSizeWithDecimalSuffix(double value, int precision = 2)
 /// Prints the number as 123.45 billion.
 void formatReadableQuantity(double value, DB::WriteBuffer & out, int precision = 2);
 std::string formatReadableQuantity(double value, int precision = 2);
+
+
+/// Wrapper around value. If used with fmt library (e.g. for log messages),
+/// value is automatically formatted as size with binary suffix.
+struct ReadableSize
+{
+    double value;
+    explicit ReadableSize(double value_) : value(value_) {}
+};
+
+/// See https://fmt.dev/latest/api.html#formatting-user-defined-types
+template <>
+struct fmt::formatter<ReadableSize>
+{
+    constexpr auto parse(format_parse_context & ctx)
+    {
+        auto it = ctx.begin();
+        auto end = ctx.end();
+
+        /// Only support {}.
+        if (it != end && *it != '}')
+            throw format_error("invalid format");
+
+        return it;
+    }
+
+    template <typename FormatContext>
+    auto format(const ReadableSize & size, FormatContext & ctx)
+    {
+        return format_to(ctx.out(), "{}", formatReadableSizeWithBinarySuffix(size.value));
+    }
+};
diff --git a/src/Common/tests/gtest_log.cpp b/src/Common/tests/gtest_log.cpp
index a1e532f92e6..9f4ef41f642 100644
--- a/src/Common/tests/gtest_log.cpp
+++ b/src/Common/tests/gtest_log.cpp
@@ -12,7 +12,7 @@ TEST(Logger, Log)
 {
     Poco::Logger::root().setLevel("none");
     Poco::Logger::root().setChannel(Poco::AutoPtr(new Poco::NullChannel()));
-    Logger * log = &Logger::get("Log");
+    Poco::Logger * log = &Poco::Logger::get("Log");
 
     /// This test checks that we don't pass this string to fmtlib, because it is the only argument.
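    /// A sketch of the dispatch LOG_IMPL performs (numArgs and firstArg are
    /// the helpers from base/common/logger_useful.h above); with a single
    /// argument the ternary takes the firstArg branch, so fmt never parses
    /// the braces:
    ///
    ///     const char * message = "Hello {} World";
    ///     std::string formatted = numArgs(message) > 1
    ///         ? fmt::format(message)   /// not taken: fmt would throw on '{}'
    ///         : firstArg(message);     /// taken: passes through verbatim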
EXPECT_NO_THROW(LOG_INFO(log, "Hello {} World")); diff --git a/src/Core/BackgroundSchedulePool.cpp b/src/Core/BackgroundSchedulePool.cpp index cc9ee23c9b8..a72838c92e9 100644 --- a/src/Core/BackgroundSchedulePool.cpp +++ b/src/Core/BackgroundSchedulePool.cpp @@ -111,7 +111,7 @@ void BackgroundSchedulePoolTaskInfo::execute() static const int32_t slow_execution_threshold_ms = 200; if (milliseconds >= slow_execution_threshold_ms) - LOG_TRACE(&Logger::get(log_name), "Execution took {} ms.", milliseconds); + LOG_TRACE(&Poco::Logger::get(log_name), "Execution took {} ms.", milliseconds); { std::lock_guard lock_schedule(schedule_mutex); @@ -156,7 +156,7 @@ BackgroundSchedulePool::BackgroundSchedulePool(size_t size_, CurrentMetrics::Met , memory_metric(memory_metric_) , thread_name(thread_name_) { - LOG_INFO(&Logger::get("BackgroundSchedulePool/" + thread_name), "Create BackgroundSchedulePool with {} threads", size); + LOG_INFO(&Poco::Logger::get("BackgroundSchedulePool/" + thread_name), "Create BackgroundSchedulePool with {} threads", size); threads.resize(size); for (auto & thread : threads) @@ -179,7 +179,7 @@ BackgroundSchedulePool::~BackgroundSchedulePool() queue.wakeUpAll(); delayed_thread.join(); - LOG_TRACE(&Logger::get("BackgroundSchedulePool/" + thread_name), "Waiting for threads to finish."); + LOG_TRACE(&Poco::Logger::get("BackgroundSchedulePool/" + thread_name), "Waiting for threads to finish."); for (auto & thread : threads) thread.join(); } diff --git a/src/Core/MySQLProtocol.h b/src/Core/MySQLProtocol.h index 2fd15f78c05..ee53276ee1b 100644 --- a/src/Core/MySQLProtocol.h +++ b/src/Core/MySQLProtocol.h @@ -994,7 +994,7 @@ private: class Sha256Password : public IPlugin { public: - Sha256Password(RSA & public_key_, RSA & private_key_, Logger * log_) + Sha256Password(RSA & public_key_, RSA & private_key_, Poco::Logger * log_) : public_key(public_key_) , private_key(private_key_) , log(log_) @@ -1130,7 +1130,7 @@ public: private: RSA & public_key; RSA & private_key; - Logger * log; + Poco::Logger * log; String scramble; }; #endif diff --git a/src/Core/SettingsCollection.cpp b/src/Core/SettingsCollection.cpp index 3b291406df9..324ad889a65 100644 --- a/src/Core/SettingsCollection.cpp +++ b/src/Core/SettingsCollection.cpp @@ -598,7 +598,7 @@ namespace details void SettingsCollectionUtils::warningNameNotFound(const StringRef & name) { - static auto * log = &Logger::get("Settings"); + static auto * log = &Poco::Logger::get("Settings"); LOG_WARNING(log, "Unknown setting {}, skipping", name); } diff --git a/src/DataStreams/AggregatingBlockInputStream.cpp b/src/DataStreams/AggregatingBlockInputStream.cpp index 7c891271468..150f794ca59 100644 --- a/src/DataStreams/AggregatingBlockInputStream.cpp +++ b/src/DataStreams/AggregatingBlockInputStream.cpp @@ -60,7 +60,7 @@ Block AggregatingBlockInputStream::readImpl() input_streams.emplace_back(temporary_inputs.back()->block_in); } - LOG_TRACE(log, "Will merge {} temporary files of size {} compressed, {} uncompressed.", files.files.size(), formatReadableSizeWithBinarySuffix(files.sum_size_compressed), formatReadableSizeWithBinarySuffix(files.sum_size_uncompressed)); + LOG_TRACE(log, "Will merge {} temporary files of size {} compressed, {} uncompressed.", files.files.size(), ReadableSize(files.sum_size_compressed), ReadableSize(files.sum_size_uncompressed)); impl = std::make_unique(input_streams, params, final, 1, 1); } diff --git a/src/DataStreams/AggregatingBlockInputStream.h b/src/DataStreams/AggregatingBlockInputStream.h index 
5e993949b63..009a9704e4e 100644 --- a/src/DataStreams/AggregatingBlockInputStream.h +++ b/src/DataStreams/AggregatingBlockInputStream.h @@ -47,7 +47,7 @@ protected: /** From here we will get the completed blocks after the aggregation. */ std::unique_ptr impl; - Logger * log = &Logger::get("AggregatingBlockInputStream"); + Poco::Logger * log = &Poco::Logger::get("AggregatingBlockInputStream"); }; } diff --git a/src/DataStreams/CollapsingFinalBlockInputStream.h b/src/DataStreams/CollapsingFinalBlockInputStream.h index c237d5465c3..d090c53ddf9 100644 --- a/src/DataStreams/CollapsingFinalBlockInputStream.h +++ b/src/DataStreams/CollapsingFinalBlockInputStream.h @@ -168,7 +168,7 @@ private: const SortDescription description; String sign_column_name; - Logger * log = &Logger::get("CollapsingFinalBlockInputStream"); + Poco::Logger * log = &Poco::Logger::get("CollapsingFinalBlockInputStream"); bool first = true; diff --git a/src/DataStreams/ColumnGathererStream.cpp b/src/DataStreams/ColumnGathererStream.cpp index b98ee96d26a..7a6dc7d88f0 100644 --- a/src/DataStreams/ColumnGathererStream.cpp +++ b/src/DataStreams/ColumnGathererStream.cpp @@ -21,7 +21,7 @@ ColumnGathererStream::ColumnGathererStream( const String & column_name_, const BlockInputStreams & source_streams, ReadBuffer & row_sources_buf_, size_t block_preferred_size_) : column_name(column_name_), sources(source_streams.size()), row_sources_buf(row_sources_buf_) - , block_preferred_size(block_preferred_size_), log(&Logger::get("ColumnGathererStream")) + , block_preferred_size(block_preferred_size_), log(&Poco::Logger::get("ColumnGathererStream")) { if (source_streams.empty()) throw Exception("There are no streams to gather", ErrorCodes::EMPTY_DATA_PASSED); @@ -105,7 +105,7 @@ void ColumnGathererStream::readSuffixImpl() else LOG_DEBUG(log, "Gathered column {} ({} bytes/elem.) in {} sec., {} rows/sec., {}/sec.", column_name, static_cast(profile_info.bytes) / profile_info.rows, seconds, - profile_info.rows / seconds, formatReadableSizeWithBinarySuffix(profile_info.bytes / seconds)); + profile_info.rows / seconds, ReadableSize(profile_info.bytes / seconds)); } } diff --git a/src/DataStreams/CreatingSetsBlockInputStream.h b/src/DataStreams/CreatingSetsBlockInputStream.h index 4a4ffef29d6..178fa3f289f 100644 --- a/src/DataStreams/CreatingSetsBlockInputStream.h +++ b/src/DataStreams/CreatingSetsBlockInputStream.h @@ -44,7 +44,7 @@ private: size_t bytes_to_transfer = 0; using Logger = Poco::Logger; - Logger * log = &Logger::get("CreatingSetsBlockInputStream"); + Poco::Logger * log = &Poco::Logger::get("CreatingSetsBlockInputStream"); void createAll(); void createOne(SubqueryForSet & subquery); diff --git a/src/DataStreams/MergeSortingBlockInputStream.cpp b/src/DataStreams/MergeSortingBlockInputStream.cpp index 587f21d2467..5e1fbe599b7 100644 --- a/src/DataStreams/MergeSortingBlockInputStream.cpp +++ b/src/DataStreams/MergeSortingBlockInputStream.cpp @@ -264,7 +264,7 @@ void MergeSortingBlockInputStream::remerge() } merger.readSuffix(); - LOG_DEBUG(log, "Memory usage is lowered from {} to {}", formatReadableSizeWithBinarySuffix(sum_bytes_in_blocks), formatReadableSizeWithBinarySuffix(new_sum_bytes_in_blocks)); + LOG_DEBUG(log, "Memory usage is lowered from {} to {}", ReadableSize(sum_bytes_in_blocks), ReadableSize(new_sum_bytes_in_blocks)); /// If the memory consumption was not lowered enough - we will not perform remerge anymore. 2 is a guess. 
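/// Worked example, byte counts hypothetical: lowering 1.50 GiB of blocks only
/// to 900.00 MiB fails the check below (1800 MiB > 1536 MiB), so remerge is
/// switched off for the rest of the sort. Via the ReadableSize wrapper, the
/// LOG_DEBUG above would have rendered this attempt as:
///
///     "Memory usage is lowered from 1.50 GiB to 900.00 MiB"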
if (new_sum_bytes_in_blocks * 2 > sum_bytes_in_blocks) diff --git a/src/DataStreams/MergeSortingBlockInputStream.h b/src/DataStreams/MergeSortingBlockInputStream.h index ee03f202be0..c21c548ff24 100644 --- a/src/DataStreams/MergeSortingBlockInputStream.h +++ b/src/DataStreams/MergeSortingBlockInputStream.h @@ -104,7 +104,7 @@ private: String codec; size_t min_free_disk_space; - Logger * log = &Logger::get("MergeSortingBlockInputStream"); + Poco::Logger * log = &Poco::Logger::get("MergeSortingBlockInputStream"); Blocks blocks; size_t sum_rows_in_blocks = 0; diff --git a/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp b/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp index 9f514b6701d..56dcbda0487 100644 --- a/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp +++ b/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp @@ -555,7 +555,7 @@ MergingAggregatedMemoryEfficientBlockInputStream::BlocksToMerge MergingAggregate /// Not yet partitioned (splitted to buckets) block. Will partition it and place result to 'splitted_blocks'. if (input.block.info.bucket_num == -1 && input.block && input.splitted_blocks.empty()) { - LOG_TRACE(&Logger::get("MergingAggregatedMemoryEfficient"), "Having block without bucket: will split."); + LOG_TRACE(&Poco::Logger::get("MergingAggregatedMemoryEfficient"), "Having block without bucket: will split."); input.splitted_blocks = aggregator.convertBlockToTwoLevel(input.block); input.block = Block(); diff --git a/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.h b/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.h index 4a6eccfd2a4..f071ac42ee5 100644 --- a/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.h +++ b/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.h @@ -96,7 +96,7 @@ private: std::atomic has_overflows {false}; int current_bucket_num = -1; - Logger * log = &Logger::get("MergingAggregatedMemoryEfficientBlockInputStream"); + Poco::Logger * log = &Poco::Logger::get("MergingAggregatedMemoryEfficientBlockInputStream"); struct Input diff --git a/src/DataStreams/MergingSortedBlockInputStream.cpp b/src/DataStreams/MergingSortedBlockInputStream.cpp index 877213294ec..434c89803b8 100644 --- a/src/DataStreams/MergingSortedBlockInputStream.cpp +++ b/src/DataStreams/MergingSortedBlockInputStream.cpp @@ -23,7 +23,7 @@ MergingSortedBlockInputStream::MergingSortedBlockInputStream( : description(std::move(description_)), max_block_size(max_block_size_), limit(limit_), quiet(quiet_) , source_blocks(inputs_.size()) , cursors(inputs_.size()), out_row_sources_buf(out_row_sources_buf_) - , log(&Logger::get("MergingSortedBlockInputStream")) + , log(&Poco::Logger::get("MergingSortedBlockInputStream")) { children.insert(children.end(), inputs_.begin(), inputs_.end()); header = children.at(0)->getHeader(); @@ -269,7 +269,7 @@ void MergingSortedBlockInputStream::readSuffixImpl() LOG_DEBUG(log, "Merge sorted {} blocks, {} rows in {} sec., {} rows/sec., {}/sec", profile_info.blocks, profile_info.rows, seconds, profile_info.rows / seconds, - formatReadableSizeWithBinarySuffix(profile_info.bytes / seconds)); + ReadableSize(profile_info.bytes / seconds)); } } diff --git a/src/DataStreams/ParallelAggregatingBlockInputStream.cpp b/src/DataStreams/ParallelAggregatingBlockInputStream.cpp index aeda8ea5be9..611059c1443 100644 --- a/src/DataStreams/ParallelAggregatingBlockInputStream.cpp +++ b/src/DataStreams/ParallelAggregatingBlockInputStream.cpp @@ -82,7 
+82,7 @@ Block ParallelAggregatingBlockInputStream::readImpl() input_streams.emplace_back(temporary_inputs.back()->block_in); } - LOG_TRACE(log, "Will merge {} temporary files of size {} compressed, {} uncompressed.", files.files.size(), formatReadableSizeWithBinarySuffix(files.sum_size_compressed), formatReadableSizeWithBinarySuffix(files.sum_size_uncompressed)); + LOG_TRACE(log, "Will merge {} temporary files of size {} compressed, {} uncompressed.", files.files.size(), ReadableSize(files.sum_size_compressed), ReadableSize(files.sum_size_uncompressed)); impl = std::make_unique( input_streams, params, final, temporary_data_merge_threads, temporary_data_merge_threads); @@ -178,16 +178,16 @@ void ParallelAggregatingBlockInputStream::execute() { size_t rows = many_data[i]->size(); LOG_TRACE(log, "Aggregated. {} to {} rows (from {}) in {} sec. ({} rows/sec., {}/sec.)", - threads_data[i].src_rows, rows, formatReadableSizeWithBinarySuffix(threads_data[i].src_bytes), + threads_data[i].src_rows, rows, ReadableSize(threads_data[i].src_bytes), elapsed_seconds, threads_data[i].src_rows / elapsed_seconds, - formatReadableSizeWithBinarySuffix(threads_data[i].src_bytes / elapsed_seconds)); + ReadableSize(threads_data[i].src_bytes / elapsed_seconds)); total_src_rows += threads_data[i].src_rows; total_src_bytes += threads_data[i].src_bytes; } LOG_TRACE(log, "Total aggregated. {} rows (from {}) in {} sec. ({} rows/sec., {}/sec.)", - total_src_rows, formatReadableSizeWithBinarySuffix(total_src_bytes), elapsed_seconds, - total_src_rows / elapsed_seconds, formatReadableSizeWithBinarySuffix(total_src_bytes / elapsed_seconds)); + total_src_rows, ReadableSize(total_src_bytes), elapsed_seconds, + total_src_rows / elapsed_seconds, ReadableSize(total_src_bytes / elapsed_seconds)); /// If there was no data, and we aggregate without keys, we must return single row with the result of empty aggregation. /// To do this, we pass a block with zero rows to aggregate. diff --git a/src/DataStreams/ParallelAggregatingBlockInputStream.h b/src/DataStreams/ParallelAggregatingBlockInputStream.h index 942c906b872..4b0a2e806fa 100644 --- a/src/DataStreams/ParallelAggregatingBlockInputStream.h +++ b/src/DataStreams/ParallelAggregatingBlockInputStream.h @@ -60,7 +60,7 @@ private: std::atomic executed {false}; std::vector> temporary_inputs; - Logger * log = &Logger::get("ParallelAggregatingBlockInputStream"); + Poco::Logger * log = &Poco::Logger::get("ParallelAggregatingBlockInputStream"); ManyAggregatedDataVariants many_data; diff --git a/src/DataStreams/ParallelInputsProcessor.h b/src/DataStreams/ParallelInputsProcessor.h index 714174e6ac1..326506d28ca 100644 --- a/src/DataStreams/ParallelInputsProcessor.h +++ b/src/DataStreams/ParallelInputsProcessor.h @@ -359,7 +359,7 @@ private: /// Wait for the completion of all threads. 
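/// The member initializer below is the pattern this commit rewrites across
/// the tree: Poco::Logger::get() returns a reference into a process-wide
/// logger registry, so caching the raw pointer in a member is safe for the
/// program's lifetime. Sketch (class name hypothetical):
///
///     class ExampleProcessor
///     {
///         Poco::Logger * log = &Poco::Logger::get("ExampleProcessor");
///     };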
std::atomic joined_threads { false }; - Logger * log = &Logger::get("ParallelInputsProcessor"); + Poco::Logger * log = &Poco::Logger::get("ParallelInputsProcessor"); }; diff --git a/src/DataStreams/RemoteBlockInputStream.h b/src/DataStreams/RemoteBlockInputStream.h index 66b1ebbb6c3..f6bac4155da 100644 --- a/src/DataStreams/RemoteBlockInputStream.h +++ b/src/DataStreams/RemoteBlockInputStream.h @@ -151,7 +151,7 @@ private: PoolMode pool_mode = PoolMode::GET_MANY; StorageID main_table = StorageID::createEmpty(); - Logger * log = &Logger::get("RemoteBlockInputStream"); + Poco::Logger * log = &Poco::Logger::get("RemoteBlockInputStream"); }; } diff --git a/src/DataStreams/SizeLimits.cpp b/src/DataStreams/SizeLimits.cpp index ed57f66d21a..06dde923e55 100644 --- a/src/DataStreams/SizeLimits.cpp +++ b/src/DataStreams/SizeLimits.cpp @@ -16,8 +16,8 @@ bool SizeLimits::check(UInt64 rows, UInt64 bytes, const char * what, int too_man + ", current rows: " + formatReadableQuantity(rows), too_many_rows_exception_code); if (max_bytes && bytes > max_bytes) - throw Exception("Limit for " + std::string(what) + " exceeded, max bytes: " + formatReadableSizeWithBinarySuffix(max_bytes) - + ", current bytes: " + formatReadableSizeWithBinarySuffix(bytes), too_many_bytes_exception_code); + throw Exception(fmt::format("Limit for {} exceeded, max bytes: {}, current bytes: {}", + std::string(what), ReadableSize(max_bytes), ReadableSize(bytes)), too_many_bytes_exception_code); return true; } diff --git a/src/DataStreams/TTLBlockInputStream.cpp b/src/DataStreams/TTLBlockInputStream.cpp index c6542763533..ca65ae520c6 100644 --- a/src/DataStreams/TTLBlockInputStream.cpp +++ b/src/DataStreams/TTLBlockInputStream.cpp @@ -28,7 +28,7 @@ TTLBlockInputStream::TTLBlockInputStream( , current_time(current_time_) , force(force_) , old_ttl_infos(data_part->ttl_infos) - , log(&Logger::get(storage.getLogName() + " (TTLBlockInputStream)")) + , log(&Poco::Logger::get(storage.getLogName() + " (TTLBlockInputStream)")) , date_lut(DateLUT::instance()) { children.push_back(input_); diff --git a/src/DataStreams/TTLBlockInputStream.h b/src/DataStreams/TTLBlockInputStream.h index c6ffa95cd75..060306f7d2d 100644 --- a/src/DataStreams/TTLBlockInputStream.h +++ b/src/DataStreams/TTLBlockInputStream.h @@ -52,7 +52,7 @@ private: NameSet empty_columns; size_t rows_removed = 0; - Logger * log; + Poco::Logger * log; const DateLUTImpl & date_lut; /// TODO rewrite defaults logic to evaluteMissingDefaults diff --git a/src/DataStreams/UnionBlockInputStream.h b/src/DataStreams/UnionBlockInputStream.h index c4e84e85845..ab667ac2e36 100644 --- a/src/DataStreams/UnionBlockInputStream.h +++ b/src/DataStreams/UnionBlockInputStream.h @@ -253,7 +253,7 @@ private: bool started = false; bool all_read = false; - Logger * log = &Logger::get("UnionBlockInputStream"); + Poco::Logger * log = &Poco::Logger::get("UnionBlockInputStream"); }; } diff --git a/src/DataStreams/tests/collapsing_sorted_stream.cpp b/src/DataStreams/tests/collapsing_sorted_stream.cpp index fd7dc11add6..e6d2167578b 100644 --- a/src/DataStreams/tests/collapsing_sorted_stream.cpp +++ b/src/DataStreams/tests/collapsing_sorted_stream.cpp @@ -20,8 +20,8 @@ try using namespace DB; Poco::AutoPtr channel = new Poco::ConsoleChannel(std::cerr); - Logger::root().setChannel(channel); - Logger::root().setLevel("trace"); + Poco::Logger::root().setChannel(channel); + Poco::Logger::root().setLevel("trace"); Block block1; diff --git a/src/Databases/DatabaseDictionary.cpp b/src/Databases/DatabaseDictionary.cpp index 
c4c74f8e70c..67d33d7bfd7 100644 --- a/src/Databases/DatabaseDictionary.cpp +++ b/src/Databases/DatabaseDictionary.cpp @@ -32,7 +32,7 @@ namespace DatabaseDictionary::DatabaseDictionary(const String & name_, const Context & global_context_) : IDatabase(name_) - , log(&Logger::get("DatabaseDictionary(" + database_name + ")")) + , log(&Poco::Logger::get("DatabaseDictionary(" + database_name + ")")) , global_context(global_context_.getGlobalContext()) { } diff --git a/src/Databases/DatabasesCommon.cpp b/src/Databases/DatabasesCommon.cpp index 2a2ca1841cf..47c54fae800 100644 --- a/src/Databases/DatabasesCommon.cpp +++ b/src/Databases/DatabasesCommon.cpp @@ -20,7 +20,7 @@ namespace ErrorCodes } DatabaseWithOwnTablesBase::DatabaseWithOwnTablesBase(const String & name_, const String & logger, const Context & context) - : IDatabase(name_), log(&Logger::get(logger)), global_context(context.getGlobalContext()) + : IDatabase(name_), log(&Poco::Logger::get(logger)), global_context(context.getGlobalContext()) { } diff --git a/src/Dictionaries/CacheDictionary.cpp b/src/Dictionaries/CacheDictionary.cpp index fffeb796c56..99d2445807b 100644 --- a/src/Dictionaries/CacheDictionary.cpp +++ b/src/Dictionaries/CacheDictionary.cpp @@ -85,7 +85,7 @@ CacheDictionary::CacheDictionary( , update_queue_push_timeout_milliseconds(update_queue_push_timeout_milliseconds_) , query_wait_timeout_milliseconds(query_wait_timeout_milliseconds_) , max_threads_for_updates(max_threads_for_updates_) - , log(&Logger::get("ExternalDictionaries")) + , log(&Poco::Logger::get("ExternalDictionaries")) , size{roundUpToPowerOfTwoOrZero(std::max(size_, size_t(max_collision_length)))} , size_overlap_mask{this->size - 1} , cells{this->size} diff --git a/src/Dictionaries/CacheDictionary.h b/src/Dictionaries/CacheDictionary.h index bb103c61107..8109a6d706b 100644 --- a/src/Dictionaries/CacheDictionary.h +++ b/src/Dictionaries/CacheDictionary.h @@ -314,7 +314,7 @@ private: const size_t query_wait_timeout_milliseconds; const size_t max_threads_for_updates; - Logger * const log; + Poco::Logger * log; mutable std::shared_mutex rw_lock; @@ -356,7 +356,7 @@ private: * How the update goes: we basically have a method like get(keys)->values. Values are cached, so sometimes we * can return them from the cache. For values not in cache, we query them from the dictionary, and add to the * cache. The cache is lossy, so we can't expect it to store all the keys, and we store them separately. Normally, - * they would be passed as a return value of get(), but for Unknown Reasons the dictionaries use a baroque + * they would be passed as a return value of get(), but for Unknown Reasons the dictionaries use a baroque * interface where get() accepts two callback, one that it calls for found values, and one for not found. * * Now we make it even uglier by doing this from multiple threads. 
The missing values are retreived from the diff --git a/src/Dictionaries/Embedded/RegionsHierarchies.cpp b/src/Dictionaries/Embedded/RegionsHierarchies.cpp index 9fb51d72457..15e14db4664 100644 --- a/src/Dictionaries/Embedded/RegionsHierarchies.cpp +++ b/src/Dictionaries/Embedded/RegionsHierarchies.cpp @@ -6,7 +6,7 @@ RegionsHierarchies::RegionsHierarchies(IRegionsHierarchiesDataProviderPtr data_provider) { - Logger * log = &Logger::get("RegionsHierarchies"); + Poco::Logger * log = &Poco::Logger::get("RegionsHierarchies"); LOG_DEBUG(log, "Adding default regions hierarchy"); data.emplace("", data_provider->getDefaultHierarchySource()); diff --git a/src/Dictionaries/Embedded/RegionsHierarchy.cpp b/src/Dictionaries/Embedded/RegionsHierarchy.cpp index dac1e948e0a..115ae30d93e 100644 --- a/src/Dictionaries/Embedded/RegionsHierarchy.cpp +++ b/src/Dictionaries/Embedded/RegionsHierarchy.cpp @@ -23,7 +23,7 @@ RegionsHierarchy::RegionsHierarchy(IRegionsHierarchyDataSourcePtr data_source_) void RegionsHierarchy::reload() { - Logger * log = &Logger::get("RegionsHierarchy"); + Poco::Logger * log = &Poco::Logger::get("RegionsHierarchy"); if (!data_source->isModified()) return; diff --git a/src/Dictionaries/Embedded/RegionsNames.cpp b/src/Dictionaries/Embedded/RegionsNames.cpp index b31debdbc26..30ba8259b3e 100644 --- a/src/Dictionaries/Embedded/RegionsNames.cpp +++ b/src/Dictionaries/Embedded/RegionsNames.cpp @@ -42,7 +42,7 @@ std::string RegionsNames::dumpSupportedLanguagesNames() void RegionsNames::reload() { - Logger * log = &Logger::get("RegionsNames"); + Poco::Logger * log = &Poco::Logger::get("RegionsNames"); LOG_DEBUG(log, "Reloading regions names"); RegionID max_region_id = 0; diff --git a/src/Dictionaries/ExecutableDictionarySource.cpp b/src/Dictionaries/ExecutableDictionarySource.cpp index 0bb8c8543e8..38965e00a84 100644 --- a/src/Dictionaries/ExecutableDictionarySource.cpp +++ b/src/Dictionaries/ExecutableDictionarySource.cpp @@ -53,7 +53,7 @@ ExecutableDictionarySource::ExecutableDictionarySource( const std::string & config_prefix, Block & sample_block_, const Context & context_) - : log(&Logger::get("ExecutableDictionarySource")) + : log(&Poco::Logger::get("ExecutableDictionarySource")) , dict_struct{dict_struct_} , command{config.getString(config_prefix + ".command")} , update_field{config.getString(config_prefix + ".update_field", "")} @@ -64,7 +64,7 @@ ExecutableDictionarySource::ExecutableDictionarySource( } ExecutableDictionarySource::ExecutableDictionarySource(const ExecutableDictionarySource & other) - : log(&Logger::get("ExecutableDictionarySource")) + : log(&Poco::Logger::get("ExecutableDictionarySource")) , update_time{other.update_time} , dict_struct{other.dict_struct} , command{other.command} diff --git a/src/Dictionaries/HTTPDictionarySource.cpp b/src/Dictionaries/HTTPDictionarySource.cpp index 7600253d5b8..95aab78ba2b 100644 --- a/src/Dictionaries/HTTPDictionarySource.cpp +++ b/src/Dictionaries/HTTPDictionarySource.cpp @@ -31,7 +31,7 @@ HTTPDictionarySource::HTTPDictionarySource( Block & sample_block_, const Context & context_, bool check_config) - : log(&Logger::get("HTTPDictionarySource")) + : log(&Poco::Logger::get("HTTPDictionarySource")) , update_time{std::chrono::system_clock::from_time_t(0)} , dict_struct{dict_struct_} , url{config.getString(config_prefix + ".url", "")} @@ -71,7 +71,7 @@ HTTPDictionarySource::HTTPDictionarySource( } HTTPDictionarySource::HTTPDictionarySource(const HTTPDictionarySource & other) - : log(&Logger::get("HTTPDictionarySource")) + : 
log(&Poco::Logger::get("HTTPDictionarySource")) , update_time{other.update_time} , dict_struct{other.dict_struct} , url{other.url} diff --git a/src/Dictionaries/LibraryDictionarySource.cpp b/src/Dictionaries/LibraryDictionarySource.cpp index 83d081707bd..ba538201910 100644 --- a/src/Dictionaries/LibraryDictionarySource.cpp +++ b/src/Dictionaries/LibraryDictionarySource.cpp @@ -125,7 +125,7 @@ LibraryDictionarySource::LibraryDictionarySource( Block & sample_block_, const Context & context, bool check_config) - : log(&Logger::get("LibraryDictionarySource")) + : log(&Poco::Logger::get("LibraryDictionarySource")) , dict_struct{dict_struct_} , config_prefix{config_prefix_} , path{config.getString(config_prefix + ".path", "")} @@ -157,7 +157,7 @@ LibraryDictionarySource::LibraryDictionarySource( } LibraryDictionarySource::LibraryDictionarySource(const LibraryDictionarySource & other) - : log(&Logger::get("LibraryDictionarySource")) + : log(&Poco::Logger::get("LibraryDictionarySource")) , dict_struct{other.dict_struct} , config_prefix{other.config_prefix} , path{other.path} diff --git a/src/Dictionaries/LibraryDictionarySourceExternal.cpp b/src/Dictionaries/LibraryDictionarySourceExternal.cpp index 03447df339c..2e944056283 100644 --- a/src/Dictionaries/LibraryDictionarySourceExternal.cpp +++ b/src/Dictionaries/LibraryDictionarySourceExternal.cpp @@ -10,7 +10,7 @@ void ClickHouseLibrary::log(ClickHouseLibrary::LogLevel level, ClickHouseLibrary { using ClickHouseLibrary::LogLevel; - auto & logger = Logger::get(DICT_LOGGER_NAME); + auto & logger = Poco::Logger::get(DICT_LOGGER_NAME); switch (level) { case LogLevel::TRACE: diff --git a/src/Dictionaries/MySQLDictionarySource.cpp b/src/Dictionaries/MySQLDictionarySource.cpp index 00f592460a7..505ce7b0c12 100644 --- a/src/Dictionaries/MySQLDictionarySource.cpp +++ b/src/Dictionaries/MySQLDictionarySource.cpp @@ -58,7 +58,7 @@ MySQLDictionarySource::MySQLDictionarySource( const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, const Block & sample_block_) - : log(&Logger::get("MySQLDictionarySource")) + : log(&Poco::Logger::get("MySQLDictionarySource")) , update_time{std::chrono::system_clock::from_time_t(0)} , dict_struct{dict_struct_} , db{config.getString(config_prefix + ".db", "")} @@ -77,7 +77,7 @@ MySQLDictionarySource::MySQLDictionarySource( /// copy-constructor is provided in order to support cloneability MySQLDictionarySource::MySQLDictionarySource(const MySQLDictionarySource & other) - : log(&Logger::get("MySQLDictionarySource")) + : log(&Poco::Logger::get("MySQLDictionarySource")) , update_time{other.update_time} , dict_struct{other.dict_struct} , db{other.db} diff --git a/src/Dictionaries/TrieDictionary.h b/src/Dictionaries/TrieDictionary.h index 59f946ebe71..5f8b5df89bf 100644 --- a/src/Dictionaries/TrieDictionary.h +++ b/src/Dictionaries/TrieDictionary.h @@ -253,7 +253,7 @@ private: std::exception_ptr creation_exception; - Logger * logger; + Poco::Logger * logger; }; } diff --git a/src/Dictionaries/XDBCDictionarySource.cpp b/src/Dictionaries/XDBCDictionarySource.cpp index 03d31fa001f..92af20e646b 100644 --- a/src/Dictionaries/XDBCDictionarySource.cpp +++ b/src/Dictionaries/XDBCDictionarySource.cpp @@ -72,7 +72,7 @@ XDBCDictionarySource::XDBCDictionarySource( const Block & sample_block_, const Context & context_, const BridgeHelperPtr bridge_) - : log(&Logger::get(bridge_->getName() + "DictionarySource")) + : log(&Poco::Logger::get(bridge_->getName() + "DictionarySource")) , 
update_time{std::chrono::system_clock::from_time_t(0)} , dict_struct{dict_struct_} , db{config_.getString(config_prefix_ + ".db", "")} @@ -96,7 +96,7 @@ XDBCDictionarySource::XDBCDictionarySource( /// copy-constructor is provided in order to support cloneability XDBCDictionarySource::XDBCDictionarySource(const XDBCDictionarySource & other) - : log(&Logger::get(other.bridge_helper->getName() + "DictionarySource")) + : log(&Poco::Logger::get(other.bridge_helper->getName() + "DictionarySource")) , update_time{other.update_time} , dict_struct{other.dict_struct} , db{other.db} diff --git a/src/Disks/DiskLocal.cpp b/src/Disks/DiskLocal.cpp index cea900d5bad..68f5ee99a7a 100644 --- a/src/Disks/DiskLocal.cpp +++ b/src/Disks/DiskLocal.cpp @@ -91,7 +91,7 @@ bool DiskLocal::tryReserve(UInt64 bytes) std::lock_guard lock(DiskLocal::reservation_mutex); if (bytes == 0) { - LOG_DEBUG(&Logger::get("DiskLocal"), "Reserving 0 bytes on disk {}", backQuote(name)); + LOG_DEBUG(&Poco::Logger::get("DiskLocal"), "Reserving 0 bytes on disk {}", backQuote(name)); ++reservation_count; return true; } @@ -100,8 +100,8 @@ bool DiskLocal::tryReserve(UInt64 bytes) UInt64 unreserved_space = available_space - std::min(available_space, reserved_bytes); if (unreserved_space >= bytes) { - LOG_DEBUG(&Logger::get("DiskLocal"), "Reserving {} on disk {}, having unreserved {}.", - formatReadableSizeWithBinarySuffix(bytes), backQuote(name), formatReadableSizeWithBinarySuffix(unreserved_space)); + LOG_DEBUG(&Poco::Logger::get("DiskLocal"), "Reserving {} on disk {}, having unreserved {}.", + ReadableSize(bytes), backQuote(name), ReadableSize(unreserved_space)); ++reservation_count; reserved_bytes += bytes; return true; @@ -310,7 +310,7 @@ DiskLocalReservation::~DiskLocalReservation() if (disk->reserved_bytes < size) { disk->reserved_bytes = 0; - LOG_ERROR(&Logger::get("DiskLocal"), "Unbalanced reservations size for disk '{}'.", disk->getName()); + LOG_ERROR(&Poco::Logger::get("DiskLocal"), "Unbalanced reservations size for disk '{}'.", disk->getName()); } else { @@ -318,7 +318,7 @@ DiskLocalReservation::~DiskLocalReservation() } if (disk->reservation_count == 0) - LOG_ERROR(&Logger::get("DiskLocal"), "Unbalanced reservation count for disk '{}'.", disk->getName()); + LOG_ERROR(&Poco::Logger::get("DiskLocal"), "Unbalanced reservation count for disk '{}'.", disk->getName()); else --disk->reservation_count; } diff --git a/src/Disks/DiskSelector.cpp b/src/Disks/DiskSelector.cpp index 69549e4520d..0fb728a4f02 100644 --- a/src/Disks/DiskSelector.cpp +++ b/src/Disks/DiskSelector.cpp @@ -99,7 +99,7 @@ DiskSelectorPtr DiskSelector::updateFromConfig( } writeString(" disappeared from configuration, this change will be applied after restart of ClickHouse", warning); - LOG_WARNING(&Logger::get("DiskSelector"), warning.str()); + LOG_WARNING(&Poco::Logger::get("DiskSelector"), warning.str()); } return result; diff --git a/src/Disks/S3/DiskS3.cpp b/src/Disks/S3/DiskS3.cpp index 4b36deb7e98..71b5991f770 100644 --- a/src/Disks/S3/DiskS3.cpp +++ b/src/Disks/S3/DiskS3.cpp @@ -515,7 +515,7 @@ std::unique_ptr DiskS3::readFile(const String & path, si { Metadata metadata(metadata_path, path); - LOG_DEBUG(&Logger::get("DiskS3"), "Read from file by path: {}. Existing S3 objects: {}", + LOG_DEBUG(&Poco::Logger::get("DiskS3"), "Read from file by path: {}. 
Existing S3 objects: {}", backQuote(metadata_path + path), metadata.s3_objects.size()); return std::make_unique(client, bucket, metadata, buf_size); @@ -536,7 +536,7 @@ std::unique_ptr DiskS3::writeFile(const String & path, /// Save empty metadata to disk to have ability to get file size while buffer is not finalized. metadata.save(); - LOG_DEBUG(&Logger::get("DiskS3"), "Write to file by path: {} New S3 path: {}", backQuote(metadata_path + path), s3_path); + LOG_DEBUG(&Poco::Logger::get("DiskS3"), "Write to file by path: {} New S3 path: {}", backQuote(metadata_path + path), s3_path); return std::make_unique(client, bucket, metadata, s3_path, min_upload_part_size, buf_size); } @@ -544,7 +544,7 @@ std::unique_ptr DiskS3::writeFile(const String & path, { Metadata metadata(metadata_path, path); - LOG_DEBUG(&Logger::get("DiskS3"), "Append to file by path: {}. New S3 path: {}. Existing S3 objects: {}.", + LOG_DEBUG(&Poco::Logger::get("DiskS3"), "Append to file by path: {}. New S3 path: {}. Existing S3 objects: {}.", backQuote(metadata_path + path), s3_path, metadata.s3_objects.size()); return std::make_unique(client, bucket, metadata, s3_path, min_upload_part_size, buf_size); @@ -553,7 +553,7 @@ std::unique_ptr DiskS3::writeFile(const String & path, void DiskS3::remove(const String & path) { - LOG_DEBUG(&Logger::get("DiskS3"), "Remove file by path: {}", backQuote(metadata_path + path)); + LOG_DEBUG(&Poco::Logger::get("DiskS3"), "Remove file by path: {}", backQuote(metadata_path + path)); Poco::File file(metadata_path + path); if (file.isFile()) @@ -607,7 +607,7 @@ bool DiskS3::tryReserve(UInt64 bytes) std::lock_guard lock(reservation_mutex); if (bytes == 0) { - LOG_DEBUG(&Logger::get("DiskS3"), "Reserving 0 bytes on s3 disk {}", backQuote(name)); + LOG_DEBUG(&Poco::Logger::get("DiskS3"), "Reserving 0 bytes on s3 disk {}", backQuote(name)); ++reservation_count; return true; } @@ -616,8 +616,8 @@ bool DiskS3::tryReserve(UInt64 bytes) UInt64 unreserved_space = available_space - std::min(available_space, reserved_bytes); if (unreserved_space >= bytes) { - LOG_DEBUG(&Logger::get("DiskS3"), "Reserving {} on disk {}, having unreserved {}.", - formatReadableSizeWithBinarySuffix(bytes), backQuote(name), formatReadableSizeWithBinarySuffix(unreserved_space)); + LOG_DEBUG(&Poco::Logger::get("DiskS3"), "Reserving {} on disk {}, having unreserved {}.", + ReadableSize(bytes), backQuote(name), ReadableSize(unreserved_space)); ++reservation_count; reserved_bytes += bytes; return true; @@ -672,7 +672,7 @@ DiskS3Reservation::~DiskS3Reservation() if (disk->reserved_bytes < size) { disk->reserved_bytes = 0; - LOG_ERROR(&Logger::get("DiskLocal"), "Unbalanced reservations size for disk '{}'.", disk->getName()); + LOG_ERROR(&Poco::Logger::get("DiskLocal"), "Unbalanced reservations size for disk '{}'.", disk->getName()); } else { @@ -680,7 +680,7 @@ DiskS3Reservation::~DiskS3Reservation() } if (disk->reservation_count == 0) - LOG_ERROR(&Logger::get("DiskLocal"), "Unbalanced reservation count for disk '{}'.", disk->getName()); + LOG_ERROR(&Poco::Logger::get("DiskLocal"), "Unbalanced reservation count for disk '{}'.", disk->getName()); else --disk->reservation_count; } diff --git a/src/Disks/S3/ProxyListConfiguration.cpp b/src/Disks/S3/ProxyListConfiguration.cpp index 430b6412408..318b43a22dc 100644 --- a/src/Disks/S3/ProxyListConfiguration.cpp +++ b/src/Disks/S3/ProxyListConfiguration.cpp @@ -20,7 +20,7 @@ Aws::Client::ClientConfigurationPerRequest ProxyListConfiguration::getConfigurat cfg.proxyHost = 
proxies[index].getHost(); cfg.proxyPort = proxies[index].getPort(); - LOG_DEBUG(&Logger::get("AWSClient"), "Use proxy: {}", proxies[index].toString()); + LOG_DEBUG(&Poco::Logger::get("AWSClient"), "Use proxy: {}", proxies[index].toString()); return cfg; } diff --git a/src/Disks/S3/ProxyResolverConfiguration.cpp b/src/Disks/S3/ProxyResolverConfiguration.cpp index c0cbe4ac5bf..c11697fda0b 100644 --- a/src/Disks/S3/ProxyResolverConfiguration.cpp +++ b/src/Disks/S3/ProxyResolverConfiguration.cpp @@ -21,7 +21,7 @@ ProxyResolverConfiguration::ProxyResolverConfiguration(const Poco::URI & endpoin Aws::Client::ClientConfigurationPerRequest ProxyResolverConfiguration::getConfiguration(const Aws::Http::HttpRequest &) { - LOG_DEBUG(&Logger::get("AWSClient"), "Obtain proxy using resolver: {}", endpoint.toString()); + LOG_DEBUG(&Poco::Logger::get("AWSClient"), "Obtain proxy using resolver: {}", endpoint.toString()); /// 1 second is enough for now. /// TODO: Make timeouts configurable. @@ -49,7 +49,7 @@ Aws::Client::ClientConfigurationPerRequest ProxyResolverConfiguration::getConfig /// Read proxy host as string from response body. Poco::StreamCopier::copyToString(response_body_stream, proxy_host); - LOG_DEBUG(&Logger::get("AWSClient"), "Use proxy: {}://{}:{}", proxy_scheme, proxy_host, proxy_port); + LOG_DEBUG(&Poco::Logger::get("AWSClient"), "Use proxy: {}://{}:{}", proxy_scheme, proxy_host, proxy_port); cfg.proxyScheme = Aws::Http::SchemeMapper::FromString(proxy_scheme.c_str()); cfg.proxyHost = proxy_host; diff --git a/src/Disks/S3/registerDiskS3.cpp b/src/Disks/S3/registerDiskS3.cpp index 53292bb30bc..999a81bd413 100644 --- a/src/Disks/S3/registerDiskS3.cpp +++ b/src/Disks/S3/registerDiskS3.cpp @@ -46,7 +46,7 @@ namespace throw Exception("Only HTTP/HTTPS schemas allowed in proxy resolver config: " + proxy_scheme, ErrorCodes::BAD_ARGUMENTS); auto proxy_port = proxy_resolver_config.getUInt(prefix + ".proxy_port"); - LOG_DEBUG(&Logger::get("DiskS3"), "Configured proxy resolver: {}, Scheme: {}, Port: {}", endpoint.toString(), proxy_scheme, proxy_port); + LOG_DEBUG(&Poco::Logger::get("DiskS3"), "Configured proxy resolver: {}, Scheme: {}, Port: {}", endpoint.toString(), proxy_scheme, proxy_port); return std::make_shared(endpoint, proxy_scheme, proxy_port); } @@ -70,7 +70,7 @@ namespace proxies.push_back(proxy_uri); - LOG_DEBUG(&Logger::get("DiskS3"), "Configured proxy: {}", proxy_uri.toString()); + LOG_DEBUG(&Poco::Logger::get("DiskS3"), "Configured proxy: {}", proxy_uri.toString()); } if (!proxies.empty()) diff --git a/src/Disks/StoragePolicy.cpp b/src/Disks/StoragePolicy.cpp index 3f01ed38e1c..f684dce0496 100644 --- a/src/Disks/StoragePolicy.cpp +++ b/src/Disks/StoragePolicy.cpp @@ -245,7 +245,7 @@ StoragePolicySelector::StoragePolicySelector( "StoragePolicy name can contain only alphanumeric and '_' (" + name + ")", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG); policies.emplace(name, std::make_shared(name, config, config_prefix + "." 
+ name, disks)); - LOG_INFO(&Logger::get("StoragePolicySelector"), "Storage policy {} loaded", backQuote(name)); + LOG_INFO(&Poco::Logger::get("StoragePolicySelector"), "Storage policy {} loaded", backQuote(name)); } constexpr auto default_storage_policy_name = "default"; diff --git a/src/Disks/VolumeJBOD.cpp b/src/Disks/VolumeJBOD.cpp index b9eb8038cac..7312f3d2365 100644 --- a/src/Disks/VolumeJBOD.cpp +++ b/src/Disks/VolumeJBOD.cpp @@ -20,7 +20,7 @@ VolumeJBOD::VolumeJBOD( DiskSelectorPtr disk_selector ) : IVolume(name_, config, config_prefix, disk_selector) { - Logger * logger = &Logger::get("StorageConfiguration"); + Poco::Logger * logger = &Poco::Logger::get("StorageConfiguration"); auto has_max_bytes = config.has(config_prefix + ".max_data_part_size_bytes"); auto has_max_ratio = config.has(config_prefix + ".max_data_part_size_ratio"); @@ -48,11 +48,11 @@ VolumeJBOD::VolumeJBOD( max_data_part_size = static_cast(sum_size * ratio / disks.size()); for (size_t i = 0; i < disks.size(); ++i) if (sizes[i] < max_data_part_size) - LOG_WARNING(logger, "Disk {} on volume {} have not enough space ({}) for containing part the size of max_data_part_size ({})", backQuote(disks[i]->getName()), backQuote(config_prefix), formatReadableSizeWithBinarySuffix(sizes[i]), formatReadableSizeWithBinarySuffix(max_data_part_size)); + LOG_WARNING(logger, "Disk {} on volume {} have not enough space ({}) for containing part the size of max_data_part_size ({})", backQuote(disks[i]->getName()), backQuote(config_prefix), ReadableSize(sizes[i]), ReadableSize(max_data_part_size)); } static constexpr UInt64 MIN_PART_SIZE = 8u * 1024u * 1024u; if (max_data_part_size != 0 && max_data_part_size < MIN_PART_SIZE) - LOG_WARNING(logger, "Volume {} max_data_part_size is too low ({} < {})", backQuote(name), formatReadableSizeWithBinarySuffix(max_data_part_size), formatReadableSizeWithBinarySuffix(MIN_PART_SIZE)); + LOG_WARNING(logger, "Volume {} max_data_part_size is too low ({} < {})", backQuote(name), ReadableSize(max_data_part_size), ReadableSize(MIN_PART_SIZE)); } DiskPtr VolumeJBOD::getNextDisk() diff --git a/src/IO/HTTPCommon.cpp b/src/IO/HTTPCommon.cpp index 14c97ee56f4..088ca0c246e 100644 --- a/src/IO/HTTPCommon.cpp +++ b/src/IO/HTTPCommon.cpp @@ -173,7 +173,7 @@ namespace auto msg = Poco::AnyCast(session_data); if (!msg.empty()) { - LOG_TRACE((&Logger::get("HTTPCommon")), "Failed communicating with {} with error '{}' will try to reconnect session", host, msg); + LOG_TRACE((&Poco::Logger::get("HTTPCommon")), "Failed communicating with {} with error '{}' will try to reconnect session", host, msg); /// Host can change IP const auto ip = DNSResolver::instance().resolveHost(host).toString(); if (ip != session->getHost()) diff --git a/src/IO/MMapReadBufferFromFileDescriptor.cpp b/src/IO/MMapReadBufferFromFileDescriptor.cpp index 027b95bc022..bbeec8959b4 100644 --- a/src/IO/MMapReadBufferFromFileDescriptor.cpp +++ b/src/IO/MMapReadBufferFromFileDescriptor.cpp @@ -34,7 +34,7 @@ void MMapReadBufferFromFileDescriptor::init(int fd_, size_t offset, size_t lengt { void * buf = mmap(nullptr, length, PROT_READ, MAP_PRIVATE, fd, offset); if (MAP_FAILED == buf) - throwFromErrno("MMapReadBufferFromFileDescriptor: Cannot mmap " + formatReadableSizeWithBinarySuffix(length) + ".", + throwFromErrno(fmt::format("MMapReadBufferFromFileDescriptor: Cannot mmap {}.", ReadableSize(length)), ErrorCodes::CANNOT_ALLOCATE_MEMORY); BufferBase::set(static_cast(buf), length, 0); @@ -84,7 +84,7 @@ 
MMapReadBufferFromFileDescriptor::~MMapReadBufferFromFileDescriptor() void MMapReadBufferFromFileDescriptor::finish() { if (0 != munmap(internalBuffer().begin(), length)) - throwFromErrno("MMapReadBufferFromFileDescriptor: Cannot munmap " + formatReadableSizeWithBinarySuffix(length) + ".", + throwFromErrno(fmt::format("MMapReadBufferFromFileDescriptor: Cannot munmap {}.", ReadableSize(length)), ErrorCodes::CANNOT_MUNMAP); length = 0; diff --git a/src/IO/ReadBufferFromS3.h b/src/IO/ReadBufferFromS3.h index 15d3b4f0beb..829b73d0af6 100644 --- a/src/IO/ReadBufferFromS3.h +++ b/src/IO/ReadBufferFromS3.h @@ -33,7 +33,7 @@ private: Aws::S3::Model::GetObjectResult read_result; std::unique_ptr impl; - Logger * log = &Logger::get("ReadBufferFromS3"); + Poco::Logger * log = &Poco::Logger::get("ReadBufferFromS3"); public: explicit ReadBufferFromS3( diff --git a/src/IO/ReadWriteBufferFromHTTP.h b/src/IO/ReadWriteBufferFromHTTP.h index 962c663a687..edd0b7f1579 100644 --- a/src/IO/ReadWriteBufferFromHTTP.h +++ b/src/IO/ReadWriteBufferFromHTTP.h @@ -127,7 +127,7 @@ namespace detail if (!credentials.getUsername().empty()) credentials.authenticate(request); - LOG_TRACE((&Logger::get("ReadWriteBufferFromHTTP")), "Sending request to {}", uri.toString()); + LOG_TRACE((&Poco::Logger::get("ReadWriteBufferFromHTTP")), "Sending request to {}", uri.toString()); auto sess = session->getSession(); diff --git a/src/IO/S3Common.cpp b/src/IO/S3Common.cpp index 3c2c5835078..b67a1723aca 100644 --- a/src/IO/S3Common.cpp +++ b/src/IO/S3Common.cpp @@ -16,16 +16,17 @@ namespace { -const std::pair<LogsLevel, Message::Priority> & convertLogLevel(Aws::Utils::Logging::LogLevel log_level) +const std::pair<DB::LogsLevel, Poco::Message::Priority> & convertLogLevel(Aws::Utils::Logging::LogLevel log_level) { - static const std::unordered_map<Aws::Utils::Logging::LogLevel, std::pair<LogsLevel, Message::Priority>> mapping = { - {Aws::Utils::Logging::LogLevel::Off, {LogsLevel::none, Message::PRIO_FATAL}}, - {Aws::Utils::Logging::LogLevel::Fatal, {LogsLevel::error, Message::PRIO_FATAL}}, - {Aws::Utils::Logging::LogLevel::Error, {LogsLevel::error, Message::PRIO_ERROR}}, - {Aws::Utils::Logging::LogLevel::Warn, {LogsLevel::warning, Message::PRIO_WARNING}}, - {Aws::Utils::Logging::LogLevel::Info, {LogsLevel::information, Message::PRIO_INFORMATION}}, - {Aws::Utils::Logging::LogLevel::Debug, {LogsLevel::debug, Message::PRIO_DEBUG}}, - {Aws::Utils::Logging::LogLevel::Trace, {LogsLevel::trace, Message::PRIO_TRACE}}, + static const std::unordered_map<Aws::Utils::Logging::LogLevel, std::pair<DB::LogsLevel, Poco::Message::Priority>> mapping = + { + {Aws::Utils::Logging::LogLevel::Off, {DB::LogsLevel::none, Poco::Message::PRIO_FATAL}}, + {Aws::Utils::Logging::LogLevel::Fatal, {DB::LogsLevel::error, Poco::Message::PRIO_FATAL}}, + {Aws::Utils::Logging::LogLevel::Error, {DB::LogsLevel::error, Poco::Message::PRIO_ERROR}}, + {Aws::Utils::Logging::LogLevel::Warn, {DB::LogsLevel::warning, Poco::Message::PRIO_WARNING}}, + {Aws::Utils::Logging::LogLevel::Info, {DB::LogsLevel::information, Poco::Message::PRIO_INFORMATION}}, + {Aws::Utils::Logging::LogLevel::Debug, {DB::LogsLevel::debug, Poco::Message::PRIO_DEBUG}}, + {Aws::Utils::Logging::LogLevel::Trace, {DB::LogsLevel::trace, Poco::Message::PRIO_TRACE}}, }; return mapping.at(log_level); } diff --git a/src/IO/WriteBufferFromHTTP.cpp b/src/IO/WriteBufferFromHTTP.cpp index 573ac82a1d3..70cbd7484b4 100644 --- a/src/IO/WriteBufferFromHTTP.cpp +++ b/src/IO/WriteBufferFromHTTP.cpp @@ -15,7 +15,7 @@ WriteBufferFromHTTP::WriteBufferFromHTTP( request.setHost(uri.getHost()); request.setChunkedTransferEncoding(true); - LOG_TRACE((&Logger::get("WriteBufferToHTTP")), "Sending request to {}", uri.toString()); +
LOG_TRACE((&Poco::Logger::get("WriteBufferToHTTP")), "Sending request to {}", uri.toString()); ostr = &session->sendRequest(request); } diff --git a/src/IO/WriteBufferFromS3.h b/src/IO/WriteBufferFromS3.h index f928941e482..71c176749af 100644 --- a/src/IO/WriteBufferFromS3.h +++ b/src/IO/WriteBufferFromS3.h @@ -36,7 +36,7 @@ private: String upload_id; std::vector part_tags; - Logger * log = &Logger::get("WriteBufferFromS3"); + Poco::Logger * log = &Poco::Logger::get("WriteBufferFromS3"); public: explicit WriteBufferFromS3( diff --git a/src/IO/tests/read_float_perf.cpp b/src/IO/tests/read_float_perf.cpp index 8a870d4a960..aa0fa8ad15d 100644 --- a/src/IO/tests/read_float_perf.cpp +++ b/src/IO/tests/read_float_perf.cpp @@ -56,7 +56,8 @@ void NO_INLINE loop(ReadBuffer & in, WriteBuffer & out) } watch.stop(); - out << "Read in " << watch.elapsedSeconds() << " sec, " << formatReadableSizeWithBinarySuffix(in.count() / watch.elapsedSeconds()) << "/sec, result = " << sum << "\n"; + out << "Read in " << watch.elapsedSeconds() << " sec, " + << formatReadableSizeWithBinarySuffix(in.count() / watch.elapsedSeconds()) << "/sec, result = " << sum << "\n"; } diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 8a3d683add2..96a9b1fc1df 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -768,14 +768,14 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, co " ({} rows/sec., {}/sec. uncompressed, {}/sec. compressed)", elapsed_seconds, rows, - formatReadableSizeWithBinarySuffix(uncompressed_bytes), - formatReadableSizeWithBinarySuffix(compressed_bytes), + ReadableSize(uncompressed_bytes), + ReadableSize(compressed_bytes), uncompressed_bytes / rows, compressed_bytes / rows, uncompressed_bytes / compressed_bytes, rows / elapsed_seconds, - formatReadableSizeWithBinarySuffix(uncompressed_bytes / elapsed_seconds), - formatReadableSizeWithBinarySuffix(compressed_bytes / elapsed_seconds)); + ReadableSize(uncompressed_bytes / elapsed_seconds), + ReadableSize(compressed_bytes / elapsed_seconds)); } void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants) { @@ -871,7 +871,7 @@ void Aggregator::writeToTemporaryFileImpl( /// `data_variants` will not destroy them in the destructor, they are now owned by ColumnAggregateFunction objects. data_variants.aggregator = nullptr; - LOG_TRACE(log, "Max size of temporary block: {} rows, {}.", max_temporary_block_size_rows, formatReadableSizeWithBinarySuffix(max_temporary_block_size_bytes)); + LOG_TRACE(log, "Max size of temporary block: {} rows, {}.", max_temporary_block_size_rows, ReadableSize(max_temporary_block_size_bytes)); } @@ -943,9 +943,9 @@ void Aggregator::execute(const BlockInputStreamPtr & stream, AggregatedDataVaria size_t rows = result.sizeWithoutOverflowRow(); LOG_TRACE(log, "Aggregated. {} to {} rows (from {}) in {} sec. ({} rows/sec., {}/sec.)", - src_rows, rows, formatReadableSizeWithBinarySuffix(src_bytes), + src_rows, rows, ReadableSize(src_bytes), elapsed_seconds, src_rows / elapsed_seconds, - formatReadableSizeWithBinarySuffix(src_bytes / elapsed_seconds)); + ReadableSize(src_bytes / elapsed_seconds)); } @@ -1315,9 +1315,9 @@ BlocksList Aggregator::convertToBlocks(AggregatedDataVariants & data_variants, b double elapsed_seconds = watch.elapsedSeconds(); LOG_TRACE(log, "Converted aggregated data to blocks. {} rows, {} in {} sec. 
({} rows/sec., {}/sec.)", - rows, formatReadableSizeWithBinarySuffix(bytes), + rows, ReadableSize(bytes), elapsed_seconds, rows / elapsed_seconds, - formatReadableSizeWithBinarySuffix(bytes / elapsed_seconds)); + ReadableSize(bytes / elapsed_seconds)); return blocks; } @@ -1567,7 +1567,7 @@ public: ~MergingAndConvertingBlockInputStream() override { - LOG_TRACE(&Logger::get(__PRETTY_FUNCTION__), "Waiting for threads to finish"); + LOG_TRACE(&Poco::Logger::get(__PRETTY_FUNCTION__), "Waiting for threads to finish"); /// We need to wait for threads to finish before destructor of 'parallel_merge_data', /// because the threads access 'parallel_merge_data'. @@ -2186,9 +2186,9 @@ Block Aggregator::mergeBlocks(BlocksList & blocks, bool final) size_t bytes = block.bytes(); double elapsed_seconds = watch.elapsedSeconds(); LOG_TRACE(log, "Merged partially aggregated blocks. {} rows, {}. in {} sec. ({} rows/sec., {}/sec.)", - rows, formatReadableSizeWithBinarySuffix(bytes), + rows, ReadableSize(bytes), elapsed_seconds, rows / elapsed_seconds, - formatReadableSizeWithBinarySuffix(bytes / elapsed_seconds)); + ReadableSize(bytes / elapsed_seconds)); if (isCancelled()) return {}; diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index 1fa0ff282d6..abc1356787e 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -1048,7 +1048,7 @@ protected: std::mutex mutex; - Logger * log = &Logger::get("Aggregator"); + Poco::Logger * log = &Poco::Logger::get("Aggregator"); /// Returns true if you can abort the current task. CancellationHook isCancelled; diff --git a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp index 5460b4b3286..5d41b0e87ce 100644 --- a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp +++ b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp @@ -180,7 +180,7 @@ void SelectStreamFactory::createForShard( ProfileEvents::increment(ProfileEvents::DistributedConnectionMissingTable); if (shard_info.hasRemoteConnections()) { - LOG_WARNING(&Logger::get("ClusterProxy::SelectStreamFactory"), "There is no table {} on local replica of shard {}, will try remote replicas.", main_table.getNameForLogs(), shard_info.shard_num); + LOG_WARNING(&Poco::Logger::get("ClusterProxy::SelectStreamFactory"), "There is no table {} on local replica of shard {}, will try remote replicas.", main_table.getNameForLogs(), shard_info.shard_num); emplace_remote_stream(); } else @@ -216,7 +216,7 @@ void SelectStreamFactory::createForShard( /// If we reached this point, local replica is stale. 
ProfileEvents::increment(ProfileEvents::DistributedConnectionStaleReplica); - LOG_WARNING(&Logger::get("ClusterProxy::SelectStreamFactory"), "Local replica of shard {} is stale (delay: {}s.)", shard_info.shard_num, local_delay); + LOG_WARNING(&Poco::Logger::get("ClusterProxy::SelectStreamFactory"), "Local replica of shard {} is stale (delay: {}s.)", shard_info.shard_num, local_delay); if (!settings.fallback_to_stale_replicas_for_distributed_queries) { @@ -264,7 +264,7 @@ void SelectStreamFactory::createForShard( catch (const Exception & ex) { if (ex.code() == ErrorCodes::ALL_CONNECTION_TRIES_FAILED) - LOG_WARNING(&Logger::get("ClusterProxy::SelectStreamFactory"), "Connections to remote replicas of local shard {} failed, will use stale local replica", shard_num); + LOG_WARNING(&Poco::Logger::get("ClusterProxy::SelectStreamFactory"), "Connections to remote replicas of local shard {} failed, will use stale local replica", shard_num); else throw; } diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 9c78fe59da1..5e2f4ecadab 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -287,7 +287,7 @@ void NamedSession::release() */ struct ContextShared { - Logger * log = &Logger::get("Context"); + Poco::Logger * log = &Poco::Logger::get("Context"); /// For access of most of shared objects. Recursive mutex. mutable std::recursive_mutex mutex; diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index ee91d903416..dac51b21081 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -220,7 +220,7 @@ static bool isSupportedAlterType(int type) DDLWorker::DDLWorker(const std::string & zk_root_dir, Context & context_, const Poco::Util::AbstractConfiguration * config, const String & prefix) - : context(context_), log(&Logger::get("DDLWorker")) + : context(context_), log(&Poco::Logger::get("DDLWorker")) { queue_dir = zk_root_dir; if (queue_dir.back() == '/') @@ -1073,7 +1073,7 @@ class DDLQueryStatusInputStream : public IBlockInputStream public: DDLQueryStatusInputStream(const String & zk_node_path, const DDLLogEntry & entry, const Context & context_) - : node_path(zk_node_path), context(context_), watch(CLOCK_MONOTONIC_COARSE), log(&Logger::get("DDLQueryStatusInputStream")) + : node_path(zk_node_path), context(context_), watch(CLOCK_MONOTONIC_COARSE), log(&Poco::Logger::get("DDLQueryStatusInputStream")) { sample = Block{ {std::make_shared(), "host"}, @@ -1235,7 +1235,7 @@ private: String node_path; const Context & context; Stopwatch watch; - Logger * log; + Poco::Logger * log; Block sample; diff --git a/src/Interpreters/DDLWorker.h b/src/Interpreters/DDLWorker.h index 2f63d9dadee..c30f888b5a1 100644 --- a/src/Interpreters/DDLWorker.h +++ b/src/Interpreters/DDLWorker.h @@ -101,7 +101,7 @@ private: private: Context & context; - Logger * log; + Poco::Logger * log; std::unique_ptr current_context; std::string host_fqdn; /// current host domain name diff --git a/src/Interpreters/EmbeddedDictionaries.cpp b/src/Interpreters/EmbeddedDictionaries.cpp index 9ab3cf2dcbe..58b21fdda52 100644 --- a/src/Interpreters/EmbeddedDictionaries.cpp +++ b/src/Interpreters/EmbeddedDictionaries.cpp @@ -124,7 +124,7 @@ EmbeddedDictionaries::EmbeddedDictionaries( std::unique_ptr geo_dictionaries_loader_, Context & context_, const bool throw_on_error) - : log(&Logger::get("EmbeddedDictionaries")) + : log(&Poco::Logger::get("EmbeddedDictionaries")) , context(context_) , geo_dictionaries_loader(std::move(geo_dictionaries_loader_)) , 
reload_period(context_.getConfigRef().getInt("builtin_dictionaries_reload_interval", 3600)) diff --git a/src/Interpreters/ExternalDictionariesLoader.cpp b/src/Interpreters/ExternalDictionariesLoader.cpp index 4e958a8c12b..ebbac4c5471 100644 --- a/src/Interpreters/ExternalDictionariesLoader.cpp +++ b/src/Interpreters/ExternalDictionariesLoader.cpp @@ -15,7 +15,7 @@ namespace DB /// Must not acquire Context lock in constructor to avoid possibility of deadlocks. ExternalDictionariesLoader::ExternalDictionariesLoader(Context & context_) - : ExternalLoader("external dictionary", &Logger::get("ExternalDictionariesLoader")) + : ExternalLoader("external dictionary", &Poco::Logger::get("ExternalDictionariesLoader")) , context(context_) { setConfigSettings({"dictionary", "name", "database"}); diff --git a/src/Interpreters/ExternalLoader.cpp b/src/Interpreters/ExternalLoader.cpp index fc6505e8446..cd46845e2ed 100644 --- a/src/Interpreters/ExternalLoader.cpp +++ b/src/Interpreters/ExternalLoader.cpp @@ -100,7 +100,7 @@ namespace class ExternalLoader::LoadablesConfigReader : private boost::noncopyable { public: - LoadablesConfigReader(const String & type_name_, Logger * log_) + LoadablesConfigReader(const String & type_name_, Poco::Logger * log_) : type_name(type_name_), log(log_) { } @@ -366,7 +366,7 @@ private: } const String type_name; - Logger * log; + Poco::Logger * log; std::mutex mutex; ExternalLoaderConfigSettings settings; @@ -389,7 +389,7 @@ public: LoadingDispatcher( const CreateObjectFunction & create_object_function_, const String & type_name_, - Logger * log_) + Poco::Logger * log_) : create_object(create_object_function_) , type_name(type_name_) , log(log_) @@ -1140,7 +1140,7 @@ private: const CreateObjectFunction create_object; const String type_name; - Logger * log; + Poco::Logger * log; mutable std::mutex mutex; std::condition_variable event; @@ -1220,7 +1220,7 @@ private: }; -ExternalLoader::ExternalLoader(const String & type_name_, Logger * log_) +ExternalLoader::ExternalLoader(const String & type_name_, Poco::Logger * log_) : config_files_reader(std::make_unique(type_name_, log_)) , loading_dispatcher(std::make_unique( [this](auto && a, auto && b, auto && c) { return createObject(a, b, c); }, diff --git a/src/Interpreters/ExternalLoader.h b/src/Interpreters/ExternalLoader.h index 9f9fa97b156..542a40e6cb2 100644 --- a/src/Interpreters/ExternalLoader.h +++ b/src/Interpreters/ExternalLoader.h @@ -82,7 +82,7 @@ public: template static constexpr bool is_vector_load_result_type = std::is_same_v || std::is_same_v; - ExternalLoader(const String & type_name_, Logger * log); + ExternalLoader(const String & type_name_, Poco::Logger * log); virtual ~ExternalLoader(); /// Adds a repository which will be used to read configurations from. 
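/// ExternalLoader and its helpers now spell out Poco::Logger * in their
/// constructor parameters, as the hunks above show. A minimal sketch of the
/// same injection pattern; ExampleConfigReader is an illustrative name, not
/// part of the PR.
#include <string>
#include <Poco/Logger.h>

class ExampleConfigReader
{
public:
    ExampleConfigReader(const std::string & type_name_, Poco::Logger * log_)
        : type_name(type_name_), log(log_)
    {
    }

    void reload() { log->information("Reloading configuration of " + type_name); }

private:
    const std::string type_name;
    Poco::Logger * log;  /// not owned: Poco::Logger::get() returns registry-owned loggers
};

/// Typical construction, mirroring ExternalDictionariesLoader above:
///     ExampleConfigReader reader("external dictionary", &Poco::Logger::get("ExampleConfigReader"));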
diff --git a/src/Interpreters/ExternalModelsLoader.cpp b/src/Interpreters/ExternalModelsLoader.cpp index 31356e34855..0300bd44949 100644 --- a/src/Interpreters/ExternalModelsLoader.cpp +++ b/src/Interpreters/ExternalModelsLoader.cpp @@ -11,7 +11,7 @@ namespace ErrorCodes ExternalModelsLoader::ExternalModelsLoader(Context & context_) - : ExternalLoader("external model", &Logger::get("ExternalModelsLoader")) + : ExternalLoader("external model", &Poco::Logger::get("ExternalModelsLoader")) , context(context_) { setConfigSettings({"model", "name", {}}); diff --git a/src/Interpreters/HashJoin.cpp b/src/Interpreters/HashJoin.cpp index cc2d62a68fe..5fd2239d104 100644 --- a/src/Interpreters/HashJoin.cpp +++ b/src/Interpreters/HashJoin.cpp @@ -203,7 +203,7 @@ HashJoin::HashJoin(std::shared_ptr table_join_, const Block & right_s , any_take_last_row(any_take_last_row_) , asof_inequality(table_join->getAsofInequality()) , data(std::make_shared()) - , log(&Logger::get("HashJoin")) + , log(&Poco::Logger::get("HashJoin")) { setSampleBlock(right_sample_block); } diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index c0da03627b2..4728c214db0 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -201,7 +201,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( , storage(storage_) , input(input_) , input_pipe(std::move(input_pipe_)) - , log(&Logger::get("InterpreterSelectQuery")) + , log(&Poco::Logger::get("InterpreterSelectQuery")) { checkStackSize(); diff --git a/src/Interpreters/PartLog.cpp b/src/Interpreters/PartLog.cpp index cc3a4e3de34..b8c3920c3e4 100644 --- a/src/Interpreters/PartLog.cpp +++ b/src/Interpreters/PartLog.cpp @@ -139,7 +139,7 @@ bool PartLog::addNewParts(Context & current_context, const PartLog::MutableDataP } catch (...) { - tryLogCurrentException(part_log ? part_log->log : &Logger::get("PartLog"), __PRETTY_FUNCTION__); + tryLogCurrentException(part_log ? part_log->log : &Poco::Logger::get("PartLog"), __PRETTY_FUNCTION__); return false; } diff --git a/src/Interpreters/ProcessList.cpp b/src/Interpreters/ProcessList.cpp index 9120f5959df..d86b5678f6d 100644 --- a/src/Interpreters/ProcessList.cpp +++ b/src/Interpreters/ProcessList.cpp @@ -84,7 +84,7 @@ ProcessList::EntryPtr ProcessList::insert(const String & query_, const IAST * as if (!is_unlimited_query && max_size && processes.size() >= max_size) { if (queue_max_wait_ms) - LOG_WARNING(&Logger::get("ProcessList"), "Too many simultaneous queries, will wait {} ms.", queue_max_wait_ms); + LOG_WARNING(&Poco::Logger::get("ProcessList"), "Too many simultaneous queries, will wait {} ms.", queue_max_wait_ms); if (!queue_max_wait_ms || !have_space.wait_for(lock, std::chrono::milliseconds(queue_max_wait_ms), [&]{ return processes.size() < max_size; })) throw Exception("Too many simultaneous queries. 
Maximum: " + toString(max_size), ErrorCodes::TOO_MANY_SIMULTANEOUS_QUERIES); } @@ -231,7 +231,7 @@ ProcessListEntry::~ProcessListEntry() auto user_process_list_it = parent.user_to_queries.find(user); if (user_process_list_it == parent.user_to_queries.end()) { - LOG_ERROR(&Logger::get("ProcessList"), "Logical error: cannot find user in ProcessList"); + LOG_ERROR(&Poco::Logger::get("ProcessList"), "Logical error: cannot find user in ProcessList"); std::terminate(); } @@ -250,7 +250,7 @@ ProcessListEntry::~ProcessListEntry() if (!found) { - LOG_ERROR(&Logger::get("ProcessList"), "Logical error: cannot find query by query_id and pointer to ProcessListElement in ProcessListForUser"); + LOG_ERROR(&Poco::Logger::get("ProcessList"), "Logical error: cannot find query by query_id and pointer to ProcessListElement in ProcessListForUser"); std::terminate(); } parent.have_space.notify_all(); diff --git a/src/Interpreters/Set.h b/src/Interpreters/Set.h index 848abc9aa8a..a4c8fd59245 100644 --- a/src/Interpreters/Set.h +++ b/src/Interpreters/Set.h @@ -31,7 +31,7 @@ public: /// store all set elements in explicit form. /// This is needed for subsequent use for index. Set(const SizeLimits & limits_, bool fill_set_elements_, bool transform_null_in_) - : log(&Logger::get("Set")), + : log(&Poco::Logger::get("Set")), limits(limits_), fill_set_elements(fill_set_elements_), transform_null_in(transform_null_in_) { } @@ -105,7 +105,7 @@ private: /// Types for set_elements. DataTypes set_elements_types; - Logger * log; + Poco::Logger * log; /// Limitations on the maximum size of the set SizeLimits limits; diff --git a/src/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp index 229643b7e02..f1a72886901 100644 --- a/src/Interpreters/SystemLog.cpp +++ b/src/Interpreters/SystemLog.cpp @@ -41,7 +41,7 @@ std::shared_ptr createSystemLog( if (database != default_database_name) { /// System tables must be loaded before other tables, but loading order is undefined for all databases except `system` - LOG_ERROR(&Logger::get("SystemLog"), "Custom database name for a system table specified in config. Table `{}` will be created in `system` database instead of `{}`", table, database); + LOG_ERROR(&Poco::Logger::get("SystemLog"), "Custom database name for a system table specified in config. Table `{}` will be created in `system` database instead of `{}`", table, database); database = default_database_name; } diff --git a/src/Interpreters/SystemLog.h b/src/Interpreters/SystemLog.h index 649cfa28e6e..dd2f815ce92 100644 --- a/src/Interpreters/SystemLog.h +++ b/src/Interpreters/SystemLog.h @@ -152,7 +152,7 @@ public: ASTPtr getCreateTableQuery() override; protected: - Logger * log; + Poco::Logger * log; private: /* Saving thread data */ @@ -207,7 +207,7 @@ SystemLog::SystemLog(Context & context_, , flush_interval_milliseconds(flush_interval_milliseconds_) { assert(database_name_ == DatabaseCatalog::SYSTEM_DATABASE); - log = &Logger::get("SystemLog (" + database_name_ + "." + table_name_ + ")"); + log = &Poco::Logger::get("SystemLog (" + database_name_ + "." 
+ table_name_ + ")"); } diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 8fc799d0b48..e1e2108c0fc 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -105,7 +105,7 @@ static void logQuery(const String & query, const Context & context, bool interna { if (internal) { - LOG_DEBUG(&Logger::get("executeQuery"), "(internal) {}", joinLines(query)); + LOG_DEBUG(&Poco::Logger::get("executeQuery"), "(internal) {}", joinLines(query)); } else { @@ -113,7 +113,7 @@ static void logQuery(const String & query, const Context & context, bool interna const auto & initial_query_id = context.getClientInfo().initial_query_id; const auto & current_user = context.getClientInfo().current_user; - LOG_DEBUG(&Logger::get("executeQuery"), "(from {}{}{}) {}", + LOG_DEBUG(&Poco::Logger::get("executeQuery"), "(from {}{}{}) {}", context.getClientInfo().current_address.toString(), (current_user != "default" ? ", user: " + context.getClientInfo().current_user : ""), (!initial_query_id.empty() && current_query_id != initial_query_id ? ", initial_query_id: " + initial_query_id : std::string()), @@ -145,10 +145,10 @@ static void setExceptionStackTrace(QueryLogElement & elem) static void logException(Context & context, QueryLogElement & elem) { if (elem.stack_trace.empty()) - LOG_ERROR(&Logger::get("executeQuery"), "{} (from {}) (in query: {})", + LOG_ERROR(&Poco::Logger::get("executeQuery"), "{} (from {}) (in query: {})", elem.exception, context.getClientInfo().current_address.toString(), joinLines(elem.query)); else - LOG_ERROR(&Logger::get("executeQuery"), "{} (from {}) (in query: {})" + LOG_ERROR(&Poco::Logger::get("executeQuery"), "{} (from {}) (in query: {})" ", Stack trace (when copying this message, always include the lines below):\n\n{}", elem.exception, context.getClientInfo().current_address.toString(), joinLines(elem.query), elem.stack_trace); } @@ -505,10 +505,10 @@ static std::tuple executeQueryImpl( if (elem.read_rows != 0) { - LOG_INFO(&Logger::get("executeQuery"), "Read {} rows, {} in {} sec., {} rows/sec., {}/sec.", - elem.read_rows, formatReadableSizeWithBinarySuffix(elem.read_bytes), elapsed_seconds, + LOG_INFO(&Poco::Logger::get("executeQuery"), "Read {} rows, {} in {} sec., {} rows/sec., {}/sec.", + elem.read_rows, ReadableSize(elem.read_bytes), elapsed_seconds, static_cast(elem.read_rows / elapsed_seconds), - formatReadableSizeWithBinarySuffix(elem.read_bytes / elapsed_seconds)); + ReadableSize(elem.read_bytes / elapsed_seconds)); } elem.thread_ids = std::move(info.thread_ids); @@ -585,7 +585,7 @@ static std::tuple executeQueryImpl( std::stringstream log_str; log_str << "Query pipeline:\n"; res.in->dumpTree(log_str); - LOG_DEBUG(&Logger::get("executeQuery"), log_str.str()); + LOG_DEBUG(&Poco::Logger::get("executeQuery"), log_str.str()); } } } diff --git a/src/Interpreters/tests/select_query.cpp b/src/Interpreters/tests/select_query.cpp index 8ce65ede05f..fb364d28086 100644 --- a/src/Interpreters/tests/select_query.cpp +++ b/src/Interpreters/tests/select_query.cpp @@ -24,8 +24,8 @@ int main(int, char **) try { Poco::AutoPtr channel = new Poco::ConsoleChannel(std::cerr); - Logger::root().setChannel(channel); - Logger::root().setLevel("trace"); + Poco::Logger::root().setChannel(channel); + Poco::Logger::root().setLevel("trace"); /// Pre-initialize the `DateLUT` so that the first initialization does not affect the measured execution speed. 
DateLUT::instance(); diff --git a/src/Processors/Formats/IRowInputFormat.cpp b/src/Processors/Formats/IRowInputFormat.cpp index e2676eb64c2..12d4db1f4a8 100644 --- a/src/Processors/Formats/IRowInputFormat.cpp +++ b/src/Processors/Formats/IRowInputFormat.cpp @@ -149,7 +149,7 @@ Chunk IRowInputFormat::generate() { if (num_errors && (params.allow_errors_num > 0 || params.allow_errors_ratio > 0)) { - Logger * log = &Logger::get("IRowInputFormat"); + Poco::Logger * log = &Poco::Logger::get("IRowInputFormat"); LOG_TRACE(log, "Skipped {} rows with errors while reading the input stream", num_errors); } diff --git a/src/Processors/Formats/Impl/AvroRowInputFormat.cpp b/src/Processors/Formats/Impl/AvroRowInputFormat.cpp index 78a291de262..8017667909b 100644 --- a/src/Processors/Formats/Impl/AvroRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/AvroRowInputFormat.cpp @@ -612,7 +612,7 @@ private: try { Poco::URI url(base_url, "/schemas/ids/" + std::to_string(id)); - LOG_TRACE((&Logger::get("AvroConfluentRowInputFormat")), "Fetching schema id = {}", id); + LOG_TRACE((&Poco::Logger::get("AvroConfluentRowInputFormat")), "Fetching schema id = {}", id); /// One second for connect/send/receive. Just in case. ConnectionTimeouts timeouts({1, 0}, {1, 0}, {1, 0}); @@ -629,7 +629,7 @@ private: Poco::JSON::Parser parser; auto json_body = parser.parse(*response_body).extract(); auto schema = json_body->getValue("schema"); - LOG_TRACE((&Logger::get("AvroConfluentRowInputFormat")), "Succesfully fetched schema id = {}\n{}", id, schema); + LOG_TRACE((&Poco::Logger::get("AvroConfluentRowInputFormat")), "Successfully fetched schema id = {}\n{}", id, schema); return avro::compileJsonSchemaFromString(schema); } catch (const Exception &) diff --git a/src/Processors/Formats/Impl/MySQLOutputFormat.cpp b/src/Processors/Formats/Impl/MySQLOutputFormat.cpp index 8f1967d3704..94dfc3dc2c7 100644 --- a/src/Processors/Formats/Impl/MySQLOutputFormat.cpp +++ b/src/Processors/Formats/Impl/MySQLOutputFormat.cpp @@ -60,24 +60,25 @@ void MySQLOutputFormat::consume(Chunk chunk) void MySQLOutputFormat::finalize() { size_t affected_rows = 0; - std::stringstream human_readable_info; + std::string human_readable_info; if (QueryStatus * process_list_elem = context->getProcessListElement()) { CurrentThread::finalizePerformanceCounters(); QueryStatusInfo info = process_list_elem->getInfo(); affected_rows = info.written_rows; - human_readable_info << std::fixed << std::setprecision(3) - << "Read " << info.read_rows << " rows, " << formatReadableSizeWithBinarySuffix(info.read_bytes) << " in " << info.elapsed_seconds << " sec., " - << static_cast(info.read_rows / info.elapsed_seconds) << " rows/sec., " - << formatReadableSizeWithBinarySuffix(info.read_bytes / info.elapsed_seconds) << "/sec."; + human_readable_info = fmt::format( + "Read {} rows, {} in {} sec., {} rows/sec., {}/sec.", + info.read_rows, ReadableSize(info.read_bytes), info.elapsed_seconds, + static_cast(info.read_rows / info.elapsed_seconds), + ReadableSize(info.read_bytes / info.elapsed_seconds)); } const auto & header = getPort(PortKind::Main).getHeader(); if (header.columns() == 0) - packet_sender->sendPacket(OK_Packet(0x0, context->mysql.client_capabilities, affected_rows, 0, 0, "", human_readable_info.str()), true); + packet_sender->sendPacket(OK_Packet(0x0, context->mysql.client_capabilities, affected_rows, 0, 0, "", human_readable_info), true); else if (context->mysql.client_capabilities & CLIENT_DEPRECATE_EOF) - packet_sender->sendPacket(OK_Packet(0xfe,
context->mysql.client_capabilities, affected_rows, 0, 0, "", human_readable_info.str()), true); + packet_sender->sendPacket(OK_Packet(0xfe, context->mysql.client_capabilities, affected_rows, 0, 0, "", human_readable_info), true); else packet_sender->sendPacket(EOF_Packet(0, 0), true); } diff --git a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp index 05276f87540..49a3d018098 100644 --- a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp @@ -27,7 +27,7 @@ CollapsingSortedAlgorithm::CollapsingSortedAlgorithm( size_t max_block_size, WriteBuffer * out_row_sources_buf_, bool use_average_block_sizes, - Logger * log_) + Poco::Logger * log_) : IMergingAlgorithmWithSharedChunks(num_inputs, std::move(description_), out_row_sources_buf_, max_row_refs) , merged_data(header.cloneEmptyColumns(), use_average_block_sizes, max_block_size) , sign_column_number(header.getPositionByName(sign_column)) diff --git a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h index 3cbe95d96e1..d95fac2f02b 100644 --- a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h @@ -35,7 +35,7 @@ public: size_t max_block_size, WriteBuffer * out_row_sources_buf_, bool use_average_block_sizes, - Logger * log_); + Poco::Logger * log_); Status merge() override; @@ -62,7 +62,7 @@ private: PODArray current_row_sources; /// Sources of rows with the current primary key size_t count_incorrect_data = 0; /// To prevent too many error messages from writing to the log. - Logger * log; + Poco::Logger * log; void reportIncorrectData(); void insertRow(RowRef & row); diff --git a/src/Processors/Merges/Algorithms/RowRef.h b/src/Processors/Merges/Algorithms/RowRef.h index efd04717e94..1b4da9781f8 100644 --- a/src/Processors/Merges/Algorithms/RowRef.h +++ b/src/Processors/Merges/Algorithms/RowRef.h @@ -83,7 +83,7 @@ public: { if (free_chunks.size() != chunks.size()) { - LOG_ERROR(&Logger::get("SharedChunkAllocator"), "SharedChunkAllocator was destroyed before RowRef was released. StackTrace: {}", StackTrace().toString()); + LOG_ERROR(&Poco::Logger::get("SharedChunkAllocator"), "SharedChunkAllocator was destroyed before RowRef was released. StackTrace: {}", StackTrace().toString()); return; } @@ -100,7 +100,7 @@ private: /// This may happen if allocator was removed before chunks. /// Log message and exit, because we don't want to throw exception in destructor. - LOG_ERROR(&Logger::get("SharedChunkAllocator"), "SharedChunkAllocator was destroyed before RowRef was released. StackTrace: {}", StackTrace().toString()); + LOG_ERROR(&Poco::Logger::get("SharedChunkAllocator"), "SharedChunkAllocator was destroyed before RowRef was released. 
StackTrace: {}", StackTrace().toString()); return; } diff --git a/src/Processors/Merges/CollapsingSortedTransform.h b/src/Processors/Merges/CollapsingSortedTransform.h index cdf7c4a1607..4e65504a101 100644 --- a/src/Processors/Merges/CollapsingSortedTransform.h +++ b/src/Processors/Merges/CollapsingSortedTransform.h @@ -29,7 +29,7 @@ public: max_block_size, out_row_sources_buf_, use_average_block_sizes, - &Logger::get("CollapsingSortedTransform")) + &Poco::Logger::get("CollapsingSortedTransform")) { } diff --git a/src/Processors/Merges/MergingSortedTransform.cpp b/src/Processors/Merges/MergingSortedTransform.cpp index defadd0caec..e680304ccee 100644 --- a/src/Processors/Merges/MergingSortedTransform.cpp +++ b/src/Processors/Merges/MergingSortedTransform.cpp @@ -43,7 +43,7 @@ void MergingSortedTransform::onFinish() const auto & merged_data = algorithm.getMergedData(); - auto * log = &Logger::get("MergingSortedTransform"); + auto * log = &Poco::Logger::get("MergingSortedTransform"); double seconds = total_stopwatch.elapsedSeconds(); @@ -53,7 +53,7 @@ void MergingSortedTransform::onFinish() LOG_DEBUG(log, "Merge sorted {} blocks, {} rows in {} sec., {} rows/sec., {}/sec", merged_data.totalChunks(), merged_data.totalMergedRows(), seconds, merged_data.totalMergedRows() / seconds, - formatReadableSizeWithBinarySuffix(merged_data.totalAllocatedBytes() / seconds)); + ReadableSize(merged_data.totalAllocatedBytes() / seconds)); } } diff --git a/src/Processors/Transforms/AggregatingTransform.cpp b/src/Processors/Transforms/AggregatingTransform.cpp index fc8fd575c35..d8eff53f567 100644 --- a/src/Processors/Transforms/AggregatingTransform.cpp +++ b/src/Processors/Transforms/AggregatingTransform.cpp @@ -542,9 +542,9 @@ void AggregatingTransform::initGenerate() size_t rows = variants.sizeWithoutOverflowRow(); LOG_TRACE(log, "Aggregated. {} to {} rows (from {}) in {} sec. 
({} rows/sec., {}/sec.)", - src_rows, rows, formatReadableSizeWithBinarySuffix(src_bytes), + src_rows, rows, ReadableSize(src_bytes), elapsed_seconds, src_rows / elapsed_seconds, - formatReadableSizeWithBinarySuffix(src_bytes / elapsed_seconds)); + ReadableSize(src_bytes / elapsed_seconds)); if (params->aggregator.hasTemporaryFiles()) { @@ -593,7 +593,7 @@ void AggregatingTransform::initGenerate() for (const auto & file : files.files) processors.emplace_back(std::make_unique(header, file->path())); - LOG_TRACE(log, "Will merge {} temporary files of size {} compressed, {} uncompressed.", files.files.size(), formatReadableSizeWithBinarySuffix(files.sum_size_compressed), formatReadableSizeWithBinarySuffix(files.sum_size_uncompressed)); + LOG_TRACE(log, "Will merge {} temporary files of size {} compressed, {} uncompressed.", files.files.size(), ReadableSize(files.sum_size_compressed), ReadableSize(files.sum_size_uncompressed)); auto pipe = createMergingAggregatedMemoryEfficientPipe( header, params, files.files.size(), temporary_data_merge_threads); diff --git a/src/Processors/Transforms/AggregatingTransform.h b/src/Processors/Transforms/AggregatingTransform.h index ebfb14a387a..c2693579c67 100644 --- a/src/Processors/Transforms/AggregatingTransform.h +++ b/src/Processors/Transforms/AggregatingTransform.h @@ -88,7 +88,7 @@ private: Processors processors; AggregatingTransformParamsPtr params; - Logger * log = &Logger::get("AggregatingTransform"); + Poco::Logger * log = &Poco::Logger::get("AggregatingTransform"); ColumnRawPtrs key_columns; Aggregator::AggregateColumns aggregate_columns; diff --git a/src/Processors/Transforms/CreatingSetsTransform.h b/src/Processors/Transforms/CreatingSetsTransform.h index aeb7a43b61b..f6df6042959 100644 --- a/src/Processors/Transforms/CreatingSetsTransform.h +++ b/src/Processors/Transforms/CreatingSetsTransform.h @@ -54,7 +54,7 @@ private: size_t bytes_to_transfer = 0; using Logger = Poco::Logger; - Logger * log = &Logger::get("CreatingSetsBlockInputStream"); + Poco::Logger * log = &Poco::Logger::get("CreatingSetsBlockInputStream"); bool is_initialized = false; diff --git a/src/Processors/Transforms/MergeSortingTransform.cpp b/src/Processors/Transforms/MergeSortingTransform.cpp index 34eb833c411..428fbb6d528 100644 --- a/src/Processors/Transforms/MergeSortingTransform.cpp +++ b/src/Processors/Transforms/MergeSortingTransform.cpp @@ -31,7 +31,7 @@ class MergeSorter; class BufferingToFileTransform : public IAccumulatingTransform { public: - BufferingToFileTransform(const Block & header, Logger * log_, std::string path_) + BufferingToFileTransform(const Block & header, Poco::Logger * log_, std::string path_) : IAccumulatingTransform(header, header), log(log_) , path(std::move(path_)), file_buf_out(path), compressed_buf_out(file_buf_out) , out_stream(std::make_shared(compressed_buf_out, 0, header)) @@ -80,7 +80,7 @@ public: } private: - Logger * log; + Poco::Logger * log; std::string path; WriteBufferFromFile file_buf_out; CompressedWriteBuffer compressed_buf_out; @@ -267,7 +267,7 @@ void MergeSortingTransform::remerge() new_chunks.emplace_back(std::move(chunk)); } - LOG_DEBUG(log, "Memory usage is lowered from {} to {}", formatReadableSizeWithBinarySuffix(sum_bytes_in_blocks), formatReadableSizeWithBinarySuffix(new_sum_bytes_in_blocks)); + LOG_DEBUG(log, "Memory usage is lowered from {} to {}", ReadableSize(sum_bytes_in_blocks), ReadableSize(new_sum_bytes_in_blocks)); /// If the memory consumption was not lowered enough - we will not perform remerge anymore. 
2 is a guess.
     if (new_sum_bytes_in_blocks * 2 > sum_bytes_in_blocks)
diff --git a/src/Processors/Transforms/MergeSortingTransform.h b/src/Processors/Transforms/MergeSortingTransform.h
index 043cb3f36c1..22812e08b40 100644
--- a/src/Processors/Transforms/MergeSortingTransform.h
+++ b/src/Processors/Transforms/MergeSortingTransform.h
@@ -41,7 +41,7 @@ private:
     size_t sum_rows_in_blocks = 0;
     size_t sum_bytes_in_blocks = 0;
 
-    Logger * log = &Logger::get("MergeSortingTransform");
+    Poco::Logger * log = &Poco::Logger::get("MergeSortingTransform");
 
     /// If remerge doesn't save memory at least several times, mark it as useless and don't do it anymore.
     bool remerge_is_useful = true;
diff --git a/src/Processors/Transforms/MergingAggregatedTransform.h b/src/Processors/Transforms/MergingAggregatedTransform.h
index cb1ce01976c..73e0d8cd013 100644
--- a/src/Processors/Transforms/MergingAggregatedTransform.h
+++ b/src/Processors/Transforms/MergingAggregatedTransform.h
@@ -21,7 +21,7 @@ protected:
 
 private:
     AggregatingTransformParamsPtr params;
-    Logger * log = &Logger::get("MergingAggregatedTransform");
+    Poco::Logger * log = &Poco::Logger::get("MergingAggregatedTransform");
     size_t max_threads;
 
     AggregatedDataVariants data_variants;
diff --git a/src/Processors/tests/processors_test_aggregation.cpp b/src/Processors/tests/processors_test_aggregation.cpp
index e3316432ba8..9b8bee67d52 100644
--- a/src/Processors/tests/processors_test_aggregation.cpp
+++ b/src/Processors/tests/processors_test_aggregation.cpp
@@ -184,8 +184,8 @@ try
     auto thread_group = CurrentThread::getGroup();
 
     Poco::AutoPtr channel = new Poco::ConsoleChannel(std::cerr);
-    Logger::root().setChannel(channel);
-    Logger::root().setLevel("trace");
+    Poco::Logger::root().setChannel(channel);
+    Poco::Logger::root().setLevel("trace");
 
     registerAggregateFunctions();
     auto & factory = AggregateFunctionFactory::instance();
diff --git a/src/Processors/tests/processors_test_merge_sorting_transform.cpp b/src/Processors/tests/processors_test_merge_sorting_transform.cpp
index 470bf79a174..5e6720f0167 100644
--- a/src/Processors/tests/processors_test_merge_sorting_transform.cpp
+++ b/src/Processors/tests/processors_test_merge_sorting_transform.cpp
@@ -125,8 +125,8 @@ int main(int, char **)
 try
 {
     Poco::AutoPtr channel = new Poco::ConsoleChannel(std::cerr);
-    Logger::root().setChannel(channel);
-    Logger::root().setLevel("trace");
+    Poco::Logger::root().setChannel(channel);
+    Poco::Logger::root().setLevel("trace");
 
     auto disk = std::make_shared("tmp", ".", 0);
     auto tmp_volume = std::make_shared("tmp", std::vector{disk}, 0);
diff --git a/src/Server/HTTPHandler.cpp b/src/Server/HTTPHandler.cpp
index 82068496159..84d23f10a55 100644
--- a/src/Server/HTTPHandler.cpp
+++ b/src/Server/HTTPHandler.cpp
@@ -225,7 +225,7 @@ void HTTPHandler::pushDelayedResults(Output & used_output)
 
 HTTPHandler::HTTPHandler(IServer & server_, const std::string & name)
     : server(server_)
-    , log(&Logger::get(name))
+    , log(&Poco::Logger::get(name))
 {
     server_display_name = server.config().getString("display_name", getFQDNOrHostName());
 }
diff --git a/src/Server/HTTPHandlerFactory.cpp b/src/Server/HTTPHandlerFactory.cpp
index e916070be22..2f00aa0aa72 100644
--- a/src/Server/HTTPHandlerFactory.cpp
+++ b/src/Server/HTTPHandlerFactory.cpp
@@ -25,7 +25,7 @@ namespace ErrorCodes
 }
 
 HTTPRequestHandlerFactoryMain::HTTPRequestHandlerFactoryMain(const std::string & name_)
-    : log(&Logger::get(name_)), name(name_)
+    : log(&Poco::Logger::get(name_)), name(name_)
 {
 }
diff --git a/src/Server/HTTPHandlerFactory.h b/src/Server/HTTPHandlerFactory.h
index 55206018d41..273e337813e 100644
--- a/src/Server/HTTPHandlerFactory.h
+++ b/src/Server/HTTPHandlerFactory.h
@@ -18,7 +18,7 @@ class HTTPRequestHandlerFactoryMain : public Poco::Net::HTTPRequestHandlerFactor
 private:
     using TThis = HTTPRequestHandlerFactoryMain;
 
-    Logger * log;
+    Poco::Logger * log;
     std::string name;
 
     std::vector child_factories;
diff --git a/src/Server/MySQLHandlerFactory.cpp b/src/Server/MySQLHandlerFactory.cpp
index 5d78ed81068..18e05f88402 100644
--- a/src/Server/MySQLHandlerFactory.cpp
+++ b/src/Server/MySQLHandlerFactory.cpp
@@ -22,7 +22,7 @@ namespace ErrorCodes
 
 MySQLHandlerFactory::MySQLHandlerFactory(IServer & server_)
     : server(server_)
-    , log(&Logger::get("MySQLHandlerFactory"))
+    , log(&Poco::Logger::get("MySQLHandlerFactory"))
 {
 #if USE_SSL
     try
diff --git a/src/Server/ReplicasStatusHandler.cpp b/src/Server/ReplicasStatusHandler.cpp
index 986af59d3a4..55746409feb 100644
--- a/src/Server/ReplicasStatusHandler.cpp
+++ b/src/Server/ReplicasStatusHandler.cpp
@@ -96,7 +96,7 @@ void ReplicasStatusHandler::handleRequest(Poco::Net::HTTPServerRequest & request
         }
         catch (...)
         {
-            LOG_ERROR((&Logger::get("ReplicasStatusHandler")), "Cannot send exception to client");
+            LOG_ERROR((&Poco::Logger::get("ReplicasStatusHandler")), "Cannot send exception to client");
         }
     }
 }
diff --git a/src/Server/TCPHandlerFactory.h b/src/Server/TCPHandlerFactory.h
index a5532a8dc02..5ecd427bf8b 100644
--- a/src/Server/TCPHandlerFactory.h
+++ b/src/Server/TCPHandlerFactory.h
@@ -27,7 +27,7 @@ private:
 public:
     explicit TCPHandlerFactory(IServer & server_, bool secure_ = false)
         : server(server_)
-        , log(&Logger::get(std::string("TCP") + (secure_ ? "S" : "") + "HandlerFactory"))
+        , log(&Poco::Logger::get(std::string("TCP") + (secure_ ? "S" : "") + "HandlerFactory"))
     {
     }
diff --git a/src/Storages/Distributed/DirectoryMonitor.cpp b/src/Storages/Distributed/DirectoryMonitor.cpp
index 4dd62db0965..0e84f68e5fe 100644
--- a/src/Storages/Distributed/DirectoryMonitor.cpp
+++ b/src/Storages/Distributed/DirectoryMonitor.cpp
@@ -88,7 +88,7 @@ StorageDistributedDirectoryMonitor::StorageDistributedDirectoryMonitor(
     , default_sleep_time{storage.global_context->getSettingsRef().distributed_directory_monitor_sleep_time_ms.totalMilliseconds()}
     , sleep_time{default_sleep_time}
     , max_sleep_time{storage.global_context->getSettingsRef().distributed_directory_monitor_max_sleep_time_ms.totalMilliseconds()}
-    , log{&Logger::get(getLoggerName())}
+    , log{&Poco::Logger::get(getLoggerName())}
     , monitor_blocker(monitor_blocker_)
     , bg_pool(bg_pool_)
 {
@@ -301,7 +301,7 @@ void StorageDistributedDirectoryMonitor::processFile(const std::string & file_pa
 }
 
 void StorageDistributedDirectoryMonitor::readHeader(
-    ReadBuffer & in, Settings & insert_settings, std::string & insert_query, ClientInfo & client_info, Logger * log)
+    ReadBuffer & in, Settings & insert_settings, std::string & insert_query, ClientInfo & client_info, Poco::Logger * log)
 {
     UInt64 query_size;
     readVarUInt(query_size, in);
@@ -542,7 +542,7 @@ public:
         : in(file_name)
         , decompressing_in(in)
         , block_in(decompressing_in, ClickHouseRevision::get())
-        , log{&Logger::get("DirectoryMonitorBlockInputStream")}
+        , log{&Poco::Logger::get("DirectoryMonitorBlockInputStream")}
     {
         Settings insert_settings;
         String insert_query;
@@ -576,7 +576,7 @@ private:
     Block first_block;
     Block header;
 
-    Logger * log;
+    Poco::Logger * log;
 };
 
 BlockInputStreamPtr StorageDistributedDirectoryMonitor::createStreamFromFile(const String & file_name)
diff --git a/src/Storages/Distributed/DirectoryMonitor.h b/src/Storages/Distributed/DirectoryMonitor.h
index e2a913ee1ef..418cd430243 100644
--- a/src/Storages/Distributed/DirectoryMonitor.h
+++ b/src/Storages/Distributed/DirectoryMonitor.h
@@ -68,14 +68,14 @@ private:
     std::chrono::time_point last_decrease_time {std::chrono::system_clock::now()};
     std::atomic quit {false};
     std::mutex mutex;
-    Logger * log;
+    Poco::Logger * log;
     ActionBlocker & monitor_blocker;
 
     BackgroundSchedulePool & bg_pool;
     BackgroundSchedulePoolTaskHolder task_handle;
 
     /// Read insert query and insert settings for backward compatible.
-    static void readHeader(ReadBuffer & in, Settings & insert_settings, std::string & insert_query, ClientInfo & client_info, Logger * log);
+    static void readHeader(ReadBuffer & in, Settings & insert_settings, std::string & insert_query, ClientInfo & client_info, Poco::Logger * log);
 
     friend class DirectoryMonitorBlockInputStream;
 };
 
diff --git a/src/Storages/Distributed/DistributedBlockOutputStream.cpp b/src/Storages/Distributed/DistributedBlockOutputStream.cpp
index a171e289695..5516e85b143 100644
--- a/src/Storages/Distributed/DistributedBlockOutputStream.cpp
+++ b/src/Storages/Distributed/DistributedBlockOutputStream.cpp
@@ -87,7 +87,7 @@ DistributedBlockOutputStream::DistributedBlockOutputStream(
     bool insert_sync_, UInt64 insert_timeout_)
     : context(context_), storage(storage_), query_ast(query_ast_), query_string(queryToString(query_ast_)),
     cluster(cluster_), insert_sync(insert_sync_),
-    insert_timeout(insert_timeout_), log(&Logger::get("DistributedBlockOutputStream"))
+    insert_timeout(insert_timeout_), log(&Poco::Logger::get("DistributedBlockOutputStream"))
 {
 }
 
diff --git a/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp
index c35a031df1a..0fc42e49582 100644
--- a/src/Storages/Kafka/StorageKafka.cpp
+++ b/src/Storages/Kafka/StorageKafka.cpp
@@ -140,7 +140,7 @@ StorageKafka::StorageKafka(
     , schema_name(global_context.getMacros()->expand(schema_name_))
     , num_consumers(num_consumers_)
     , max_block_size(max_block_size_)
-    , log(&Logger::get("StorageKafka (" + table_id_.table_name + ")"))
+    , log(&Poco::Logger::get("StorageKafka (" + table_id_.table_name + ")"))
     , semaphore(0, num_consumers_)
     , skip_broken(skip_broken_)
     , intermediate_commit(intermediate_commit_)
diff --git a/src/Storages/MergeTree/BackgroundProcessingPool.cpp b/src/Storages/MergeTree/BackgroundProcessingPool.cpp
index ff195112929..8f6d7c19549 100644
--- a/src/Storages/MergeTree/BackgroundProcessingPool.cpp
+++ b/src/Storages/MergeTree/BackgroundProcessingPool.cpp
@@ -51,7 +51,7 @@ BackgroundProcessingPool::BackgroundProcessingPool(int size_,
     , thread_name(thread_name_)
     , settings(pool_settings)
 {
-    logger = &Logger::get(log_name);
+    logger = &Poco::Logger::get(log_name);
     LOG_INFO(logger, "Create {} with {} threads", log_name, size);
 
     threads.resize(size);
diff --git a/src/Storages/MergeTree/DataPartsExchange.h b/src/Storages/MergeTree/DataPartsExchange.h
index c0e8c0d2331..c1aff6bdba5 100644
--- a/src/Storages/MergeTree/DataPartsExchange.h
+++ b/src/Storages/MergeTree/DataPartsExchange.h
@@ -21,7 +21,7 @@ class Service final : public InterserverIOEndpoint
 {
 public:
     Service(MergeTreeData & data_)
-        : data(data_), log(&Logger::get(data.getLogName() + " (Replicated PartsService)")) {}
+        : data(data_), log(&Poco::Logger::get(data.getLogName() + " (Replicated PartsService)")) {}
 
     Service(const Service &) = delete;
     Service & operator=(const Service &) = delete;
@@ -36,7 +36,7 @@ private:
     /// StorageReplicatedMergeTree::shutdown() waits for all parts exchange handlers to finish,
     /// so Service will never access dangling reference to storage
     MergeTreeData & data;
-    Logger * log;
+    Poco::Logger * log;
 };
 
 /** Client for getting the parts from the table *MergeTree.
@@ -44,7 +44,7 @@ private:
 class Fetcher final
 {
 public:
-    Fetcher(MergeTreeData & data_) : data(data_), log(&Logger::get("Fetcher")) {}
+    Fetcher(MergeTreeData & data_) : data(data_), log(&Poco::Logger::get("Fetcher")) {}
 
     Fetcher(const Fetcher &) = delete;
     Fetcher & operator=(const Fetcher &) = delete;
@@ -75,7 +75,7 @@ private:
         PooledReadWriteBufferFromHTTP & in);
 
     MergeTreeData & data;
-    Logger * log;
+    Poco::Logger * log;
 };
 
 }
diff --git a/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp b/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp
index 758401b7f4f..762dbc7d5b6 100644
--- a/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp
+++ b/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp
@@ -107,7 +107,7 @@ EphemeralLocksInAllPartitions::EphemeralLocksInAllPartitions(
         int rc = zookeeper.tryMulti(lock_ops, lock_responses);
         if (rc == Coordination::ZBADVERSION)
         {
-            LOG_TRACE(&Logger::get("EphemeralLocksInAllPartitions"), "Someone has inserted a block in a new partition while we were creating locks. Retry.");
+            LOG_TRACE(&Poco::Logger::get("EphemeralLocksInAllPartitions"), "Someone has inserted a block in a new partition while we were creating locks. Retry.");
             continue;
         }
         else if (rc != Coordination::ZOK)
diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp
index a2344be3887..168a218184d 100644
--- a/src/Storages/MergeTree/MergeTreeData.cpp
+++ b/src/Storages/MergeTree/MergeTreeData.cpp
@@ -136,7 +136,7 @@ MergeTreeData::MergeTreeData(
     , relative_data_path(relative_data_path_)
     , broken_part_callback(broken_part_callback_)
     , log_name(table_id_.getNameForLogs())
-    , log(&Logger::get(log_name))
+    , log(&Poco::Logger::get(log_name))
     , storage_settings(std::move(storage_settings_))
     , data_parts_by_info(data_parts_indexes.get())
     , data_parts_by_state_and_info(data_parts_indexes.get())
@@ -2828,8 +2828,7 @@ inline ReservationPtr checkAndReturnReservation(UInt64 expected_size, Reservatio
     if (reservation)
         return reservation;
 
-    throw Exception("Cannot reserve " + formatReadableSizeWithBinarySuffix(expected_size) + ", not enough space",
-        ErrorCodes::NOT_ENOUGH_SPACE);
+    throw Exception(fmt::format("Cannot reserve {}, not enough space", ReadableSize(expected_size)), ErrorCodes::NOT_ENOUGH_SPACE);
 }
 
 }
diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h
index 9a3ef3088d5..d9bedd7c6db 100644
--- a/src/Storages/MergeTree/MergeTreeData.h
+++ b/src/Storages/MergeTree/MergeTreeData.h
@@ -698,7 +698,7 @@ protected:
     BrokenPartCallback broken_part_callback;
 
     String log_name;
-    Logger * log;
+    Poco::Logger * log;
 
     /// Storage settings.
     /// Use get and set to receive readonly versions.
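
A note on the member-logger pattern that recurs throughout these hunks: Poco::Logger::get() returns a reference to a logger owned by Poco's global registry, which is why the code can safely cache its address in a raw member pointer for the lifetime of the process. A minimal self-contained sketch of the idiom (illustrative only, not part of this patch; the class name is hypothetical):

#include <Poco/Logger.h>

/// Caches a registry-owned logger, mirroring the member-initializer pattern in the hunks above.
class PartsFetcherExample
{
public:
    PartsFetcherExample() : log(&Poco::Logger::get("PartsFetcherExample")) {}

    void fetch()
    {
        /// Plain Poco API; ClickHouse code would normally go through the LOG_* macros instead.
        log->information("Fetching a part");
    }

private:
    Poco::Logger * log;
};
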
diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp
index f906b329d2d..1e8d4136308 100644
--- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp
+++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp
@@ -153,7 +153,7 @@ void FutureMergedMutatedPart::updatePath(const MergeTreeData & storage, const Re
 }
 
 MergeTreeDataMergerMutator::MergeTreeDataMergerMutator(MergeTreeData & data_, size_t background_pool_size_)
-    : data(data_), background_pool_size(background_pool_size_), log(&Logger::get(data.getLogName() + " (MergerMutator)"))
+    : data(data_), background_pool_size(background_pool_size_), log(&Poco::Logger::get(data.getLogName() + " (MergerMutator)"))
 {
 }
 
@@ -371,14 +371,13 @@ bool MergeTreeDataMergerMutator::selectAllPartsToMergeWithinPartition(
             ", {} required now (+{}% on overhead); suppressing similar warnings for the next hour",
             parts.front()->name,
             (*prev_it)->name,
-            formatReadableSizeWithBinarySuffix(available_disk_space),
-            formatReadableSizeWithBinarySuffix(sum_bytes),
+            ReadableSize(available_disk_space),
+            ReadableSize(sum_bytes),
             static_cast((DISK_USAGE_COEFFICIENT_TO_SELECT - 1.0) * 100));
     }
 
     if (out_disable_reason)
-        *out_disable_reason = "Insufficient available disk space, required " +
-            formatReadableSizeWithDecimalSuffix(required_disk_space);
+        *out_disable_reason = fmt::format("Insufficient available disk space, required {}", ReadableSize(required_disk_space));
 
     return false;
 }
@@ -952,7 +951,15 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor
     /// Print overall profiling info. NOTE: it may duplicates previous messages
     {
         double elapsed_seconds = merge_entry->watch.elapsedSeconds();
-        LOG_DEBUG(log, "Merge sorted {} rows, containing {} columns ({} merged, {} gathered) in {} sec., {} rows/sec., {}/sec.", merge_entry->rows_read, all_column_names.size(), merging_column_names.size(), gathering_column_names.size(), elapsed_seconds, merge_entry->rows_read / elapsed_seconds, formatReadableSizeWithBinarySuffix(merge_entry->bytes_read_uncompressed / elapsed_seconds));
+        LOG_DEBUG(log,
+            "Merge sorted {} rows, containing {} columns ({} merged, {} gathered) in {} sec., {} rows/sec., {}/sec.",
+            merge_entry->rows_read,
+            all_column_names.size(),
+            merging_column_names.size(),
+            gathering_column_names.size(),
+            elapsed_seconds,
+            merge_entry->rows_read / elapsed_seconds,
+            ReadableSize(merge_entry->bytes_read_uncompressed / elapsed_seconds));
     }
 
     if (merge_alg != MergeAlgorithm::Vertical)
diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h
index 84a3fcf1be2..d26e84eb18a 100644
--- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h
+++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h
@@ -230,7 +230,7 @@ private:
     MergeTreeData & data;
     const size_t background_pool_size;
 
-    Logger * log;
+    Poco::Logger * log;
 
     /// When the last time you wrote to the log that the disk space was running out (not to write about this too often).
     time_t disk_space_warning_time = 0;
diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp
index 24e3e3ac69d..e4321b82166 100644
--- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp
+++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp
@@ -84,7 +84,7 @@ namespace ErrorCodes
 
 MergeTreeDataSelectExecutor::MergeTreeDataSelectExecutor(const MergeTreeData & data_)
-    : data(data_), log(&Logger::get(data.getLogName() + " (SelectExecutor)"))
+    : data(data_), log(&Poco::Logger::get(data.getLogName() + " (SelectExecutor)"))
 {
 }
 
diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h
index 92fa98fd914..942e111635b 100644
--- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h
+++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h
@@ -44,7 +44,7 @@ public:
 
 private:
     const MergeTreeData & data;
-    Logger * log;
+    Poco::Logger * log;
 
     Pipes spreadMarkRangesAmongStreams(
         RangesInDataParts && parts,
diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.h b/src/Storages/MergeTree/MergeTreeDataWriter.h
index e199aa3b43a..ffaa227641e 100644
--- a/src/Storages/MergeTree/MergeTreeDataWriter.h
+++ b/src/Storages/MergeTree/MergeTreeDataWriter.h
@@ -34,7 +34,7 @@ using BlocksWithPartition = std::vector;
 class MergeTreeDataWriter
 {
 public:
-    MergeTreeDataWriter(MergeTreeData & data_) : data(data_), log(&Logger::get(data.getLogName() + " (Writer)")) {}
+    MergeTreeDataWriter(MergeTreeData & data_) : data(data_), log(&Poco::Logger::get(data.getLogName() + " (Writer)")) {}
 
     /** Split the block to blocks, each of them must be written as separate part.
      *  (split rows by partition)
@@ -50,7 +50,7 @@ public:
 
 private:
     MergeTreeData & data;
-    Logger * log;
+    Poco::Logger * log;
 };
 
 }
diff --git a/src/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp
index 4e564b512d6..e84ff418bc3 100644
--- a/src/Storages/MergeTree/MergeTreePartsMover.cpp
+++ b/src/Storages/MergeTree/MergeTreePartsMover.cpp
@@ -179,7 +179,7 @@ bool MergeTreePartsMover::selectPartsForMove(
 
     if (!parts_to_move.empty())
     {
-        LOG_TRACE(log, "Selected {} parts to move according to storage policy rules and {} parts according to TTL rules, {} total", parts_to_move_by_policy_rules, parts_to_move_by_ttl_rules, formatReadableSizeWithBinarySuffix(parts_to_move_total_size_bytes));
+        LOG_TRACE(log, "Selected {} parts to move according to storage policy rules and {} parts according to TTL rules, {} total", parts_to_move_by_policy_rules, parts_to_move_by_ttl_rules, ReadableSize(parts_to_move_total_size_bytes));
         return true;
     }
     else
diff --git a/src/Storages/MergeTree/MergeTreePartsMover.h b/src/Storages/MergeTree/MergeTreePartsMover.h
index 95a20dc1f77..8ddf5e165af 100644
--- a/src/Storages/MergeTree/MergeTreePartsMover.h
+++ b/src/Storages/MergeTree/MergeTreePartsMover.h
@@ -66,7 +66,7 @@ public:
 
 private:
     MergeTreeData * data;
-    Logger * log;
+    Poco::Logger * log;
 };
 
diff --git a/src/Storages/MergeTree/MergeTreeReadPool.cpp b/src/Storages/MergeTree/MergeTreeReadPool.cpp
index b266b65e19e..9ca1446ef64 100644
--- a/src/Storages/MergeTree/MergeTreeReadPool.cpp
+++ b/src/Storages/MergeTree/MergeTreeReadPool.cpp
@@ -169,7 +169,7 @@ void MergeTreeReadPool::profileFeedback(const ReadBufferFromFileBase::ProfileInf
         ProfileEvents::increment(ProfileEvents::SlowRead);
         LOG_DEBUG(log, "Slow read, event №{}: read {} bytes in {} sec., {}/s.",
             backoff_state.num_events, info.bytes_read, info.nanoseconds / 1e9,
-            formatReadableSizeWithBinarySuffix(throughput));
+            ReadableSize(throughput));
 
         if (backoff_state.num_events < backoff_settings.min_events)
             return;
diff --git a/src/Storages/MergeTree/MergeTreeReadPool.h b/src/Storages/MergeTree/MergeTreeReadPool.h
index f639a6a4905..c43074f1962 100644
--- a/src/Storages/MergeTree/MergeTreeReadPool.h
+++ b/src/Storages/MergeTree/MergeTreeReadPool.h
@@ -133,7 +133,7 @@ private:
 
     mutable std::mutex mutex;
 
-    Logger * log = &Logger::get("MergeTreeReadPool");
+    Poco::Logger * log = &Poco::Logger::get("MergeTreeReadPool");
 };
 
 using MergeTreeReadPoolPtr = std::shared_ptr;
diff --git a/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h b/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h
index 211bf9701cf..ea603bd468f 100644
--- a/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h
+++ b/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h
@@ -69,7 +69,7 @@ private:
 
     Chunks chunks;
 
-    Logger * log = &Logger::get("MergeTreeReverseSelectProcessor");
+    Poco::Logger * log = &Poco::Logger::get("MergeTreeReverseSelectProcessor");
 };
 
 }
diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h
index 4c64bfb6a18..d2438e20192 100644
--- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h
+++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h
@@ -67,7 +67,7 @@ private:
     String path;
     bool is_first_task = true;
 
-    Logger * log = &Logger::get("MergeTreeSelectProcessor");
+    Poco::Logger * log = &Poco::Logger::get("MergeTreeSelectProcessor");
 };
 
 }
diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.h b/src/Storages/MergeTree/MergeTreeSequentialSource.h
index dac559913aa..6155fef200a 100644
--- a/src/Storages/MergeTree/MergeTreeSequentialSource.h
+++ b/src/Storages/MergeTree/MergeTreeSequentialSource.h
@@ -45,7 +45,7 @@ private:
     /// Should read using direct IO
     bool read_with_direct_io;
 
-    Logger * log = &Logger::get("MergeTreeSequentialSource");
+    Poco::Logger * log = &Poco::Logger::get("MergeTreeSequentialSource");
 
     std::shared_ptr mark_cache;
 
     using MergeTreeReaderPtr = std::unique_ptr;
diff --git a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp
index a8da0e8615c..61f99ac6d88 100644
--- a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp
+++ b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp
@@ -32,7 +32,7 @@ MergeTreeWhereOptimizer::MergeTreeWhereOptimizer(
     const Context & context,
     const MergeTreeData & data,
     const Names & queried_columns_,
-    Logger * log_)
+    Poco::Logger * log_)
     : table_columns{ext::map(data.getColumns().getAllPhysical(),
         [] (const NameAndTypePair & col) { return col.name; })},
     queried_columns{queried_columns_},
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp
index d5e44166536..d8c1103809f 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp
@@ -33,7 +33,7 @@ namespace ErrorCodes
 ReplicatedMergeTreeBlockOutputStream::ReplicatedMergeTreeBlockOutputStream(
     StorageReplicatedMergeTree & storage_, size_t quorum_, size_t quorum_timeout_ms_, size_t max_parts_per_block_, bool deduplicate_)
     : storage(storage_), quorum(quorum_), quorum_timeout_ms(quorum_timeout_ms_), max_parts_per_block(max_parts_per_block_), deduplicate(deduplicate_),
-    log(&Logger::get(storage.getLogName() + " (Replicated OutputStream)"))
+    log(&Poco::Logger::get(storage.getLogName() + " (Replicated OutputStream)"))
 {
     /// The quorum value `1` has the same meaning as if it is disabled.
     if (quorum == 1)
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h b/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h
index 0f6fc1e7cee..b8650c25c7d 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h
@@ -63,7 +63,7 @@ private:
     bool last_block_is_duplicate = false;
 
     using Logger = Poco::Logger;
-    Logger * log;
+    Poco::Logger * log;
 };
 
 }
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp
index 5fc2e2e3417..de91a5d5940 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp
@@ -21,7 +21,7 @@ namespace ErrorCodes
 ReplicatedMergeTreeCleanupThread::ReplicatedMergeTreeCleanupThread(StorageReplicatedMergeTree & storage_)
     : storage(storage_)
     , log_name(storage.getStorageID().getFullTableName() + " (ReplicatedMergeTreeCleanupThread)")
-    , log(&Logger::get(log_name))
+    , log(&Poco::Logger::get(log_name))
 {
     task = storage.global_context.getSchedulePool().createTask(log_name, [this]{ run(); });
 }
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h
index 306ee29a5de..a787f99d907 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h
@@ -34,7 +34,7 @@ public:
 private:
     StorageReplicatedMergeTree & storage;
     String log_name;
-    Logger * log;
+    Poco::Logger * log;
     BackgroundSchedulePool::TaskHolder task;
     pcg64 rng;
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp
index eeb6b9bddd7..8f99f315620 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp
+++ b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp
@@ -26,7 +26,7 @@ static const auto PART_CHECK_ERROR_SLEEP_MS = 5 * 1000;
 ReplicatedMergeTreePartCheckThread::ReplicatedMergeTreePartCheckThread(StorageReplicatedMergeTree & storage_)
     : storage(storage_)
     , log_name(storage.getStorageID().getFullTableName() + " (ReplicatedMergeTreePartCheckThread)")
-    , log(&Logger::get(log_name))
+    , log(&Poco::Logger::get(log_name))
 {
     task = storage.global_context.getSchedulePool().createTask(log_name, [this] { run(); });
     task->schedule();
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h
index 198c9714f64..e86356e1346 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h
+++ b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h
@@ -77,7 +77,7 @@ private:
     StorageReplicatedMergeTree & storage;
     String log_name;
-    Logger * log;
+    Poco::Logger * log;
 
     using StringSet = std::set;
     using PartToCheck = std::pair;  /// The name of the part and the minimum time to check (or zero, if not important).
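
The Replicated* background threads above all compose their logger name from the full table name plus a component suffix, so one class yields a distinct, filterable logger per table. A hedged sketch of that naming convention (function and suffix are illustrative, not from the source):

#include <Poco/Logger.h>
#include <string>

/// One logger per (table, component) pair; repeated calls with the same name reuse the same registry entry.
Poco::Logger * getTableTaskLogger(const std::string & full_table_name)
{
    return &Poco::Logger::get(full_table_name + " (CleanupThread)");
}
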
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
index c78f1f4f97a..4ea7ddda738 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
@@ -116,7 +116,7 @@ void ReplicatedMergeTreeQueue::initialize(
     zookeeper_path = zookeeper_path_;
     replica_path = replica_path_;
     logger_name = logger_name_;
-    log = &Logger::get(logger_name);
+    log = &Poco::Logger::get(logger_name);
 
     addVirtualParts(parts);
 }
@@ -1030,11 +1030,18 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
 
             if (!ignore_max_size && sum_parts_size_in_bytes > max_source_parts_size)
             {
-                String reason = "Not executing log entry " + entry.typeToString() + " for part " + entry.new_part_name
-                    + " because source parts size (" + formatReadableSizeWithBinarySuffix(sum_parts_size_in_bytes)
-                    + ") is greater than the current maximum (" + formatReadableSizeWithBinarySuffix(max_source_parts_size) + ").";
-                LOG_DEBUG(log, reason);
-                out_postpone_reason = reason;
+                const char * format_str = "Not executing log entry {} for part {}"
+                    " because source parts size ({}) is greater than the current maximum ({}).";
+
+                LOG_DEBUG(log, format_str,
+                    entry.typeToString(), entry.new_part_name,
+                    ReadableSize(sum_parts_size_in_bytes), ReadableSize(max_source_parts_size));
+
+                /// Copy-paste of above because we need structured logging (instead of already formatted message).
+                out_postpone_reason = fmt::format(format_str,
+                    entry.typeToString(), entry.new_part_name,
+                    ReadableSize(sum_parts_size_in_bytes), ReadableSize(max_source_parts_size));
+
                 return false;
             }
         }
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h
index 8e58c8b7af2..4cbb86adb7b 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h
@@ -56,7 +56,7 @@ private:
     String zookeeper_path;
     String replica_path;
     String logger_name;
-    Logger * log = nullptr;
+    Poco::Logger * log = nullptr;
 
     /// Protects the queue, future_parts and other queue state variables.
     mutable std::mutex state_mutex;
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp
index 5b4f4f2a5be..93d652f2be0 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp
@@ -42,7 +42,7 @@ static String generateActiveNodeIdentifier()
 ReplicatedMergeTreeRestartingThread::ReplicatedMergeTreeRestartingThread(StorageReplicatedMergeTree & storage_)
     : storage(storage_)
     , log_name(storage.getStorageID().getFullTableName() + " (ReplicatedMergeTreeRestartingThread)")
-    , log(&Logger::get(log_name))
+    , log(&Poco::Logger::get(log_name))
     , active_node_identifier(generateActiveNodeIdentifier())
 {
     const auto storage_settings = storage.getSettings();
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h
index 88c6fe755d1..8641af07476 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h
@@ -33,7 +33,7 @@ public:
 private:
     StorageReplicatedMergeTree & storage;
     String log_name;
-    Logger * log;
+    Poco::Logger * log;
     std::atomic need_stop {false};
 
     /// The random data we wrote into `/replicas/me/is_active`.
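
The ReplicatedMergeTreeQueue hunk above introduces a single format string reused for both the LOG_DEBUG call and the out_postpone_reason returned to the caller, so the logged and the reported messages cannot drift apart. A sketch of the same idea, assuming logger_useful.h and fmt are available (the function and argument names are illustrative, not from the source):

#include <common/logger_useful.h>
#include <fmt/format.h>
#include <string>

std::string logAndExplainPostpone(Poco::Logger * log, const std::string & part_name, size_t parts_size, size_t max_size)
{
    const char * format_str = "Not executing log entry for part {}"
        " because source parts size ({}) is greater than the current maximum ({}).";

    /// The macro formats lazily, only if the level is enabled or client logs are requested.
    LOG_DEBUG(log, format_str, part_name, parts_size, max_size);

    /// Same format string, rendered eagerly for the caller.
    return fmt::format(format_str, part_name, parts_size, max_size);
}
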
diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp
index 1072f0f3537..720c7a2d670 100644
--- a/src/Storages/StorageBuffer.cpp
+++ b/src/Storages/StorageBuffer.cpp
@@ -74,7 +74,7 @@ StorageBuffer::StorageBuffer(
     , max_thresholds(max_thresholds_)
    , destination_id(destination_id_)
     , allow_materialized(allow_materialized_)
-    , log(&Logger::get("StorageBuffer (" + table_id_.getFullTableName() + ")"))
+    , log(&Poco::Logger::get("StorageBuffer (" + table_id_.getFullTableName() + ")"))
     , bg_pool(global_context.getBufferFlushSchedulePool())
 {
     setColumns(columns_);
diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp
index 62755c99642..574d93df566 100644
--- a/src/Storages/StorageDistributed.cpp
+++ b/src/Storages/StorageDistributed.cpp
@@ -278,7 +278,7 @@ StorageDistributed::StorageDistributed(
     , remote_database(remote_database_)
     , remote_table(remote_table_)
     , global_context(std::make_unique(context_))
-    , log(&Logger::get("StorageDistributed (" + id_.table_name + ")"))
+    , log(&Poco::Logger::get("StorageDistributed (" + id_.table_name + ")"))
     , cluster_name(global_context->getMacros()->expand(cluster_name_))
     , has_sharding_key(sharding_key_)
     , storage_policy(storage_policy_)
diff --git a/src/Storages/StorageDistributed.h b/src/Storages/StorageDistributed.h
index 5b91c800a1f..a7e3a073af4 100644
--- a/src/Storages/StorageDistributed.h
+++ b/src/Storages/StorageDistributed.h
@@ -128,7 +128,7 @@ public:
     ASTPtr remote_table_function_ptr;
 
     std::unique_ptr global_context;
-    Logger * log;
+    Poco::Logger * log;
 
     /// Used to implement TableFunctionRemote.
     std::shared_ptr owned_cluster;
diff --git a/src/Storages/StorageFile.h b/src/Storages/StorageFile.h
index 33d8385cf85..fa5034d946c 100644
--- a/src/Storages/StorageFile.h
+++ b/src/Storages/StorageFile.h
@@ -85,7 +85,7 @@ private:
 
     mutable std::shared_mutex rwlock;
 
-    Logger * log = &Logger::get("StorageFile");
+    Poco::Logger * log = &Poco::Logger::get("StorageFile");
 };
 
 }
diff --git a/src/Storages/StorageHDFS.h b/src/Storages/StorageHDFS.h
index 48defd72911..5b250247b84 100644
--- a/src/Storages/StorageHDFS.h
+++ b/src/Storages/StorageHDFS.h
@@ -45,7 +45,7 @@ private:
     Context & context;
     String compression_method;
 
-    Logger * log = &Logger::get("StorageHDFS");
+    Poco::Logger * log = &Poco::Logger::get("StorageHDFS");
 };
 }
diff --git a/src/Storages/StorageSet.cpp b/src/Storages/StorageSet.cpp
index 4682b873dc5..86bfed5ac84 100644
--- a/src/Storages/StorageSet.cpp
+++ b/src/Storages/StorageSet.cpp
@@ -191,8 +191,8 @@ void StorageSetOrJoinBase::restoreFromFile(const String & file_path)
     backup_stream.readSuffix();
 
     /// TODO Add speed, compressed bytes, data volume in memory, compression ratio ... Generalize all statistics logging in project.
-    LOG_INFO(&Logger::get("StorageSetOrJoinBase"), "Loaded from backup file {}. {} rows, {}. State has {} unique rows.",
-        file_path, backup_stream.getProfileInfo().rows, formatReadableSizeWithBinarySuffix(backup_stream.getProfileInfo().bytes), getSize());
+    LOG_INFO(&Poco::Logger::get("StorageSetOrJoinBase"), "Loaded from backup file {}. {} rows, {}. State has {} unique rows.",
+        file_path, backup_stream.getProfileInfo().rows, ReadableSize(backup_stream.getProfileInfo().bytes), getSize());
 }
diff --git a/src/Storages/StorageStripeLog.cpp b/src/Storages/StorageStripeLog.cpp
index 5c3e93fc7f8..b61d52657dd 100644
--- a/src/Storages/StorageStripeLog.cpp
+++ b/src/Storages/StorageStripeLog.cpp
@@ -221,7 +221,7 @@ StorageStripeLog::StorageStripeLog(
     , table_path(relative_path_)
     , max_compress_block_size(max_compress_block_size_)
     , file_checker(disk, table_path + "sizes.json")
-    , log(&Logger::get("StorageStripeLog"))
+    , log(&Poco::Logger::get("StorageStripeLog"))
 {
     setColumns(columns_);
     setConstraints(constraints_);
diff --git a/src/Storages/StorageStripeLog.h b/src/Storages/StorageStripeLog.h
index c85f576bd43..ed8e5da081e 100644
--- a/src/Storages/StorageStripeLog.h
+++ b/src/Storages/StorageStripeLog.h
@@ -68,7 +68,7 @@ private:
     FileChecker file_checker;
     mutable std::shared_mutex rwlock;
 
-    Logger * log;
+    Poco::Logger * log;
 };
 
 }
diff --git a/src/Storages/StorageTinyLog.cpp b/src/Storages/StorageTinyLog.cpp
index 5bdfa1fc374..2a62068516e 100644
--- a/src/Storages/StorageTinyLog.cpp
+++ b/src/Storages/StorageTinyLog.cpp
@@ -334,7 +334,7 @@ StorageTinyLog::StorageTinyLog(
     , table_path(relative_path_)
     , max_compress_block_size(max_compress_block_size_)
     , file_checker(disk, table_path + "sizes.json")
-    , log(&Logger::get("StorageTinyLog"))
+    , log(&Poco::Logger::get("StorageTinyLog"))
 {
     setColumns(columns_);
     setConstraints(constraints_);
diff --git a/src/Storages/StorageTinyLog.h b/src/Storages/StorageTinyLog.h
index a8be3be2435..102ec76fda3 100644
--- a/src/Storages/StorageTinyLog.h
+++ b/src/Storages/StorageTinyLog.h
@@ -71,7 +71,7 @@ private:
     FileChecker file_checker;
     mutable std::shared_mutex rwlock;
 
-    Logger * log;
+    Poco::Logger * log;
 
     void addFiles(const String & column_name, const IDataType & type);
 };
diff --git a/utils/zookeeper-cli/zookeeper-cli.cpp b/utils/zookeeper-cli/zookeeper-cli.cpp
index 0a503e77250..6fd7b39ab68 100644
--- a/utils/zookeeper-cli/zookeeper-cli.cpp
+++ b/utils/zookeeper-cli/zookeeper-cli.cpp
@@ -66,8 +66,8 @@ int main(int argc, char ** argv)
     }
 
     Poco::AutoPtr channel = new Poco::ConsoleChannel(std::cerr);
-    Logger::root().setChannel(channel);
-    Logger::root().setLevel("trace");
+    Poco::Logger::root().setChannel(channel);
+    Poco::Logger::root().setLevel("trace");
 
     zkutil::ZooKeeper zk(argv[1]);
     LineReader lr({}, '\\');
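
For completeness, the console-logging bootstrap that the test and CLI hunks above now spell with the full Poco:: prefix, as a standalone program. This is a sketch using only the plain Poco API, not part of the patch:

#include <Poco/AutoPtr.h>
#include <Poco/ConsoleChannel.h>
#include <Poco/Logger.h>
#include <iostream>

int main()
{
    /// Route the root logger to stderr and enable all levels down to "trace".
    Poco::AutoPtr<Poco::ConsoleChannel> channel = new Poco::ConsoleChannel(std::cerr);
    Poco::Logger::root().setChannel(channel);
    Poco::Logger::root().setLevel("trace");

    Poco::Logger::root().information("Logging initialized");
    return 0;
}
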