Decrease log level for some s3 messages.

commit 7dea7b7f76
parent 9cc1933c54
Author: Nikolai Kochetov
Date: 2021-12-20 18:18:54 +03:00
8 changed files with 22 additions and 22 deletions
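
For context on the severities involved: LOG_TEST writes at ClickHouse's `test` level, which sits below `trace` (the Poco::Message::PRIO_TEST priority visible in the last hunk is ClickHouse's extension of Poco's standard priorities). The sketch below is a minimal standalone illustration of the idea, not ClickHouse's actual logger: a message demoted to a below-trace level disappears under normal verbosity and only shows up when the threshold is raised to the test level.

    #include <cstdio>

    enum class Level { Fatal = 1, Error, Warning, Information, Debug, Trace, Test };

    struct Logger
    {
        Level threshold;

        void log(Level level, const char * message) const
        {
            // A message passes only if its level does not exceed the threshold;
            // Test has the highest enum value, i.e. the lowest severity.
            if (level <= threshold)
                std::printf("%s\n", message);
        }
    };

    int main()
    {
        Logger production{Level::Trace}; // an already-verbose production setting
        Logger tests{Level::Test};       // functional tests can raise the threshold to test

        production.log(Level::Test, "Read S3 object. Bucket: b, Key: k"); // suppressed
        tests.log(Level::Test, "Read S3 object. Bucket: b, Key: k");      // printed
    }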


@@ -67,7 +67,7 @@ std::unique_ptr<ReadBufferFromFileBase> DiskAzureBlobStorage::readFile(
     auto settings = current_settings.get();
     auto metadata = readMeta(path);
-    LOG_TRACE(log, "Read from file by path: {}", backQuote(metadata_disk->getPath() + path));
+    LOG_TEST(log, "Read from file by path: {}", backQuote(metadata_disk->getPath() + path));
     bool threadpool_read = read_settings.remote_fs_method == RemoteFSReadMethod::threadpool;


@@ -91,7 +91,7 @@ DiskCacheWrapper::readFile(
     if (!cache_file_predicate(path))
         return DiskDecorator::readFile(path, settings, size);
-    LOG_DEBUG(log, "Read file {} from cache", backQuote(path));
+    LOG_TEST(log, "Read file {} from cache", backQuote(path));
     if (cache_disk->exists(path))
         return cache_disk->readFile(path, settings, size);
@@ -105,11 +105,11 @@ DiskCacheWrapper::readFile(
         {
             /// This thread will responsible for file downloading to cache.
             metadata->status = DOWNLOADING;
-            LOG_DEBUG(log, "File {} doesn't exist in cache. Will download it", backQuote(path));
+            LOG_TEST(log, "File {} doesn't exist in cache. Will download it", backQuote(path));
         }
         else if (metadata->status == DOWNLOADING)
         {
-            LOG_DEBUG(log, "Waiting for file {} download to cache", backQuote(path));
+            LOG_TEST(log, "Waiting for file {} download to cache", backQuote(path));
             metadata->condition.wait(lock, [metadata] { return metadata->status == DOWNLOADED || metadata->status == ERROR; });
         }
     }
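
The hunk above is part of DiskCacheWrapper's download coordination: the first reader marks the file DOWNLOADING and fetches it, while concurrent readers block on a condition variable until the status becomes DOWNLOADED or ERROR. Below is a compact sketch of that pattern under simplified names; it is not the DiskCacheWrapper implementation.

    #include <condition_variable>
    #include <mutex>

    enum class Status { NONE, DOWNLOADING, DOWNLOADED, ERROR };

    struct DownloadState
    {
        std::mutex mutex;
        std::condition_variable condition;
        Status status = Status::NONE;
    };

    bool waitOrDownload(DownloadState & state)
    {
        std::unique_lock lock{state.mutex};
        if (state.status == Status::NONE || state.status == Status::ERROR)
        {
            state.status = Status::DOWNLOADING;    // this thread does the download
            lock.unlock();
            bool ok = true;                        // ... perform the actual download here ...
            lock.lock();
            state.status = ok ? Status::DOWNLOADED : Status::ERROR;
            state.condition.notify_all();          // wake every waiting reader
        }
        else if (state.status == Status::DOWNLOADING)
        {
            // Another thread is downloading; wait until it finishes or fails.
            state.condition.wait(lock, [&] { return state.status == Status::DOWNLOADED
                                                 || state.status == Status::ERROR; });
        }
        return state.status == Status::DOWNLOADED;
    }

    int main()
    {
        DownloadState state;
        return waitOrDownload(state) ? 0 : 1;      // single-threaded demo path
    }
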
@@ -134,7 +134,7 @@ DiskCacheWrapper::readFile(
         }
         cache_disk->moveFile(tmp_path, path);
-        LOG_DEBUG(log, "File {} downloaded to cache", backQuote(path));
+        LOG_TEST(log, "File {} downloaded to cache", backQuote(path));
     }
     catch (...)
     {
@@ -163,7 +163,7 @@ DiskCacheWrapper::writeFile(const String & path, size_t buf_size, WriteMode mode
     if (!cache_file_predicate(path))
         return DiskDecorator::writeFile(path, buf_size, mode);
-    LOG_DEBUG(log, "Write file {} to cache", backQuote(path));
+    LOG_TRACE(log, "Write file {} to cache", backQuote(path));
     auto dir_path = directoryPath(path);
     if (!cache_disk->exists(dir_path))


@@ -75,7 +75,7 @@ std::unique_ptr<ReadBufferFromFileBase> DiskHDFS::readFile(const String & path,
 {
     auto metadata = readMeta(path);
-    LOG_TRACE(log,
+    LOG_TEST(log,
         "Read from file by path: {}. Existing HDFS objects: {}",
         backQuote(metadata_disk->getPath() + path), metadata.remote_fs_objects.size());


@@ -177,7 +177,7 @@ IDiskRemote::Metadata IDiskRemote::createMeta(const String & path) const
 void IDiskRemote::removeMeta(const String & path, RemoteFSPathKeeperPtr fs_paths_keeper)
 {
-    LOG_DEBUG(log, "Remove file by path: {}", backQuote(metadata_disk->getPath() + path));
+    LOG_TRACE(log, "Remove file by path: {}", backQuote(metadata_disk->getPath() + path));
     if (!metadata_disk->isFile(path))
         throw Exception(ErrorCodes::CANNOT_DELETE_DIRECTORY, "Path '{}' is a directory", path);
@@ -464,7 +464,7 @@ bool IDiskRemote::tryReserve(UInt64 bytes)
     std::lock_guard lock(reservation_mutex);
     if (bytes == 0)
     {
-        LOG_DEBUG(log, "Reserving 0 bytes on remote_fs disk {}", backQuote(name));
+        LOG_TRACE(log, "Reserving 0 bytes on remote_fs disk {}", backQuote(name));
         ++reservation_count;
         return true;
     }
@@ -473,7 +473,7 @@ bool IDiskRemote::tryReserve(UInt64 bytes)
     UInt64 unreserved_space = available_space - std::min(available_space, reserved_bytes);
     if (unreserved_space >= bytes)
     {
-        LOG_DEBUG(log, "Reserving {} on disk {}, having unreserved {}.",
+        LOG_TRACE(log, "Reserving {} on disk {}, having unreserved {}.",
             ReadableSize(bytes), backQuote(name), ReadableSize(unreserved_space));
         ++reservation_count;
         reserved_bytes += bytes;
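
A note on the arithmetic in the hunk above: `unreserved_space` is clamped with `std::min` because the operands are unsigned, so a bare `available_space - reserved_bytes` would wrap around to a huge value whenever reservations exceed what the disk currently has. A self-contained illustration:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        using UInt64 = std::uint64_t;
        UInt64 available_space = 100;
        UInt64 reserved_bytes = 150;   // over-reserved, e.g. free space shrank meanwhile

        // Mirrors the clamp in tryReserve: result is 0 rather than 2^64 - 50.
        UInt64 unreserved = available_space - std::min(available_space, reserved_bytes);
        std::printf("%llu\n", static_cast<unsigned long long>(unreserved));
    }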


@@ -219,7 +219,7 @@ std::unique_ptr<ReadBufferFromFileBase> DiskS3::readFile(const String & path, co
     auto settings = current_settings.get();
     auto metadata = readMeta(path);
-    LOG_TRACE(log, "Read from file by path: {}. Existing S3 objects: {}",
+    LOG_TEST(log, "Read from file by path: {}. Existing S3 objects: {}",
         backQuote(metadata_disk->getPath() + path), metadata.remote_fs_objects.size());
     bool threadpool_read = read_settings.remote_fs_method == RemoteFSReadMethod::threadpool;


@@ -235,12 +235,12 @@ std::unique_ptr<ReadBuffer> ReadBufferFromS3::initialize()
             throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to read beyond right offset ({} > {})", offset, read_until_position - 1);
         req.SetRange(fmt::format("bytes={}-{}", offset, read_until_position - 1));
-        LOG_DEBUG(log, "Read S3 object. Bucket: {}, Key: {}, Range: {}-{}", bucket, key, offset, read_until_position - 1);
+        LOG_TEST(log, "Read S3 object. Bucket: {}, Key: {}, Range: {}-{}", bucket, key, offset, read_until_position - 1);
     }
     else
     {
         req.SetRange(fmt::format("bytes={}-", offset));
-        LOG_DEBUG(log, "Read S3 object. Bucket: {}, Key: {}, Offset: {}", bucket, key, offset);
+        LOG_TEST(log, "Read S3 object. Bucket: {}, Key: {}, Offset: {}", bucket, key, offset);
     }
     Aws::S3::Model::GetObjectOutcome outcome = client_ptr->GetObject(req);
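
The `read_until_position - 1` in the hunk above follows from HTTP `Range` semantics: both ends of `bytes=first-last` are inclusive, so a read that must stop before `read_until_position` asks for `read_until_position - 1` as the last byte. A small illustration (using plain snprintf rather than the fmt library):

    #include <cstddef>
    #include <cstdio>

    int main()
    {
        const std::size_t offset = 0;
        const std::size_t read_until_position = 100;   // exclusive end of the read

        char range[64];
        // Inclusive last byte, hence the - 1: "bytes=0-99" covers exactly 100 bytes.
        std::snprintf(range, sizeof(range), "bytes=%zu-%zu",
                      offset, read_until_position - 1);
        std::printf("%s\n", range);
    }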


@@ -119,7 +119,7 @@ void PocoHTTPClient::makeRequestInternal(
     Poco::Logger * log = &Poco::Logger::get("AWSClient");
     auto uri = request.GetUri().GetURIString();
-    LOG_DEBUG(log, "Make request to: {}", uri);
+    LOG_TEST(log, "Make request to: {}", uri);
     enum class S3MetricType
     {
@@ -251,7 +251,7 @@ void PocoHTTPClient::makeRequestInternal(
         if (request.GetContentBody())
         {
-            LOG_TRACE(log, "Writing request body.");
+            LOG_TEST(log, "Writing request body.");
             if (attempt > 0) /// rewind content body buffer.
             {
@@ -259,24 +259,24 @@ void PocoHTTPClient::makeRequestInternal(
                request.GetContentBody()->seekg(0);
            }
            auto size = Poco::StreamCopier::copyStream(*request.GetContentBody(), request_body_stream);
-            LOG_DEBUG(log, "Written {} bytes to request body", size);
+            LOG_TEST(log, "Written {} bytes to request body", size);
        }
-        LOG_TRACE(log, "Receiving response...");
+        LOG_TEST(log, "Receiving response...");
        auto & response_body_stream = session->receiveResponse(poco_response);
        watch.stop();
        ProfileEvents::increment(select_metric(S3MetricType::Microseconds), watch.elapsedMicroseconds());
        int status_code = static_cast<int>(poco_response.getStatus());
-        LOG_DEBUG(log, "Response status: {}, {}", status_code, poco_response.getReason());
+        LOG_TEST(log, "Response status: {}, {}", status_code, poco_response.getReason());
        if (poco_response.getStatus() == Poco::Net::HTTPResponse::HTTP_TEMPORARY_REDIRECT)
        {
            auto location = poco_response.get("location");
            remote_host_filter.checkURL(Poco::URI(location));
            uri = location;
-            LOG_DEBUG(log, "Redirecting request to new location: {}", location);
+            LOG_TEST(log, "Redirecting request to new location: {}", location);
            ProfileEvents::increment(select_metric(S3MetricType::Redirects));
@@ -292,7 +292,7 @@ void PocoHTTPClient::makeRequestInternal(
            response->AddHeader(header_name, header_value);
            headers_ss << header_name << ": " << header_value << "; ";
        }
-        LOG_DEBUG(log, "Received headers: {}", headers_ss.str());
+        LOG_TEST(log, "Received headers: {}", headers_ss.str());
        if (status_code == 429 || status_code == 503)
        { // API throttling
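
The `attempt > 0` branch above rewinds the request body before a retry: the previous attempt already consumed the stream, so without `seekg(0)` a retried request would send an empty body. A standalone sketch of the same rewind:

    #include <iostream>
    #include <sstream>

    int main()
    {
        std::istringstream body("payload");

        std::ostringstream attempt1;
        attempt1 << body.rdbuf();              // first attempt drains the stream

        body.clear();                          // reset any stale state flags (harmless if none are set)
        body.seekg(0);                         // rewind, as the diff's seekg(0) does

        std::ostringstream attempt2;
        attempt2 << body.rdbuf();              // the retry sees the full body again
        std::cout << attempt2.str() << '\n';   // prints "payload"
    }
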


@@ -51,8 +51,8 @@ const std::pair<DB::LogsLevel, Poco::Message::Priority> & convertLogLevel(Aws::U
         {Aws::Utils::Logging::LogLevel::Error, {DB::LogsLevel::error, Poco::Message::PRIO_ERROR}},
         {Aws::Utils::Logging::LogLevel::Warn, {DB::LogsLevel::warning, Poco::Message::PRIO_WARNING}},
         {Aws::Utils::Logging::LogLevel::Info, {DB::LogsLevel::information, Poco::Message::PRIO_INFORMATION}},
-        {Aws::Utils::Logging::LogLevel::Debug, {DB::LogsLevel::debug, Poco::Message::PRIO_DEBUG}},
-        {Aws::Utils::Logging::LogLevel::Trace, {DB::LogsLevel::trace, Poco::Message::PRIO_TRACE}},
+        {Aws::Utils::Logging::LogLevel::Debug, {DB::LogsLevel::debug, Poco::Message::PRIO_TEST}},
+        {Aws::Utils::Logging::LogLevel::Trace, {DB::LogsLevel::trace, Poco::Message::PRIO_TEST}},
     };
     return mapping.at(log_level);
 }
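
convertLogLevel above returns a reference into a function-local static lookup table; after this change, the AWS SDK's Debug and Trace output lands at ClickHouse's below-trace test priority, keeping SDK chatter out of ordinary logs. A sketch of the same table-lookup pattern with illustrative enumerators (these names are not the AWS SDK's):

    #include <unordered_map>

    enum class SdkLevel { Error, Warn, Info, Debug, Trace };
    enum class AppLevel { Error, Warning, Information, Debug, Trace, Test };

    const AppLevel & convert(SdkLevel level)
    {
        // Built once on first call; .at() throws on an unmapped level,
        // so a forgotten enumerator fails loudly instead of silently.
        static const std::unordered_map<SdkLevel, AppLevel> mapping =
        {
            {SdkLevel::Error, AppLevel::Error},
            {SdkLevel::Warn, AppLevel::Warning},
            {SdkLevel::Info, AppLevel::Information},
            {SdkLevel::Debug, AppLevel::Test},   // demoted, mirroring the diff
            {SdkLevel::Trace, AppLevel::Test},
        };
        return mapping.at(level);
    }

    int main()
    {
        // Debug from the SDK now maps to the quietest application level.
        return convert(SdkLevel::Debug) == AppLevel::Test ? 0 : 1;
    }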