Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-26 09:32:01 +00:00)
Merge pull request #41649 from ClickHouse/supress_one_more_key_doesnt_exists
Reduce AWS SDK log level + replace one exception
This commit is contained in: commit 57be648984
@@ -71,12 +71,14 @@ const char * S3_LOGGER_TAG_NAMES[][2] = {
 const std::pair<DB::LogsLevel, Poco::Message::Priority> & convertLogLevel(Aws::Utils::Logging::LogLevel log_level)
 {
+    /// We map levels to our own logger 1 to 1 except WARN+ levels. In most cases we failover such errors with retries
+    /// and don't want to see them as Errors in our logs.
     static const std::unordered_map<Aws::Utils::Logging::LogLevel, std::pair<DB::LogsLevel, Poco::Message::Priority>> mapping =
     {
-        {Aws::Utils::Logging::LogLevel::Off, {DB::LogsLevel::none, Poco::Message::PRIO_FATAL}},
-        {Aws::Utils::Logging::LogLevel::Fatal, {DB::LogsLevel::error, Poco::Message::PRIO_FATAL}},
-        {Aws::Utils::Logging::LogLevel::Error, {DB::LogsLevel::error, Poco::Message::PRIO_ERROR}},
-        {Aws::Utils::Logging::LogLevel::Warn, {DB::LogsLevel::warning, Poco::Message::PRIO_WARNING}},
+        {Aws::Utils::Logging::LogLevel::Off, {DB::LogsLevel::none, Poco::Message::PRIO_INFORMATION}},
+        {Aws::Utils::Logging::LogLevel::Fatal, {DB::LogsLevel::information, Poco::Message::PRIO_INFORMATION}},
+        {Aws::Utils::Logging::LogLevel::Error, {DB::LogsLevel::information, Poco::Message::PRIO_INFORMATION}},
+        {Aws::Utils::Logging::LogLevel::Warn, {DB::LogsLevel::information, Poco::Message::PRIO_INFORMATION}},
         {Aws::Utils::Logging::LogLevel::Info, {DB::LogsLevel::information, Poco::Message::PRIO_INFORMATION}},
         {Aws::Utils::Logging::LogLevel::Debug, {DB::LogsLevel::debug, Poco::Message::PRIO_TEST}},
         {Aws::Utils::Logging::LogLevel::Trace, {DB::LogsLevel::trace, Poco::Message::PRIO_TEST}},
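For illustration, here is a minimal, self-contained sketch of the lookup pattern this hunk changes. The SdkLogLevel and OurLogLevel enums and the integer priority are hypothetical stand-ins for the real Aws::Utils::Logging::LogLevel, DB::LogsLevel, and Poco::Message::Priority types; this is not the ClickHouse code itself:

    #include <iostream>
    #include <unordered_map>
    #include <utility>

    /// Hypothetical stand-ins for Aws::Utils::Logging::LogLevel and for the
    /// DB::LogsLevel / Poco::Message::Priority pair used in the real code.
    enum class SdkLogLevel { Off, Fatal, Error, Warn, Info, Debug, Trace };
    enum class OurLogLevel { None, Information, Debug, Trace };

    const std::pair<OurLogLevel, int> & convertLogLevel(SdkLogLevel level)
    {
        /// Same idea as the patch: WARN+ SDK levels are demoted, because such
        /// errors are usually retried and should not show up as Errors.
        static const std::unordered_map<SdkLogLevel, std::pair<OurLogLevel, int>> mapping =
        {
            {SdkLogLevel::Off,   {OurLogLevel::None, 0}},
            {SdkLogLevel::Fatal, {OurLogLevel::Information, 6}},
            {SdkLogLevel::Error, {OurLogLevel::Information, 6}},
            {SdkLogLevel::Warn,  {OurLogLevel::Information, 6}},
            {SdkLogLevel::Info,  {OurLogLevel::Information, 6}},
            {SdkLogLevel::Debug, {OurLogLevel::Debug, 8}},
            {SdkLogLevel::Trace, {OurLogLevel::Trace, 8}},
        };
        return mapping.at(level); /// throws std::out_of_range for unmapped levels
    }

    int main()
    {
        auto [our_level, priority] = convertLogLevel(SdkLogLevel::Error);
        std::cout << "mapped priority: " << priority << '\n'; /// prints 6
        (void)our_level;
    }

Returning a reference to an entry of a function-local static map is the same trick the real convertLogLevel uses: the map is built once, lookups are cheap, and .at() makes an unmapped level fail loudly instead of silently.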
@@ -962,14 +962,32 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDiskRemoteMeta(
     }

     assertEOF(in);
-    data_part_storage_builder->commit();
+    MergeTreeData::MutableDataPartPtr new_data_part;
+    try
+    {
+        data_part_storage_builder->commit();

-    MergeTreeData::MutableDataPartPtr new_data_part = data.createPart(part_name, data_part_storage);
-    new_data_part->version.setCreationTID(Tx::PrehistoricTID, nullptr);
-    new_data_part->is_temp = true;
-    new_data_part->modification_time = time(nullptr);
-    new_data_part->loadColumnsChecksumsIndexes(true, false);
+        new_data_part = data.createPart(part_name, data_part_storage);
+        new_data_part->version.setCreationTID(Tx::PrehistoricTID, nullptr);
+        new_data_part->is_temp = true;
+        new_data_part->modification_time = time(nullptr);
+        new_data_part->loadColumnsChecksumsIndexes(true, false);
+    }
+#if USE_AWS_S3
+    catch (const S3Exception & ex)
+    {
+        if (ex.getS3ErrorCode() == Aws::S3::S3Errors::NO_SUCH_KEY)
+        {
+            throw Exception(ErrorCodes::S3_ERROR, "Cannot fetch part {} because we lost lock and it was concurrently removed", part_name);
+        }
+        throw;
+    }
+#endif
+    catch (...) /// Redundant catch, just to be able to add first one with #if
+    {
+        throw;
+    }

     data.lockSharedData(*new_data_part, /* replace_existing_lock = */ true, {});
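One detail worth calling out: the trailing catch (...) is not redundant style for its own sake. A catch clause wrapped in #if USE_AWS_S3 disappears in builds without S3 support, and a try block with no handler at all is ill-formed C++, so the unconditional catch keeps the code compiling either way. Below is a minimal sketch of the pattern, using a hypothetical MY_FEATURE macro and SpecialError type in place of USE_AWS_S3 and S3Exception:

    #include <iostream>
    #include <stdexcept>

    #define MY_FEATURE 1 /// hypothetical stand-in for USE_AWS_S3

    #if MY_FEATURE
    /// Stand-in for S3Exception: only exists when the feature is compiled in.
    struct SpecialError : std::runtime_error
    {
        using std::runtime_error::runtime_error;
    };
    #endif

    void doWork()
    {
    #if MY_FEATURE
        throw SpecialError("key does not exist");
    #else
        throw std::runtime_error("generic failure");
    #endif
    }

    int main()
    {
        try
        {
            doWork();
        }
    #if MY_FEATURE
        catch (const SpecialError & ex)
        {
            /// Translate the feature-specific error into a friendlier message,
            /// as the patch does for NO_SUCH_KEY -> "we lost lock ...".
            std::cout << "translated: " << ex.what() << '\n';
            return 1;
        }
    #endif
        catch (...) /// Redundant catch: keeps `try` valid when MY_FEATURE is off.
        {
            std::cout << "unknown error\n";
            return 2;
        }
    }

Toggling MY_FEATURE to 0 shows the point: the conditional handler vanishes from the translation unit, and only the unconditional catch (...) keeps the try block well-formed.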