Removed infinite retries.

Vladimir Chebotarev 2021-04-19 11:02:36 +03:00
parent f3e152739b
commit 9d4295f261
9 changed files with 19 additions and 19 deletions

src/Core/Settings.h

@@ -70,7 +70,7 @@ class IColumn;
M(UInt64, connections_with_failover_max_tries, DBMS_CONNECTION_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES, "The maximum number of attempts to connect to replicas.", 0) \
M(UInt64, s3_min_upload_part_size, 512*1024*1024, "The minimum size of part to upload during multipart upload to S3.", 0) \
M(UInt64, s3_max_single_part_upload_size, 64*1024*1024, "The maximum size of object to upload using singlepart upload to S3.", 0) \
-M(Int64, s3_max_single_read_retries, 16, "The maximum number of retries during single S3 read.", 0) \
+M(UInt64, s3_max_single_read_retries, 4, "The maximum number of retries during single S3 read.", 0) \
M(UInt64, s3_max_redirects, 10, "Max number of S3 redirects hops allowed.", 0) \
M(UInt64, s3_max_connections, 1024, "The maximum number of connections per server.", 0) \
M(Bool, extremes, false, "Calculate minimums and maximums of the result columns. They can be output in JSON-formats.", IMPORTANT) \
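
Two things change here: the default retry budget drops from 16 to 4, and the setting's type becomes unsigned, so the negative "retry forever" sentinel that the old signed Int64 permitted can no longer be expressed. A standalone sketch (illustration only, not ClickHouse code) of why a negative value made the old read loop unbounded; the loop condition is the one removed from ReadBufferFromS3::nextImpl() in a hunk further down:

    #include <cstdint>
    #include <iostream>

    using Int64 = int64_t;

    int main()
    {
        /// -1 was the "retry forever" sentinel: its first clause makes the
        /// loop condition below always true, so the loop can only be
        /// stopped from outside.
        const Int64 s3_max_single_read_retries = -1;

        Int64 attempts = 0;
        for (Int64 attempt = s3_max_single_read_retries;
             s3_max_single_read_retries < 0 || attempt >= 0;
             --attempt)
        {
            if (++attempts == 1000000)
                break;  /// Artificial cap, added only so this demo terminates.
        }
        std::cout << "gave up after " << attempts << " attempts; condition still true\n";
    }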

src/Disks/S3/DiskS3.cpp

@@ -249,7 +249,7 @@ class ReadIndirectBufferFromS3 final : public ReadBufferFromFileBase
{
public:
ReadIndirectBufferFromS3(
-std::shared_ptr<Aws::S3::S3Client> client_ptr_, const String & bucket_, DiskS3::Metadata metadata_, Int64 s3_max_single_read_retries_, size_t buf_size_)
+std::shared_ptr<Aws::S3::S3Client> client_ptr_, const String & bucket_, DiskS3::Metadata metadata_, UInt64 s3_max_single_read_retries_, size_t buf_size_)
: client_ptr(std::move(client_ptr_))
, bucket(bucket_)
, metadata(std::move(metadata_))
@@ -350,7 +350,7 @@ private:
std::shared_ptr<Aws::S3::S3Client> client_ptr;
const String & bucket;
DiskS3::Metadata metadata;
-Int64 s3_max_single_read_retries;
+UInt64 s3_max_single_read_retries;
size_t buf_size;
size_t absolute_position = 0;
@@ -564,7 +564,7 @@ DiskS3::DiskS3(
String bucket_,
String s3_root_path_,
String metadata_path_,
-Int64 s3_max_single_read_retries_,
+UInt64 s3_max_single_read_retries_,
size_t min_upload_part_size_,
size_t max_single_part_upload_size_,
size_t min_bytes_for_seek_,

src/Disks/S3/DiskS3.h

@@ -40,7 +40,7 @@ public:
String bucket_,
String s3_root_path_,
String metadata_path_,
-Int64 s3_max_single_read_retries_,
+UInt64 s3_max_single_read_retries_,
size_t min_upload_part_size_,
size_t max_single_part_upload_size_,
size_t min_bytes_for_seek_,
@@ -181,7 +181,7 @@ private:
const String bucket;
const String s3_root_path;
String metadata_path;
-Int64 s3_max_single_read_retries;
+UInt64 s3_max_single_read_retries;
size_t min_upload_part_size;
size_t max_single_part_upload_size;
size_t min_bytes_for_seek;

src/IO/ReadBufferFromS3.cpp

@@ -31,7 +31,7 @@ namespace ErrorCodes
ReadBufferFromS3::ReadBufferFromS3(
-std::shared_ptr<Aws::S3::S3Client> client_ptr_, const String & bucket_, const String & key_, Int64 s3_max_single_read_retries_, size_t buffer_size_)
+std::shared_ptr<Aws::S3::S3Client> client_ptr_, const String & bucket_, const String & key_, UInt64 s3_max_single_read_retries_, size_t buffer_size_)
: SeekableReadBuffer(nullptr, 0)
, client_ptr(std::move(client_ptr_))
, bucket(bucket_)
@@ -52,7 +52,7 @@ bool ReadBufferFromS3::nextImpl()
Stopwatch watch;
bool next_result = false;
-for (Int64 attempt = s3_max_single_read_retries; s3_max_single_read_retries < 0 || attempt >= 0; --attempt)
+for (Int64 attempt = static_cast<Int64>(s3_max_single_read_retries); attempt >= 0; --attempt)
{
if (!impl)
impl = initialize();
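
This loop is the commit's core change: the `s3_max_single_read_retries < 0` escape hatch is gone, and the counter now simply counts down from the (unsigned) budget to zero, so nextImpl() makes at most s3_max_single_read_retries + 1 attempts before giving up. A minimal standalone sketch of the new semantics; the readWithRetries wrapper and the always-failing try_read callback are hypothetical, purely for illustration:

    #include <cstdint>
    #include <functional>
    #include <iostream>

    using Int64 = int64_t;
    using UInt64 = uint64_t;

    /// Bounded retry mirroring the new loop: the counter starts at the retry
    /// budget and counts down to zero, so a budget of N allows at most N + 1
    /// attempts in total and the loop always terminates.
    bool readWithRetries(UInt64 s3_max_single_read_retries, const std::function<bool()> & try_read)
    {
        for (Int64 attempt = static_cast<Int64>(s3_max_single_read_retries); attempt >= 0; --attempt)
        {
            if (try_read())
                return true;  /// Success: stop retrying.
            /// The real code re-initializes the read buffer here before retrying.
        }
        return false;  /// Budget exhausted.
    }

    int main()
    {
        int calls = 0;
        /// A reader that always fails, to make the attempt count visible.
        const bool ok = readWithRetries(4, [&] { ++calls; return false; });
        std::cout << "ok=" << ok << " attempts=" << calls << "\n";  /// Prints: ok=0 attempts=5
    }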

src/IO/ReadBufferFromS3.h

@@ -27,7 +27,7 @@ private:
std::shared_ptr<Aws::S3::S3Client> client_ptr;
String bucket;
String key;
-Int64 s3_max_single_read_retries;
+UInt64 s3_max_single_read_retries;
size_t buffer_size;
off_t offset = 0;
Aws::S3::Model::GetObjectResult read_result;
@@ -40,7 +40,7 @@ public:
std::shared_ptr<Aws::S3::S3Client> client_ptr_,
const String & bucket_,
const String & key_,
-Int64 s3_max_single_read_retries_,
+UInt64 s3_max_single_read_retries_,
size_t buffer_size_ = DBMS_DEFAULT_BUFFER_SIZE);
bool nextImpl() override;

src/Storages/StorageS3.cpp

@@ -166,7 +166,7 @@ StorageS3Source::StorageS3Source(
ContextPtr context_,
const ColumnsDescription & columns_,
UInt64 max_block_size_,
-Int64 s3_max_single_read_retries_,
+UInt64 s3_max_single_read_retries_,
const String compression_hint_,
const std::shared_ptr<Aws::S3::S3Client> & client_,
const String & bucket_,
@@ -314,7 +314,7 @@ StorageS3::StorageS3(
const String & secret_access_key_,
const StorageID & table_id_,
const String & format_name_,
-Int64 s3_max_single_read_retries_,
+UInt64 s3_max_single_read_retries_,
UInt64 min_upload_part_size_,
UInt64 max_single_part_upload_size_,
UInt64 max_connections_,
@@ -478,10 +478,10 @@ void registerStorageS3Impl(const String & name, StorageFactory & factory)
secret_access_key = engine_args[2]->as<ASTLiteral &>().value.safeGet<String>();
}
+UInt64 s3_max_single_read_retries = args.getLocalContext()->getSettingsRef().s3_max_single_read_retries;
UInt64 min_upload_part_size = args.getLocalContext()->getSettingsRef().s3_min_upload_part_size;
UInt64 max_single_part_upload_size = args.getLocalContext()->getSettingsRef().s3_max_single_part_upload_size;
UInt64 max_connections = args.getLocalContext()->getSettingsRef().s3_max_connections;
-Int64 s3_max_single_read_retries = args.getLocalContext()->getSettingsRef().s3_max_single_read_retries;
String compression_method;
String format_name;
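
The hunk above, like the two TableFunction hunks below, follows one plumbing pattern: the retry budget is read once from the query-local settings and handed, now consistently typed as UInt64, to the storage that performs the reads, so it can be tuned per query (e.g. with SET s3_max_single_read_retries = 8) without touching server config. A simplified sketch of that pattern; the Settings, Context, and Storage types here are stand-ins, not the real ClickHouse classes:

    #include <cstdint>
    #include <iostream>

    using UInt64 = uint64_t;

    struct Settings
    {
        UInt64 s3_max_single_read_retries = 4;  /// The new default from Settings.h.
    };

    struct Context
    {
        Settings settings;
        const Settings & getSettingsRef() const { return settings; }
    };

    class Storage
    {
    public:
        explicit Storage(const Context & context)
            : s3_max_single_read_retries(context.getSettingsRef().s3_max_single_read_retries)
        {
        }

        UInt64 retryBudget() const { return s3_max_single_read_retries; }

    private:
        /// Copied once at construction; all later reads use this value.
        UInt64 s3_max_single_read_retries;
    };

    int main()
    {
        Context query_context;
        query_context.settings.s3_max_single_read_retries = 8;  /// e.g. SET s3_max_single_read_retries = 8
        Storage storage(query_context);
        std::cout << storage.retryBudget() << "\n";  /// Prints: 8
    }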

src/Storages/StorageS3.h

@@ -55,7 +55,7 @@ public:
ContextPtr context_,
const ColumnsDescription & columns_,
UInt64 max_block_size_,
-Int64 s3_max_single_read_retries_,
+UInt64 s3_max_single_read_retries_,
const String compression_hint_,
const std::shared_ptr<Aws::S3::S3Client> & client_,
const String & bucket,
@@ -72,7 +72,7 @@ private:
String format;
ColumnsDescription columns_desc;
UInt64 max_block_size;
-Int64 s3_max_single_read_retries;
+UInt64 s3_max_single_read_retries;
String compression_hint;
std::shared_ptr<Aws::S3::S3Client> client;
Block sample_block;
@@ -102,7 +102,7 @@ public:
const String & secret_access_key,
const StorageID & table_id_,
const String & format_name_,
-Int64 s3_max_single_read_retries_,
+UInt64 s3_max_single_read_retries_,
UInt64 min_upload_part_size_,
UInt64 max_single_part_upload_size_,
UInt64 max_connections_,
@@ -148,7 +148,7 @@ private:
ClientAuthentificaiton client_auth;
String format_name;
-Int64 s3_max_single_read_retries;
+UInt64 s3_max_single_read_retries;
size_t min_upload_part_size;
size_t max_single_part_upload_size;
String compression_method;

src/TableFunctions/TableFunctionS3.cpp

@@ -83,10 +83,10 @@ StoragePtr TableFunctionS3::executeImpl(const ASTPtr & /*ast_function*/, Context
{
Poco::URI uri (filename);
S3::URI s3_uri (uri);
+UInt64 s3_max_single_read_retries = context->getSettingsRef().s3_max_single_read_retries;
UInt64 min_upload_part_size = context->getSettingsRef().s3_min_upload_part_size;
UInt64 max_single_part_upload_size = context->getSettingsRef().s3_max_single_part_upload_size;
UInt64 max_connections = context->getSettingsRef().s3_max_connections;
-Int64 s3_max_single_read_retries = context->getSettingsRef().s3_max_single_read_retries;
StoragePtr storage = StorageS3::create(
s3_uri,

src/TableFunctions/TableFunctionS3Cluster.cpp

@@ -109,10 +109,10 @@ StoragePtr TableFunctionS3Cluster::executeImpl(
Poco::URI uri (filename);
S3::URI s3_uri (uri);
/// Actually this parameters are not used
+UInt64 s3_max_single_read_retries = context->getSettingsRef().s3_max_single_read_retries;
UInt64 min_upload_part_size = context->getSettingsRef().s3_min_upload_part_size;
UInt64 max_single_part_upload_size = context->getSettingsRef().s3_max_single_part_upload_size;
UInt64 max_connections = context->getSettingsRef().s3_max_connections;
-Int64 s3_max_single_read_retries = context->getSettingsRef().s3_max_single_read_retries;
storage = StorageS3::create(
s3_uri, access_key_id, secret_access_key, StorageID(getDatabaseName(), table_name),
format,