diff --git a/src/Disks/DiskObjectStorage.cpp b/src/Disks/DiskObjectStorage.cpp
index 8f472c713b7..01f01fdeaa4 100644
--- a/src/Disks/DiskObjectStorage.cpp
+++ b/src/Disks/DiskObjectStorage.cpp
@@ -29,7 +29,8 @@ namespace ErrorCodes
     extern const int FILE_DOESNT_EXIST;
     extern const int BAD_FILE_TYPE;
     extern const int MEMORY_LIMIT_EXCEEDED;
-    extern const int SUPPORT_IS_DISABLED;
+    extern const int BAD_ARGUMENTS;
+    extern const int LOGICAL_ERROR;
 }
 
 static String revisionToString(UInt64 revision)
diff --git a/src/Disks/IDisk.h b/src/Disks/IDisk.h
index cf8b1a09ce9..e4a0b84448c 100644
--- a/src/Disks/IDisk.h
+++ b/src/Disks/IDisk.h
@@ -290,7 +290,7 @@ public:
     virtual bool isReadOnly() const { return false; }
 
-    /// Check if disk is broken. Broken disks will have 0 space and connot be used.
+    /// Check if disk is broken. Broken disks will have 0 space and cannot be used.
     virtual bool isBroken() const { return false; }
 
     /// Invoked when Global Context is shutdown.
diff --git a/src/Disks/S3/diskSettings.cpp b/src/Disks/S3/diskSettings.cpp
index c4cd3253a21..579f160abd4 100644
--- a/src/Disks/S3/diskSettings.cpp
+++ b/src/Disks/S3/diskSettings.cpp
@@ -5,6 +5,11 @@
 namespace DB
 {
 
+namespace ErrorCodes
+{
+    extern const int BAD_ARGUMENTS;
+}
+
 std::unique_ptr<S3ObjectStorageSettings> getSettings(const Poco::Util::AbstractConfiguration & config, const String & config_prefix, ContextPtr context)
 {
     S3Settings::ReadWriteSettings rw_settings;
diff --git a/src/Disks/S3ObjectStorage.cpp b/src/Disks/S3ObjectStorage.cpp
index 0a7bd45d546..e6c3d357265 100644
--- a/src/Disks/S3ObjectStorage.cpp
+++ b/src/Disks/S3ObjectStorage.cpp
@@ -35,10 +35,6 @@ namespace DB
 namespace ErrorCodes
 {
     extern const int S3_ERROR;
-    extern const int FILE_ALREADY_EXISTS;
-    extern const int UNKNOWN_FORMAT;
-    extern const int BAD_ARGUMENTS;
-    extern const int LOGICAL_ERROR;
 }
 
 namespace
@@ -82,14 +78,10 @@ bool S3ObjectStorage::exists(const std::string & path) const
     if (!object_head.IsSuccess())
     {
         if (object_head.GetError().GetErrorType() == Aws::S3::S3Errors::RESOURCE_NOT_FOUND)
-        {
-            LOG_DEBUG(&Poco::Logger::get("DEBUG"), "OBJECT DOESNT {} EXISTS", path);
             return false;
-        }
 
         throwIfError(object_head);
     }
 
-    LOG_DEBUG(&Poco::Logger::get("DEBUG"), "OBJECT {} EXISTS", path);
     return true;
 }
@@ -102,31 +94,31 @@ std::unique_ptr<ReadBufferFromFileBase> S3ObjectStorage::readObjects( /// NOLINT
     std::optional<size_t>) const
 {
-        ReadSettings disk_read_settings{read_settings};
-        if (cache)
-        {
-            if (IFileCache::isReadOnly())
-                disk_read_settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = true;
+    ReadSettings disk_read_settings{read_settings};
+    if (cache)
+    {
+        if (IFileCache::isReadOnly())
+            disk_read_settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = true;
 
-            disk_read_settings.remote_fs_cache = cache;
-        }
+        disk_read_settings.remote_fs_cache = cache;
+    }
 
-        auto settings_ptr = s3_settings.get();
+    auto settings_ptr = s3_settings.get();
 
-        auto s3_impl = std::make_unique<ReadBufferFromS3Gather>(
-            client.get(), bucket, version_id, common_path_prefix, blobs_to_read,
-            settings_ptr->s3_settings.max_single_read_retries, disk_read_settings);
+    auto s3_impl = std::make_unique<ReadBufferFromS3Gather>(
+        client.get(), bucket, version_id, common_path_prefix, blobs_to_read,
+        settings_ptr->s3_settings.max_single_read_retries, disk_read_settings);
 
-        if (read_settings.remote_fs_method == RemoteFSReadMethod::threadpool)
-        {
-            auto reader = getThreadPoolReader();
-            return std::make_unique<AsynchronousReadIndirectBufferFromRemoteFS>(reader, disk_read_settings, std::move(s3_impl));
-        }
-        else
-        {
-            auto buf = std::make_unique<ReadIndirectBufferFromRemoteFS>(std::move(s3_impl));
-            return std::make_unique<SeekAvoidingReadBuffer>(std::move(buf), settings_ptr->min_bytes_for_seek);
-        }
+    if (read_settings.remote_fs_method == RemoteFSReadMethod::threadpool)
+    {
+        auto reader = getThreadPoolReader();
+        return std::make_unique<AsynchronousReadIndirectBufferFromRemoteFS>(reader, disk_read_settings, std::move(s3_impl));
+    }
+    else
+    {
+        auto buf = std::make_unique<ReadIndirectBufferFromRemoteFS>(std::move(s3_impl));
+        return std::make_unique<SeekAvoidingReadBuffer>(std::move(buf), settings_ptr->min_bytes_for_seek);
+    }
 }
 
 std::unique_ptr<ReadBufferFromFileBase> S3ObjectStorage::readObject( /// NOLINT
@@ -135,8 +127,8 @@ std::unique_ptr<ReadBufferFromFileBase> S3ObjectStorage::readObject( /// NOLINT
     std::optional<size_t>,
     std::optional<size_t>) const
 {
-        auto settings_ptr = s3_settings.get();
-        return std::make_unique<ReadBufferFromS3>(client.get(), bucket, path, version_id, settings_ptr->s3_settings.max_single_read_retries, read_settings);
+    auto settings_ptr = s3_settings.get();
+    return std::make_unique<ReadBufferFromS3>(client.get(), bucket, path, version_id, settings_ptr->s3_settings.max_single_read_retries, read_settings);
 }
diff --git a/src/Disks/S3ObjectStorage.h b/src/Disks/S3ObjectStorage.h
index 7632a643130..81595d4385d 100644
--- a/src/Disks/S3ObjectStorage.h
+++ b/src/Disks/S3ObjectStorage.h
@@ -56,7 +56,7 @@ public:
         , s3_settings(std::move(s3_settings_))
         , version_id(std::move(version_id_))
     {}
-    
+
     bool exists(const std::string & path) const override;
 
     std::unique_ptr<ReadBufferFromFileBase> readObject( /// NOLINT
diff --git a/tests/integration/test_merge_tree_s3/test.py b/tests/integration/test_merge_tree_s3/test.py
index c7041c05403..38553f27ac1 100644
--- a/tests/integration/test_merge_tree_s3/test.py
+++ b/tests/integration/test_merge_tree_s3/test.py
@@ -229,7 +229,9 @@ def test_insert_same_partition_and_merge(cluster, merge_vertical, node_name):
     assert (
         node.query("SELECT count(distinct(id)) FROM s3_test FORMAT Values") == "(8192)"
     )
-    wait_for_delete_s3_objects(cluster, FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD, timeout=45)
+    wait_for_delete_s3_objects(
+        cluster, FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD, timeout=45
+    )
 
 
 @pytest.mark.parametrize("node_name", ["node"])
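
Note for reviewers: the test hunk above only re-wraps a call to wait_for_delete_s3_objects; the helper itself is not part of this diff. A minimal sketch of what such a helper is assumed to look like in tests/integration/test_merge_tree_s3/test.py follows; the minio_client/minio_bucket attributes and the "data/" prefix are assumptions for illustration, not taken from this diff. It polls the MinIO bucket until the expected object count is reached, because parts merged away are deleted from S3 asynchronously:

import time

def wait_for_delete_s3_objects(cluster, expected, timeout=30):
    # Poll MinIO until the number of stored objects drops to `expected`;
    # S3 object removal after a merge happens asynchronously.
    minio = cluster.minio_client
    while timeout > 0:
        if len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) == expected:
            return
        timeout -= 1
        time.sleep(1)
    # Final check so that a lingering mismatch fails the test with an assertion.
    assert len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) == expected

Under this sketch, the timeout=45 argument in the reformatted call is the per-second polling budget for this test.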