From cbcf267f5526ab7b72383071fcd86f646e0b4e6e Mon Sep 17 00:00:00 2001
From: alesapin
Date: Mon, 5 Sep 2022 22:54:38 +0200
Subject: [PATCH 1/9] One more line of logging in write buffer from S3

---
 src/IO/WriteBufferFromS3.cpp | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/IO/WriteBufferFromS3.cpp b/src/IO/WriteBufferFromS3.cpp
index 7646e2514a5..2fb6502b0c4 100644
--- a/src/IO/WriteBufferFromS3.cpp
+++ b/src/IO/WriteBufferFromS3.cpp
@@ -174,6 +174,8 @@ void WriteBufferFromS3::finalizeImpl()
 
         if (!response.IsSuccess())
             throw Exception(ErrorCodes::S3_ERROR, "Object {} from bucket {} disappeared immediately after upload, it's a bug in S3 or S3 API.", key, bucket);
+        else
+            LOG_TRACE(log, "Object {} exists after upload", key);
     }
 }
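A note on patch 1: the new trace line instruments the read-after-write check that `finalizeImpl()` already performs with `HeadObject`. Below is a minimal standalone sketch of that kind of check against the AWS C++ SDK; the helper name and the stderr reporting are illustrative stand-ins, not ClickHouse code.

```cpp
#include <aws/s3/S3Client.h>
#include <aws/s3/model/HeadObjectRequest.h>
#include <iostream>

/// Verify that a just-uploaded object is visible, mirroring the check
/// in WriteBufferFromS3::finalizeImpl(). Hypothetical helper.
bool objectExistsAfterUpload(const Aws::S3::S3Client & client,
                             const Aws::String & bucket, const Aws::String & key)
{
    Aws::S3::Model::HeadObjectRequest request;
    request.SetBucket(bucket);
    request.SetKey(key);

    auto response = client.HeadObject(request);
    if (!response.IsSuccess())
    {
        /// Read-after-write visibility failure: surface it loudly, as finalizeImpl() does.
        std::cerr << "Object " << key << " disappeared immediately after upload: "
                  << response.GetError().GetMessage() << "\n";
        return false;
    }
    std::cerr << "Object " << key << " exists after upload\n"; /// the new trace line
    return true;
}
```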
From 8977af51505719a56ad65a9f9bc4bbfda7bfb16a Mon Sep 17 00:00:00 2001
From: alesapin
Date: Mon, 5 Sep 2022 22:59:25 +0200
Subject: [PATCH 2/9] Remove some outdated assertions

---
 src/Storages/MergeTree/IMergeTreeDataPart.cpp | 24 -------------------
 1 file changed, 24 deletions(-)

diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp
index a52de88321c..d89e3637a6f 100644
--- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp
+++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp
@@ -532,25 +532,6 @@ void IMergeTreeDataPart::removeIfNeeded()
             LOG_TRACE(storage.log, "Removed part from old location {}", path);
         }
     }
-    catch (const Exception & ex)
-    {
-        tryLogCurrentException(__PRETTY_FUNCTION__, fmt::format("while removing part {} with path {}", name, path));
-
-        /// In this case we want to avoid assertions, because such errors are unavoidable in setup
-        /// with zero-copy replication.
-        if (const auto * keeper_exception = dynamic_cast<const Coordination::Exception *>(&ex))
-        {
-            if (Coordination::isHardwareError(keeper_exception->code))
-                return;
-        }
-
-        /// FIXME If part it temporary, then directory will not be removed for 1 day (temporary_directories_lifetime).
-        /// If it's tmp_merge_ or tmp_fetch_,
-        /// then all future attempts to execute part producing operation will fail with "directory already exists".
-        assert(!is_temp);
-        assert(state != MergeTreeDataPartState::DeleteOnDestroy);
-        assert(state != MergeTreeDataPartState::Temporary);
-    }
     catch (...)
     {
         tryLogCurrentException(__PRETTY_FUNCTION__, fmt::format("while removing part {} with path {}", name, path));
@@ -558,11 +539,6 @@ void IMergeTreeDataPart::removeIfNeeded()
         /// FIXME If part it temporary, then directory will not be removed for 1 day (temporary_directories_lifetime).
         /// If it's tmp_merge_ or tmp_fetch_,
         /// then all future attempts to execute part producing operation will fail with "directory already exists".
-        ///
-        /// For remote disks this issue is really frequent, so we don't about server here
-        assert(!is_temp);
-        assert(state != MergeTreeDataPartState::DeleteOnDestroy);
-        assert(state != MergeTreeDataPartState::Temporary);
     }
 }
 
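For context on patch 2: the deleted block downcast the caught exception to special-case Keeper hardware errors before asserting, and the patch drops the asserts because such failures are routine under zero-copy replication. A simplified sketch of that downcast pattern, with stand-in exception types rather than the real ClickHouse/Coordination classes:

```cpp
#include <iostream>
#include <stdexcept>
#include <string>

struct Exception : std::runtime_error
{
    using std::runtime_error::runtime_error;
};

struct KeeperException : Exception
{
    KeeperException(const std::string & msg, bool hardware_error_)
        : Exception(msg), hardware_error(hardware_error_) {}
    bool hardware_error;
};

void removePartBestEffort()
{
    try
    {
        throw KeeperException("Session expired", /*hardware_error_=*/ true);
    }
    catch (const Exception & ex)
    {
        std::cerr << "while removing part: " << ex.what() << "\n";

        /// Downcast to special-case the subtype, as the deleted block did.
        if (const auto * keeper = dynamic_cast<const KeeperException *>(&ex))
            if (keeper->hardware_error)
                return; /// unavoidable with zero-copy replication, so no assert

        /// The asserts the patch removes used to fire here for everything else.
    }
}

int main()
{
    removePartBestEffort();
}
```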
From 6ea7f1e0118af6d667dd03e10b76bb7fbb1bfa37 Mon Sep 17 00:00:00 2001
From: alesapin
Date: Tue, 6 Sep 2022 13:59:55 +0200
Subject: [PATCH 3/9] Better exception handling for ReadBufferFromS3

---
 .../ObjectStorages/S3/S3ObjectStorage.cpp     |  4 +--
 src/IO/ReadBufferFromS3.cpp                   | 14 ++++++--
 src/IO/S3Common.cpp                           | 20 +++++++++++
 src/IO/S3Common.h                             | 36 +++++++++++++++++--
 src/IO/WriteBufferFromS3.cpp                  |  9 ++---
 src/Storages/StorageReplicatedMergeTree.cpp   |  2 +-
 6 files changed, 73 insertions(+), 12 deletions(-)

diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp
index b3fa36ea169..08eba4758a0 100644
--- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp
+++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp
@@ -56,7 +56,7 @@ void throwIfError(const Aws::Utils::Outcome<Result, Error> & response)
     if (!response.IsSuccess())
     {
         const auto & err = response.GetError();
-        throw Exception(ErrorCodes::S3_ERROR, "{} (Code: {})", err.GetMessage(), static_cast<int>(err.GetErrorType()));
+        throw S3Exception(fmt::format("{} (Code: {})", err.GetMessage(), static_cast<int>(err.GetErrorType())), err.GetErrorType());
     }
 }
 
@@ -70,7 +70,7 @@ void throwIfUnexpectedError(const Aws::Utils::Outcome<Result, Error> & response,
     if (!response.IsSuccess() && (!if_exists || !isNotFoundError(response.GetError().GetErrorType())))
     {
         const auto & err = response.GetError();
-        throw Exception(ErrorCodes::S3_ERROR, "{} (Code: {})", err.GetMessage(), static_cast<int>(err.GetErrorType()));
+        throw S3Exception(fmt::format("{} (Code: {})", err.GetMessage(), static_cast<int>(err.GetErrorType())), err.GetErrorType());
     }
 }
 
diff --git a/src/IO/ReadBufferFromS3.cpp b/src/IO/ReadBufferFromS3.cpp
index f7fd06cac08..38b59dae186 100644
--- a/src/IO/ReadBufferFromS3.cpp
+++ b/src/IO/ReadBufferFromS3.cpp
@@ -130,12 +130,19 @@ bool ReadBufferFromS3::nextImpl()
             ProfileEvents::increment(ProfileEvents::ReadBufferFromS3Microseconds, watch.elapsedMicroseconds());
             break;
         }
-        catch (const Exception & e)
+        catch (const S3Exception & e)
        {
             watch.stop();
             ProfileEvents::increment(ProfileEvents::ReadBufferFromS3Microseconds, watch.elapsedMicroseconds());
             ProfileEvents::increment(ProfileEvents::ReadBufferFromS3RequestsErrors, 1);
 
+            /// It doesn't make sense to retry Access Denied or No Such Key
+            if (!e.isRetryableError())
+            {
+                tryLogCurrentException(log);
+                throw;
+            }
+
             LOG_DEBUG(
                 log,
                 "Caught exception while reading S3 object. Bucket: {}, Key: {}, Version: {}, Offset: {}, Attempt: {}, Message: {}",
@@ -306,7 +313,10 @@ std::unique_ptr<ReadBuffer> ReadBufferFromS3::initialize()
         return std::make_unique<ReadBufferFromIStream>(read_result.GetBody(), buffer_size);
     }
     else
-        throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
+    {
+        const auto & error = outcome.GetError();
+        throw S3Exception(error.GetMessage(), error.GetErrorType());
+    }
 }
 
 SeekableReadBufferPtr ReadBufferS3Factory::getReader()
diff --git a/src/IO/S3Common.cpp b/src/IO/S3Common.cpp
index 1ff1c609952..2e2f5078769 100644
--- a/src/IO/S3Common.cpp
+++ b/src/IO/S3Common.cpp
@@ -35,6 +35,26 @@
 
 #    include
 
+namespace DB
+{
+
+bool S3Exception::isRetryableError() const
+{
+    /// Looks like this list is quite conservative, add more codes if you wish
+    static const std::unordered_set<Aws::S3::S3Errors> unretryable_errors = {
+        Aws::S3::S3Errors::NO_SUCH_KEY,
+        Aws::S3::S3Errors::ACCESS_DENIED,
+        Aws::S3::S3Errors::INVALID_ACCESS_KEY_ID,
+        Aws::S3::S3Errors::INVALID_SIGNATURE,
+        Aws::S3::S3Errors::NO_SUCH_UPLOAD,
+        Aws::S3::S3Errors::NO_SUCH_BUCKET,
+    };
+
+    return !unretryable_errors.contains(code);
+}
+
+}
+
 namespace
 {
diff --git a/src/IO/S3Common.h b/src/IO/S3Common.h
index 46a09ee8901..b190f0afdc5 100644
--- a/src/IO/S3Common.h
+++ b/src/IO/S3Common.h
@@ -7,23 +7,53 @@
 #include
 #include
 #include
+#include
 #include
 #include
+#include
 
 namespace Aws::S3
 {
 class S3Client;
 }
 
+
 namespace DB
 {
-    class RemoteHostFilter;
-    struct HttpHeader;
-    using HeaderCollection = std::vector<HttpHeader>;
+namespace ErrorCodes
+{
+    extern const int S3_ERROR;
 }
 
+class RemoteHostFilter;
+struct HttpHeader;
+using HeaderCollection = std::vector<HttpHeader>;
+
+class S3Exception : public Exception
+{
+public:
+    S3Exception(const std::string & msg, const Aws::S3::S3Errors code_)
+        : Exception(msg, ErrorCodes::S3_ERROR)
+        , code(code_)
+    {}
+
+    Aws::S3::S3Errors getS3ErrorCode() const
+    {
+        return code;
+    }
+
+    bool isRetryableError() const;
+
+private:
+    const Aws::S3::S3Errors code;
+};
+}
+
+
 namespace DB::S3
 {
+
 class ClientFactory
 {
 public:
diff --git a/src/IO/WriteBufferFromS3.cpp b/src/IO/WriteBufferFromS3.cpp
index 2fb6502b0c4..cc46af361cd 100644
--- a/src/IO/WriteBufferFromS3.cpp
+++ b/src/IO/WriteBufferFromS3.cpp
@@ -8,6 +8,7 @@
 
 #include
 #include
+#include
 #include
 #include
 
@@ -173,7 +174,7 @@ void WriteBufferFromS3::finalizeImpl()
         auto response = client_ptr->HeadObject(request);
 
         if (!response.IsSuccess())
-            throw Exception(ErrorCodes::S3_ERROR, "Object {} from bucket {} disappeared immediately after upload, it's a bug in S3 or S3 API.", key, bucket);
+            throw S3Exception(fmt::format("Object {} from bucket {} disappeared immediately after upload, it's a bug in S3 or S3 API.", key, bucket), response.GetError().GetErrorType());
         else
             LOG_TRACE(log, "Object {} exists after upload", key);
     }
@@ -199,7 +200,7 @@ void WriteBufferFromS3::createMultipartUpload()
         LOG_TRACE(log, "Multipart upload has created. Bucket: {}, Key: {}, Upload id: {}", bucket, key, multipart_upload_id);
     }
     else
-        throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
+        throw S3Exception(outcome.GetError().GetMessage(), outcome.GetError().GetErrorType());
 }
 
 void WriteBufferFromS3::writePart()
@@ -311,7 +312,7 @@ void WriteBufferFromS3::processUploadRequest(UploadPartTask & task)
         LOG_TRACE(log, "Writing part finished. Bucket: {}, Key: {}, Upload_id: {}, Etag: {}, Parts: {}", bucket, key, multipart_upload_id, task.tag, part_tags.size());
     }
     else
-        throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
+        throw S3Exception(outcome.GetError().GetMessage(), outcome.GetError().GetErrorType());
 
     total_parts_uploaded++;
 }
@@ -432,7 +433,7 @@ void WriteBufferFromS3::processPutRequest(const PutObjectTask & task)
     if (outcome.IsSuccess())
         LOG_TRACE(log, "Single part upload has completed. Bucket: {}, Key: {}, Object size: {}, WithPool: {}", bucket, key, task.req.GetContentLength(), with_pool);
     else
-        throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
+        throw S3Exception(outcome.GetError().GetMessage(), outcome.GetError().GetErrorType());
 }
 
 void WriteBufferFromS3::waitForReadyBackGroundTasks()
diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp
index 762c3d52627..421aa33db97 100644
--- a/src/Storages/StorageReplicatedMergeTree.cpp
+++ b/src/Storages/StorageReplicatedMergeTree.cpp
@@ -7654,7 +7654,7 @@ std::pair<bool, NameSet> StorageReplicatedMergeTree::unlockSharedDataByID(
             }
             else
             {
-                LOG_TRACE(logger, "Can't remove parent zookeeper lock {} for part {}, because children {} ({}) were concurrently created",
+                LOG_TRACE(logger, "Can't remove parent zookeeper lock {} for part {}, because children {} ({}) exist",
                     zookeeper_part_node, part_name, children.size(), fmt::join(children, ", "));
             }
         }
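The core idea of patch 3 is to classify the S3 error code once, in `S3Exception::isRetryableError()`, so the read path keeps retrying only when another attempt can help. A self-contained model of that control flow; the enum and the retry driver are simplified stand-ins for `Aws::S3::S3Errors` and `ReadBufferFromS3::nextImpl()`:

```cpp
#include <iostream>
#include <stdexcept>
#include <string>
#include <unordered_set>

/// Stand-in for Aws::S3::S3Errors.
enum class S3Error { NO_SUCH_KEY, ACCESS_DENIED, SLOW_DOWN, INTERNAL_FAILURE };

class S3Exception : public std::runtime_error
{
public:
    S3Exception(const std::string & msg, S3Error code_) : std::runtime_error(msg), code(code_) {}

    bool isRetryableError() const
    {
        /// Deny-list, like S3Common.cpp: anything not listed is assumed retryable.
        static const std::unordered_set<S3Error> unretryable_errors{S3Error::NO_SUCH_KEY, S3Error::ACCESS_DENIED};
        return !unretryable_errors.contains(code);
    }

private:
    S3Error code;
};

int readWithRetries(int max_attempts)
{
    for (int attempt = 1; attempt <= max_attempts; ++attempt)
    {
        try
        {
            throw S3Exception("SlowDown: please reduce your request rate", S3Error::SLOW_DOWN);
        }
        catch (const S3Exception & e)
        {
            if (!e.isRetryableError())
                throw; /// Access Denied / No Such Key: another attempt cannot succeed
            std::cerr << "attempt " << attempt << " failed, retrying: " << e.what() << "\n";
        }
    }
    return -1; /// all attempts exhausted
}

int main()
{
    readWithRetries(3);
}
```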
From 422b1658eba9de10b176a75b2178f5688b3aa0f9 Mon Sep 17 00:00:00 2001
From: alesapin
Date: Tue, 6 Sep 2022 14:42:48 +0200
Subject: [PATCH 4/9] Review fix

---
 src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp |  2 +-
 src/IO/S3Common.h                               | 11 ++++++++++-
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp
index 08eba4758a0..45304ac2fac 100644
--- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp
+++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp
@@ -70,7 +70,7 @@ void throwIfUnexpectedError(const Aws::Utils::Outcome<Result, Error> & response,
     if (!response.IsSuccess() && (!if_exists || !isNotFoundError(response.GetError().GetErrorType())))
     {
         const auto & err = response.GetError();
-        throw S3Exception(fmt::format("{} (Code: {})", err.GetMessage(), static_cast<int>(err.GetErrorType())), err.GetErrorType());
+        throw S3Exception(err.GetErrorType(), "{} (Code: {})", err.GetMessage(), static_cast<int>(err.GetErrorType()));
     }
 }
 
diff --git a/src/IO/S3Common.h b/src/IO/S3Common.h
index b190f0afdc5..3b1e3d82bc9 100644
--- a/src/IO/S3Common.h
+++ b/src/IO/S3Common.h
@@ -33,7 +33,16 @@ using HeaderCollection = std::vector<HttpHeader>;
 class S3Exception : public Exception
 {
 public:
-    S3Exception(const std::string & msg, const Aws::S3::S3Errors code_)
+
+    // Format message with fmt::format, like the logging functions.
+    template <typename... Args>
+    S3Exception(Aws::S3::S3Errors code_, fmt::format_string<Args...> fmt, Args &&... args)
+        : Exception(fmt::format(fmt, std::forward<Args>(args)...), ErrorCodes::S3_ERROR)
+        , code(code_)
+    {
+    }
+
+    S3Exception(const std::string & msg, Aws::S3::S3Errors code_)
         : Exception(msg, ErrorCodes::S3_ERROR)
         , code(code_)
     {}

From ceed9f418bb22319308bac2f5fa134e249008390 Mon Sep 17 00:00:00 2001
From: alesapin
Date: Tue, 6 Sep 2022 18:22:44 +0200
Subject: [PATCH 5/9] Restore better error handling

---
 src/IO/ReadBufferFromS3.cpp | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/src/IO/ReadBufferFromS3.cpp b/src/IO/ReadBufferFromS3.cpp
index 38b59dae186..e8f2b555bd4 100644
--- a/src/IO/ReadBufferFromS3.cpp
+++ b/src/IO/ReadBufferFromS3.cpp
@@ -130,14 +130,24 @@ bool ReadBufferFromS3::nextImpl()
             ProfileEvents::increment(ProfileEvents::ReadBufferFromS3Microseconds, watch.elapsedMicroseconds());
             break;
         }
-        catch (const S3Exception & e)
+        catch (const Exception & e)
         {
             watch.stop();
             ProfileEvents::increment(ProfileEvents::ReadBufferFromS3Microseconds, watch.elapsedMicroseconds());
             ProfileEvents::increment(ProfileEvents::ReadBufferFromS3RequestsErrors, 1);
 
-            /// It doesn't make sense to retry Access Denied or No Such Key
-            if (!e.isRetryableError())
+            if (const auto * s3_exception = dynamic_cast<const S3Exception *>(&e))
+            {
+                /// It doesn't make sense to retry Access Denied or No Such Key
+                if (!s3_exception->isRetryableError())
+                {
+                    tryLogCurrentException(log);
+                    throw;
+                }
+            }
+
+            /// It doesn't make sense to retry allocator errors
+            if (e.code() == ErrorCodes::CANNOT_ALLOCATE_MEMORY)
             {
                 tryLogCurrentException(log);
                 throw;

From 09e97a638152b9f267d916067ce8a42a48ed33dc Mon Sep 17 00:00:00 2001
From: alesapin
Date: Tue, 6 Sep 2022 18:38:34 +0200
Subject: [PATCH 6/9] Fix style

---
 src/IO/ReadBufferFromS3.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/IO/ReadBufferFromS3.cpp b/src/IO/ReadBufferFromS3.cpp
index e8f2b555bd4..7e02addd21c 100644
--- a/src/IO/ReadBufferFromS3.cpp
+++ b/src/IO/ReadBufferFromS3.cpp
@@ -34,6 +34,7 @@ namespace ErrorCodes
     extern const int CANNOT_SEEK_THROUGH_FILE;
     extern const int SEEK_POSITION_OUT_OF_BOUND;
     extern const int LOGICAL_ERROR;
+    extern const int CANNOT_ALLOCATE_MEMORY;
 }
 
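The constructor added in patch 4 follows the compile-time-checked `fmt::format_string` idiom also used by the `LOG_*` macros. A minimal sketch of the same idiom outside ClickHouse, with `std::runtime_error` and a plain `int` standing in for the real base class and error-code type:

```cpp
#include <fmt/format.h>
#include <stdexcept>
#include <utility>

class S3Exception : public std::runtime_error
{
public:
    /// Format message with fmt::format; the format string is checked at compile time.
    template <typename... Args>
    S3Exception(int code_, fmt::format_string<Args...> fmt, Args &&... args)
        : std::runtime_error(fmt::format(fmt, std::forward<Args>(args)...))
        , code(code_)
    {
    }

    int errorCode() const { return code; }

private:
    int code;
};

int main()
{
    try
    {
        /// Call sites can now pass the error code first and format lazily:
        throw S3Exception(42, "{} (Code: {})", "NoSuchKey", 42);
    }
    catch (const S3Exception & e)
    {
        fmt::print("caught: {} (code {})\n", e.what(), e.errorCode());
    }
}
```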
From b778b9f37f2e484678142411f1ca22234ffd9b6f Mon Sep 17 00:00:00 2001
From: alesapin
Date: Tue, 6 Sep 2022 19:25:58 +0200
Subject: [PATCH 7/9] Improve logging further

---
 src/Storages/MergeTree/IMergeTreeDataPart.cpp |  9 ++++++
 src/Storages/MergeTree/MergeTreeData.cpp      |  7 +++--
 src/Storages/StorageReplicatedMergeTree.cpp   | 28 +++++++++++++++++--
 3 files changed, 39 insertions(+), 5 deletions(-)

diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp
index d89e3637a6f..1bc73c82dbe 100644
--- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp
+++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp
@@ -1409,7 +1409,10 @@ std::pair<bool, NameSet> IMergeTreeDataPart::canRemovePart() const
 {
     /// NOTE: It's needed for zero-copy replication
     if (force_keep_shared_data)
+    {
+        LOG_DEBUG(storage.log, "Blobs for part {} cannot be removed because it's forced to be kept", name);
         return std::make_pair(false, NameSet{});
+    }
 
     return storage.unlockSharedData(*this);
 }
@@ -1433,6 +1436,12 @@ void IMergeTreeDataPart::remove() const
 
     auto [can_remove, files_not_to_remove] = canRemovePart();
 
+    if (!can_remove)
+        LOG_TRACE(storage.log, "Blobs of part {} cannot be removed", name);
+
+    if (!files_not_to_remove.empty())
+        LOG_TRACE(storage.log, "Some blobs ({}) of part {} cannot be removed", fmt::join(files_not_to_remove, ", "), name);
+
     if (!isStoredOnDisk())
         return;
diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp
index e6f770ce7c0..f37f14c6924 100644
--- a/src/Storages/MergeTree/MergeTreeData.cpp
+++ b/src/Storages/MergeTree/MergeTreeData.cpp
@@ -1901,7 +1901,10 @@ void MergeTreeData::clearPartsFromFilesystem(const DataPartsVector & parts, bool
 void MergeTreeData::clearPartsFromFilesystemImpl(const DataPartsVector & parts_to_remove, NameSet * part_names_successed)
 {
     const auto settings = getSettings();
-    if (parts_to_remove.size() > 1 && settings->max_part_removal_threads > 1 && parts_to_remove.size() > settings->concurrent_part_removal_threshold)
+    if (parts_to_remove.size() > 1
+        && !settings->allow_remote_fs_zero_copy_replication
+        && settings->max_part_removal_threads > 1
+        && parts_to_remove.size() > settings->concurrent_part_removal_threshold)
     {
         /// Parallel parts removal.
         size_t num_threads = std::min<size_t>(settings->max_part_removal_threads, parts_to_remove.size());
@@ -1916,7 +1919,7 @@ void MergeTreeData::clearPartsFromFilesystemImpl(const DataPartsVector & parts_t
                 if (thread_group)
                     CurrentThread::attachToIfDetached(thread_group);
 
-                LOG_DEBUG(log, "Removing part from filesystem {}", part->name);
+                LOG_DEBUG(log, "Removing part from filesystem {} (concurrently)", part->name);
                 part->remove();
                 if (part_names_successed)
                 {
diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp
index 421aa33db97..b17fea7b9f9 100644
--- a/src/Storages/StorageReplicatedMergeTree.cpp
+++ b/src/Storages/StorageReplicatedMergeTree.cpp
@@ -7538,21 +7538,39 @@ void StorageReplicatedMergeTree::lockSharedData(const IMergeTreeDataPart & part,
 
 std::pair<bool, NameSet> StorageReplicatedMergeTree::unlockSharedData(const IMergeTreeDataPart & part) const
 {
-    if (!part.data_part_storage || !part.isStoredOnDisk())
+    auto settings = getSettings();
+    if (!settings->allow_remote_fs_zero_copy_replication)
         return std::make_pair(true, NameSet{});
 
-    if (!part.data_part_storage || !part.data_part_storage->supportZeroCopyReplication())
+    if (!part.data_part_storage || !part.isStoredOnDisk())
+    {
+        LOG_TRACE(log, "Part {} is not stored on disk, blobs can be removed", part.name);
         return std::make_pair(true, NameSet{});
+    }
+
+    if (!part.data_part_storage || !part.data_part_storage->supportZeroCopyReplication())
+    {
+        LOG_TRACE(log, "Part {} is not stored on zero-copy replicated disk, blobs can be removed", part.name);
+        return std::make_pair(true, NameSet{});
+    }
 
     /// If part is temporary refcount file may be absent
     if (part.data_part_storage->exists(IMergeTreeDataPart::FILE_FOR_REFERENCES_CHECK))
     {
         auto ref_count = part.data_part_storage->getRefCount(IMergeTreeDataPart::FILE_FOR_REFERENCES_CHECK);
 
         if (ref_count > 0) /// Keep part shard info for frozen backups
+        {
+            LOG_TRACE(log, "Part {} has more than zero local references ({}), blobs cannot be removed", part.name, ref_count);
             return std::make_pair(false, NameSet{});
+        }
+        else
+        {
+            LOG_TRACE(log, "Part {} has zero local references, will check in ZooKeeper whether blobs can be removed", part.name);
+        }
     }
     else
     {
+        LOG_TRACE(log, "Part {} looks temporary, because checksums file doesn't exist, blobs can be removed", part.name);
         /// Temporary part with some absent file cannot be locked in shared mode
         return std::make_pair(true, NameSet{});
     }
@@ -7600,10 +7618,14 @@ std::pair<bool, NameSet> StorageReplicatedMergeTree::unlockSharedDataByID(
 
             if (!children.empty())
             {
-                LOG_TRACE(logger, "Found {} ({}) zookeper locks for {}", zookeeper_part_uniq_node, children.size(), fmt::join(children, ", "));
+                LOG_TRACE(logger, "Found {} ({}) zookeeper locks for {}", children.size(), fmt::join(children, ", "), zookeeper_part_uniq_node);
                 part_has_no_more_locks = false;
                 continue;
             }
+            else
+            {
+                LOG_TRACE(logger, "No more children left for {}, will try to remove the whole node", zookeeper_part_uniq_node);
+            }
 
             auto error_code = zookeeper_ptr->tryRemove(zookeeper_part_uniq_node);
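Several of the log lines added in patch 7 render a container inline via `fmt::join`. In isolation, under the assumption of a reasonably recent {fmt} (where `fmt::join` lives in `fmt/ranges.h`):

```cpp
#include <fmt/ranges.h>
#include <string>
#include <vector>

int main()
{
    std::vector<std::string> files_not_to_remove{"checksums.txt", "columns.txt"};

    /// Prints: Some blobs (checksums.txt, columns.txt) of part all_0_0_0 cannot be removed
    fmt::print("Some blobs ({}) of part {} cannot be removed\n",
               fmt::join(files_not_to_remove, ", "), "all_0_0_0");
}
```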
", ")); + LOG_TRACE(logger, "Found {} ({}) zookeper locks for {}", children.size(), fmt::join(children, ", "), zookeeper_part_uniq_node); part_has_no_more_locks = false; continue; } + else + { + LOG_TRACE(logger, "No more children left for for {}, will try to remove the whole node", zookeeper_part_uniq_node); + } auto error_code = zookeeper_ptr->tryRemove(zookeeper_part_uniq_node); From 6ded03c000c24e3c1837fe9fe96ac32ed6956911 Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 7 Sep 2022 00:00:10 +0200 Subject: [PATCH 8/9] Disable fetch shortcut for zero copy replication --- src/Storages/StorageReplicatedMergeTree.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index aafa2be5c55..00b99d3e6df 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -3831,9 +3831,10 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora LOG_DEBUG(log, "Fetching part {} from {}", part_name, source_replica_path); + auto settings_ptr = getSettings(); TableLockHolder table_lock_holder; if (!to_detached) - table_lock_holder = lockForShare(RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations); + table_lock_holder = lockForShare(RWLockImpl::NO_QUERY, settings_ptr->lock_acquire_timeout_for_background_operations); /// Logging Stopwatch stopwatch; @@ -3857,7 +3858,8 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora covered_part_info.mutation = 0; auto source_part = getActiveContainingPart(covered_part_info); - if (source_part) + /// Fetch for zero-copy replication is cheap and straightforward, so we don't use local clone here + if (source_part && (!settings_ptr->allow_remote_fs_zero_copy_replication || !source_part->data_part_storage->supportZeroCopyReplication())) { auto source_part_header = ReplicatedMergeTreePartHeader::fromColumnsAndChecksums( source_part->getColumns(), source_part->checksums); @@ -3897,7 +3899,6 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora part_to_clone = source_part; } } - } ReplicatedMergeTreeAddress address; From 81c98dadd2dd0c15afc2d23c8dc1875f409ec4f2 Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 7 Sep 2022 11:01:06 +0200 Subject: [PATCH 9/9] Remove redundant change --- src/Storages/MergeTree/MergeTreeData.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 8fe02359702..52046c2b158 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1902,7 +1902,6 @@ void MergeTreeData::clearPartsFromFilesystemImpl(const DataPartsVector & parts_t { const auto settings = getSettings(); if (parts_to_remove.size() > 1 - && !settings->allow_remote_fs_zero_copy_replication && settings->max_part_removal_threads > 1 && parts_to_remove.size() > settings->concurrent_part_removal_threshold) {