recheck 404 response for complete multipart upload

Sema Checherinda 2023-11-08 18:23:33 +01:00
parent 1e981120c1
commit 7f34e0b24e
2 changed files with 35 additions and 18 deletions

@@ -387,27 +387,44 @@ Model::CompleteMultipartUploadOutcome Client::CompleteMultipartUpload(const Comp
     auto outcome = doRequestWithRetryNetworkErrors</*IsReadMethod*/ false>(
         request, [this](const Model::CompleteMultipartUploadRequest & req) { return CompleteMultipartUpload(req); });
 
-    if (!outcome.IsSuccess() || provider_type != ProviderType::GCS)
-        return outcome;
-
     const auto & key = request.GetKey();
     const auto & bucket = request.GetBucket();
 
-    /// For GCS we will try to compose object at the end, otherwise we cannot do a native copy
-    /// for the object (e.g. for backups)
-    /// We don't care if the compose fails, because the upload was still successful, only the
-    /// performance for copying the object will be affected
-    S3::ComposeObjectRequest compose_req;
-    compose_req.SetBucket(bucket);
-    compose_req.SetKey(key);
-    compose_req.SetComponentNames({key});
-    compose_req.SetContentType("binary/octet-stream");
-    auto compose_outcome = ComposeObject(compose_req);
-
-    if (compose_outcome.IsSuccess())
-        LOG_TRACE(log, "Composing object was successful");
-    else
-        LOG_INFO(log, "Failed to compose object. Message: {}, Key: {}, Bucket: {}", compose_outcome.GetError().GetMessage(), key, bucket);
+    if (!outcome.IsSuccess()
+        && outcome.GetError().GetErrorType() == Aws::S3::S3Errors::NO_SUCH_UPLOAD)
+    {
+        auto check_request = HeadObjectRequest()
+            .WithBucket(bucket)
+            .WithKey(key);
+        auto check_outcome = HeadObject(check_request);
+
+        /// If the key exists, then the MultipartUpload has been completed by one of the retries;
+        /// rewrite the outcome with a success status
+        if (check_outcome.IsSuccess())
+            outcome = Aws::S3::Model::CompleteMultipartUploadOutcome(Aws::S3::Model::CompleteMultipartUploadResult());
+    }
+
+    if (outcome.IsSuccess() && provider_type == ProviderType::GCS)
+    {
+        /// For GCS we will try to compose object at the end, otherwise we cannot do a native copy
+        /// for the object (e.g. for backups)
+        /// We don't care if the compose fails, because the upload was still successful, only the
+        /// performance for copying the object will be affected
+        S3::ComposeObjectRequest compose_req;
+        compose_req.SetBucket(bucket);
+        compose_req.SetKey(key);
+        compose_req.SetComponentNames({key});
+        compose_req.SetContentType("binary/octet-stream");
+        auto compose_outcome = ComposeObject(compose_req);
+
+        if (compose_outcome.IsSuccess())
+            LOG_TRACE(log, "Composing object was successful");
+        else
+            LOG_INFO(
+                log,
+                "Failed to compose object. Message: {}, Key: {}, Bucket: {}",
+                compose_outcome.GetError().GetMessage(), key, bucket);
+    }
 
     return outcome;
 }
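
The hunk above covers the case where CompleteMultipartUpload actually succeeded on the server but the response was lost to a network error: the retried request then gets a 404 (NO_SUCH_UPLOAD), and a HeadObject check on the key decides whether that 404 can be rewritten into success. Below is a minimal sketch of the same pattern against the plain AWS SDK for C++; the function name completeWithRecheck and the free-standing S3Client are illustrative assumptions, not ClickHouse code.

#include <aws/s3/S3Client.h>
#include <aws/s3/S3Errors.h>
#include <aws/s3/model/CompleteMultipartUploadRequest.h>
#include <aws/s3/model/CompleteMultipartUploadResult.h>
#include <aws/s3/model/HeadObjectRequest.h>

using namespace Aws::S3;

/// Complete a multipart upload and treat a spurious NO_SUCH_UPLOAD as success
/// when the object is already visible (i.e. an earlier attempt went through).
Model::CompleteMultipartUploadOutcome completeWithRecheck(
    S3Client & client, const Model::CompleteMultipartUploadRequest & request)
{
    auto outcome = client.CompleteMultipartUpload(request);

    if (!outcome.IsSuccess()
        && outcome.GetError().GetErrorType() == S3Errors::NO_SUCH_UPLOAD)
    {
        Model::HeadObjectRequest check;
        check.SetBucket(request.GetBucket());
        check.SetKey(request.GetKey());

        /// The object exists, so the upload was completed by a previous attempt:
        /// rewrite the outcome as success instead of propagating the 404.
        if (client.HeadObject(check).IsSuccess())
            return Model::CompleteMultipartUploadOutcome(Model::CompleteMultipartUploadResult());
    }
    return outcome;
}

The GCS compose step itself is unchanged; it only moves under the outcome.IsSuccess() branch, so it also runs when a spurious 404 has just been rewritten into success.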

@@ -582,7 +582,7 @@ void WriteBufferFromS3::completeMultipartUpload()
         if (outcome.GetError().GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY)
         {
             /// For unknown reason, at least MinIO can respond with NO_SUCH_KEY for put requests
-            /// BTW, NO_SUCH_UPLOAD is expected error and we shouldn't retry it
+            /// BTW, NO_SUCH_UPLOAD is an expected error and we shouldn't retry it here; DB::S3::Client takes care of it
             LOG_INFO(log, "Multipart upload failed with NO_SUCH_KEY error, will retry. {}, Parts: {}", getVerboseLogDetails(), multipart_tags.size());
         }
         else
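
This comment change is the writer-side counterpart of the client change above: the retry loop in WriteBufferFromS3 keeps retrying NO_SUCH_KEY (observed at least with MinIO) but must not retry NO_SUCH_UPLOAD, since DB::S3::Client already turns a spurious 404 into success when the object exists. A rough sketch of that classification, with attempt_upload and max_retries as assumed placeholders rather than the real member functions:

#include <aws/s3/S3Client.h>
#include <aws/s3/S3Errors.h>

#include <cstddef>
#include <functional>

using CompleteOutcome = Aws::S3::Model::CompleteMultipartUploadOutcome;

/// Retry only the error that is known to be spurious on the writer side;
/// everything else (including NO_SUCH_UPLOAD) is final after the client-side recheck.
CompleteOutcome completeWithRetries(const std::function<CompleteOutcome()> & attempt_upload, size_t max_retries)
{
    CompleteOutcome outcome;
    for (size_t i = 0; i < max_retries; ++i)
    {
        outcome = attempt_upload();
        if (outcome.IsSuccess())
            return outcome;

        /// At least MinIO can answer NO_SUCH_KEY to the completion request;
        /// treat it as transient and go for another attempt.
        if (outcome.GetError().GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY)
            continue;

        break;
    }
    return outcome;
}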