#include <Common/config.h>

#if USE_AWS_S3

#include <base/logger_useful.h>
#include <Common/FileCache.h>

#include <IO/WriteBufferFromS3.h>
#include <IO/WriteHelpers.h>

#include <Interpreters/Context.h>

#include <aws/s3/S3Client.h>
#include <aws/s3/model/CreateMultipartUploadRequest.h>
#include <aws/s3/model/CompleteMultipartUploadRequest.h>
#include <aws/s3/model/PutObjectRequest.h>
#include <aws/s3/model/UploadPartRequest.h>

#include <utility>

namespace ProfileEvents
{
    extern const Event S3WriteBytes;
    extern const Event RemoteFSCacheDownloadBytes;
}

namespace DB
{

// The S3 protocol does not allow a multipart upload to have more than 10000 parts.
// In case the server does not return an error on exceeding that number, we print a warning
// because a custom S3 implementation may allow relaxed requirements on that.
const int S3_WARN_MAX_PARTS = 10000;

namespace ErrorCodes
{
    extern const int S3_ERROR;
    extern const int LOGICAL_ERROR;
}

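/// State of one part upload that may run asynchronously on a background thread.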
struct WriteBufferFromS3::UploadPartTask
{
    Aws::S3::Model::UploadPartRequest req;
    bool is_finished = false;
    std::string tag;
    std::exception_ptr exception;
    std::optional<FileSegmentsHolder> cache_files;
};

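/// State of a single PutObject (single part) upload that may run on a background thread.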
struct WriteBufferFromS3::PutObjectTask
{
    Aws::S3::Model::PutObjectRequest req;
    bool is_finished = false;
    std::exception_ptr exception;
};

WriteBufferFromS3::WriteBufferFromS3(
    std::shared_ptr<Aws::S3::S3Client> client_ptr_,
    const String & bucket_,
    const String & key_,
    size_t minimum_upload_part_size_,
    size_t upload_part_size_multiply_factor_,
    size_t upload_part_size_multiply_threshold_,
    size_t max_single_part_upload_size_,
    std::optional<std::map<String, String>> object_metadata_,
    size_t buffer_size_,
    ScheduleFunc schedule_,
    const String & blob_name_,
    FileCachePtr cache_)
    : BufferWithOwnMemory<WriteBuffer>(buffer_size_, nullptr, 0)
    , bucket(bucket_)
    , key(key_)
    , object_metadata(std::move(object_metadata_))
    , client_ptr(std::move(client_ptr_))
    , upload_part_size(minimum_upload_part_size_)
    , upload_part_size_multiply_factor(upload_part_size_multiply_factor_)
    , upload_part_size_multiply_threshold(upload_part_size_multiply_threshold_)
    , max_single_part_upload_size(max_single_part_upload_size_)
    , schedule(std::move(schedule_))
    , blob_name(blob_name_)
    , cache(cache_)
{
    allocateBuffer();
}

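/// Append the working buffer to the in-memory temporary buffer, optionally mirror the data
/// into the local file cache, and switch to (or continue) a multipart upload once the
/// accumulated data exceeds the corresponding thresholds.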
void WriteBufferFromS3::nextImpl()
{
    if (!offset())
        return;

    /// Buffer in a bad state after exception
    if (temporary_buffer->tellp() == -1)
        allocateBuffer();

    size_t size = offset();
    temporary_buffer->write(working_buffer.begin(), size);

    ThreadGroupStatusPtr running_group = CurrentThread::isInitialized() && CurrentThread::get().getThreadGroup()
        ? CurrentThread::get().getThreadGroup()
        : MainThreadStatus::getInstance().getThreadGroup();

    if (CurrentThread::isInitialized())
        query_context = CurrentThread::get().getQueryContext();

    if (!query_context)
    {
        if (!shared_query_context)
        {
            ContextPtr global_context = CurrentThread::isInitialized() ? CurrentThread::get().getGlobalContext() : nullptr;
            if (global_context)
            {
                shared_query_context = Context::createCopy(global_context);
                shared_query_context->makeQueryContext();
            }
        }

        if (shared_query_context)
        {
            shared_query_context->setCurrentQueryId(toString(UUIDHelpers::generateV4()));
            query_context = shared_query_context;
        }
    }

    if (cacheEnabled())
    {
        if (blob_name.empty())
            throw Exception(ErrorCodes::LOGICAL_ERROR, "Empty blob name");

        auto cache_key = cache->hash(blob_name);
        file_segments_holder.emplace(cache->setDownloading(cache_key, current_download_offset, size));
        current_download_offset += size;

        size_t remaining_size = size;
        for (const auto & file_segment : file_segments_holder->file_segments)
        {
            size_t current_size = std::min(file_segment->range().size(), remaining_size);
            remaining_size -= current_size;

            if (file_segment->reserve(current_size))
            {
                file_segment->writeInMemory(working_buffer.begin(), current_size);
            }
            else
            {
                break;
            }
        }
    }

    ProfileEvents::increment(ProfileEvents::S3WriteBytes, offset());

    last_part_size += offset();

    /// Data size exceeds singlepart upload threshold, need to use multipart upload.
    if (multipart_upload_id.empty() && last_part_size > max_single_part_upload_size)
        createMultipartUpload();

    if (!multipart_upload_id.empty() && last_part_size > upload_part_size)
    {
        writePart();

        allocateBuffer();
    }

    waitForReadyBackGroundTasks();
}

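/// Prepare an empty temporary buffer for the next part. Every upload_part_size_multiply_threshold
/// uploaded parts, the part size is multiplied by upload_part_size_multiply_factor.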
void WriteBufferFromS3::allocateBuffer()
{
    if (total_parts_uploaded != 0 && total_parts_uploaded % upload_part_size_multiply_threshold == 0)
        upload_part_size *= upload_part_size_multiply_factor;

    temporary_buffer = Aws::MakeShared<Aws::StringStream>("temporary buffer");
    temporary_buffer->exceptions(std::ios::badbit);
    last_part_size = 0;
}

WriteBufferFromS3::~WriteBufferFromS3()
{
    try
    {
        finalize();
    }
    catch (...)
    {
        tryLogCurrentException(__PRETTY_FUNCTION__);
    }
}

bool WriteBufferFromS3::cacheEnabled() const
{
    return cache != nullptr && !IFileCache::shouldBypassCache();
}

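/// Flush the buffered data: as a single PutObject request if no multipart upload was started,
/// otherwise as the last part of the multipart upload.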
void WriteBufferFromS3::preFinalize()
{
    next();

    if (multipart_upload_id.empty())
    {
        makeSinglepartUpload();
    }
    else
    {
        /// Write rest of the data as last part.
        writePart();
    }

    is_prefinalized = true;
}

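/// Wait for all background uploads to finish and, if a multipart upload was started, complete it.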
void WriteBufferFromS3::finalizeImpl()
{
    if (!is_prefinalized)
        preFinalize();

    waitForAllBackGroundTasks();

    if (!multipart_upload_id.empty())
        completeMultipartUpload();
}

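/// Initiate a multipart upload and remember its upload id for the subsequent parts.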
void WriteBufferFromS3::createMultipartUpload()
{
    Aws::S3::Model::CreateMultipartUploadRequest req;
    req.SetBucket(bucket);
    req.SetKey(key);

    /// If we don't do it, AWS SDK can mistakenly set it to application/xml, see https://github.com/aws/aws-sdk-cpp/issues/1840
    req.SetContentType("binary/octet-stream");

    if (object_metadata.has_value())
        req.SetMetadata(object_metadata.value());

    auto outcome = client_ptr->CreateMultipartUpload(req);

    if (outcome.IsSuccess())
    {
        multipart_upload_id = outcome.GetResult().GetUploadId();
        LOG_TRACE(log, "Multipart upload has been created. Bucket: {}, Key: {}, Upload id: {}", bucket, key, multipart_upload_id);
    }
    else
        throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
}

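/// Upload the contents of the temporary buffer as one part of the multipart upload,
/// either synchronously or via the schedule callback on a background thread.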
void WriteBufferFromS3::writePart()
{
    auto size = temporary_buffer->tellp();

    LOG_TRACE(log, "Writing part. Bucket: {}, Key: {}, Upload_id: {}, Size: {}", bucket, key, multipart_upload_id, size);

    if (size < 0)
    {
        LOG_WARNING(log, "Skipping part upload. Buffer is in a bad state, it means that we have tried to upload something, but got an exception.");
        return;
    }

    if (size == 0)
    {
        LOG_TRACE(log, "Skipping writing part. Buffer is empty.");
        return;
    }

    if (part_tags.size() == S3_WARN_MAX_PARTS)
    {
        // Don't throw an exception here by ourselves but leave the decision to the S3 server.
        LOG_WARNING(log, "Maximum part number in the S3 protocol has been reached (too many parts). The server may not accept this whole upload.");
    }

    if (schedule)
    {
        UploadPartTask * task = nullptr;
        int part_number;
        {
            std::lock_guard lock(bg_tasks_mutex);
            task = &upload_object_tasks.emplace_back();
            ++num_added_bg_tasks;
            part_number = num_added_bg_tasks;
        }

        fillUploadRequest(task->req, part_number);
        schedule([this, task]()
        {
            try
            {
                processUploadRequest(*task);
            }
            catch (...)
            {
                task->exception = std::current_exception();
            }

            {
                std::lock_guard lock(bg_tasks_mutex);
                task->is_finished = true;
                ++num_finished_bg_tasks;

                /// Notification under the mutex is important here.
                /// Otherwise, the WriteBuffer could be destroyed in between
                /// releasing the lock and the condvar notification.
                bg_tasks_condvar.notify_one();
            }

            finalizeCacheIfNeeded();
        }, query_context);
    }
    else
    {
        UploadPartTask task;
        fillUploadRequest(task.req, part_tags.size() + 1);
        processUploadRequest(task);
        part_tags.push_back(task.tag);
        finalizeCacheIfNeeded();
    }
}

void WriteBufferFromS3::fillUploadRequest(Aws::S3::Model::UploadPartRequest & req, int part_number)
{
    req.SetBucket(bucket);
    req.SetKey(key);
    req.SetPartNumber(part_number);
    req.SetUploadId(multipart_upload_id);
    req.SetContentLength(temporary_buffer->tellp());
    req.SetBody(temporary_buffer);

    /// If we don't do it, AWS SDK can mistakenly set it to application/xml, see https://github.com/aws/aws-sdk-cpp/issues/1840
    req.SetContentType("binary/octet-stream");
}

void WriteBufferFromS3::processUploadRequest(UploadPartTask & task)
{
    auto outcome = client_ptr->UploadPart(task.req);

    if (outcome.IsSuccess())
    {
        task.tag = outcome.GetResult().GetETag();
        LOG_TRACE(log, "Writing part finished. Bucket: {}, Key: {}, Upload_id: {}, Etag: {}, Parts: {}", bucket, key, multipart_upload_id, task.tag, part_tags.size());
    }
    else
        throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);

    total_parts_uploaded++;
}

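/// Ask S3 to assemble the final object from the uploaded parts using the collected ETags.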
void WriteBufferFromS3::completeMultipartUpload()
{
    LOG_TRACE(log, "Completing multipart upload. Bucket: {}, Key: {}, Upload_id: {}, Parts: {}", bucket, key, multipart_upload_id, part_tags.size());

    if (part_tags.empty())
        throw Exception("Failed to complete multipart upload. No parts have been uploaded", ErrorCodes::S3_ERROR);

    Aws::S3::Model::CompleteMultipartUploadRequest req;
    req.SetBucket(bucket);
    req.SetKey(key);
    req.SetUploadId(multipart_upload_id);

    Aws::S3::Model::CompletedMultipartUpload multipart_upload;
    for (size_t i = 0; i < part_tags.size(); ++i)
    {
        Aws::S3::Model::CompletedPart part;
        multipart_upload.AddParts(part.WithETag(part_tags[i]).WithPartNumber(i + 1));
    }

    req.SetMultipartUpload(multipart_upload);

    auto outcome = client_ptr->CompleteMultipartUpload(req);

    if (outcome.IsSuccess())
        LOG_TRACE(log, "Multipart upload has completed. Bucket: {}, Key: {}, Upload_id: {}, Parts: {}", bucket, key, multipart_upload_id, part_tags.size());
    else
    {
        throw Exception(ErrorCodes::S3_ERROR, "{} Tags:{}",
            outcome.GetError().GetMessage(),
            fmt::join(part_tags.begin(), part_tags.end(), " "));
    }
}

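/// Upload the whole buffered object with a single PutObject request. This path is taken
/// when the total data size never exceeded max_single_part_upload_size.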
void WriteBufferFromS3::makeSinglepartUpload()
{
    auto size = temporary_buffer->tellp();
    bool with_pool = bool(schedule);

    LOG_TRACE(log, "Making single part upload. Bucket: {}, Key: {}, Size: {}, WithPool: {}", bucket, key, size, with_pool);

    if (size < 0)
    {
        LOG_WARNING(log, "Skipping single part upload. Buffer is in a bad state, it means that we have tried to upload something, but got an exception.");
        return;
    }

    if (size == 0)
    {
        LOG_TRACE(log, "Skipping single part upload. Buffer is empty.");
        return;
    }

    if (schedule)
    {
        put_object_task = std::make_unique<PutObjectTask>();
        fillPutRequest(put_object_task->req);
        schedule([this]()
        {
            try
            {
                processPutRequest(*put_object_task);
            }
            catch (...)
            {
                put_object_task->exception = std::current_exception();
            }

            {
                std::lock_guard lock(bg_tasks_mutex);
                put_object_task->is_finished = true;

                /// Notification under the mutex is important here.
                /// Otherwise, the WriteBuffer could be destroyed in between
                /// releasing the lock and the condvar notification.
                bg_tasks_condvar.notify_one();
            }

            finalizeCacheIfNeeded();
        }, query_context);
    }
    else
    {
        PutObjectTask task;
        fillPutRequest(task.req);
        processPutRequest(task);
        finalizeCacheIfNeeded();
    }
}

void WriteBufferFromS3::fillPutRequest(Aws::S3::Model::PutObjectRequest & req)
{
    req.SetBucket(bucket);
    req.SetKey(key);
    req.SetContentLength(temporary_buffer->tellp());
    req.SetBody(temporary_buffer);
    if (object_metadata.has_value())
        req.SetMetadata(object_metadata.value());

    /// If we don't do it, AWS SDK can mistakenly set it to application/xml, see https://github.com/aws/aws-sdk-cpp/issues/1840
    req.SetContentType("binary/octet-stream");
}

void WriteBufferFromS3::processPutRequest(PutObjectTask & task)
{
    auto outcome = client_ptr->PutObject(task.req);
    bool with_pool = bool(schedule);

    if (outcome.IsSuccess())
        LOG_TRACE(log, "Single part upload has completed. Bucket: {}, Key: {}, Object size: {}, WithPool: {}", bucket, key, task.req.GetContentLength(), with_pool);
    else
        throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
}

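/// Finalize the file segments written to the local cache and account the cached bytes.
/// Cache failures are only logged and never fail the S3 upload itself.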
void WriteBufferFromS3::finalizeCacheIfNeeded()
{
    if (!file_segments_holder)
        return;

    auto & file_segments = file_segments_holder->file_segments;
    for (auto file_segment_it = file_segments.begin(); file_segment_it != file_segments.end();)
    {
        try
        {
            size_t size = (*file_segment_it)->finalizeWrite();
            file_segment_it = file_segments.erase(file_segment_it);

            ProfileEvents::increment(ProfileEvents::RemoteFSCacheDownloadBytes, size);
        }
        catch (...)
        {
            tryLogCurrentException(__PRETTY_FUNCTION__);
            /// Skip the failed segment, otherwise the loop would never advance.
            ++file_segment_it;
        }
    }
}

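/// Collect the tags of background part uploads that have already finished, without waiting
/// for the rest. If one of them failed, wait for all remaining tasks and rethrow its exception.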
void WriteBufferFromS3::waitForReadyBackGroundTasks()
{
    if (schedule)
    {
        std::lock_guard lock(bg_tasks_mutex);
        {
            while (!upload_object_tasks.empty() && upload_object_tasks.front().is_finished)
            {
                auto & task = upload_object_tasks.front();
                auto exception = task.exception;
                auto tag = std::move(task.tag);
                upload_object_tasks.pop_front();

                if (exception)
                {
                    waitForAllBackGroundTasks();
                    std::rethrow_exception(exception);
                }

                part_tags.push_back(tag);
            }
        }
    }
}

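/// Wait until every scheduled background upload has finished, collect the remaining part tags,
/// and rethrow the first stored exception if any task failed.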
void WriteBufferFromS3::waitForAllBackGroundTasks()
{
    if (schedule)
    {
        std::unique_lock lock(bg_tasks_mutex);
        bg_tasks_condvar.wait(lock, [this]() { return num_added_bg_tasks == num_finished_bg_tasks; });

        while (!upload_object_tasks.empty())
        {
            auto & task = upload_object_tasks.front();
            if (task.exception)
                std::rethrow_exception(task.exception);

            part_tags.push_back(task.tag);

            upload_object_tasks.pop_front();
        }

        if (put_object_task)
        {
            bg_tasks_condvar.wait(lock, [this]() { return put_object_task->is_finished; });
            if (put_object_task->exception)
                std::rethrow_exception(put_object_task->exception);
        }
    }
}

}

#endif