mirror of
https://github.com/ClickHouse/ClickHouse.git
synced 2024-12-04 13:32:13 +00:00
Added use_native_copy flag and switched to StartCopyFromUri for native copies of large files
This commit is contained in:
parent
91bad5bc39
commit
c14605caa7
@ -167,7 +167,8 @@ std::unique_ptr<AzureObjectStorageSettings> getAzureBlobStorageSettings(const Po
|
||||
config.getInt(config_prefix + ".list_object_keys_size", 1000),
|
||||
config.getUInt64(config_prefix + ".min_upload_part_size", 16 * 1024 * 1024),
|
||||
config.getUInt64(config_prefix + ".max_upload_part_size", 5ULL * 1024 * 1024 * 1024),
|
||||
config.getUInt64(config_prefix + ".max_part_number", 10000)
|
||||
config.getUInt64(config_prefix + ".max_part_number", 10000),
|
||||
config.getBool(config_prefix + ".use_native_copy", false)
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -27,7 +27,8 @@ struct AzureObjectStorageSettings
|
||||
int list_object_keys_size_,
|
||||
size_t min_upload_part_size_,
|
||||
size_t max_upload_part_size_,
|
||||
size_t max_part_number_)
|
||||
size_t max_part_number_,
|
||||
bool use_native_copy_)
|
||||
: max_single_part_upload_size(max_single_part_upload_size_)
|
||||
, min_bytes_for_seek(min_bytes_for_seek_)
|
||||
, max_single_read_retries(max_single_read_retries_)
|
||||
@ -36,6 +37,7 @@ struct AzureObjectStorageSettings
|
||||
, min_upload_part_size(min_upload_part_size_)
|
||||
, max_upload_part_size(max_upload_part_size_)
|
||||
, max_part_number(max_part_number_)
|
||||
, use_native_copy(use_native_copy_)
|
||||
{
|
||||
}
|
||||
|
||||
@ -49,6 +51,7 @@ struct AzureObjectStorageSettings
|
||||
size_t min_upload_part_size = 16 * 1024 * 1024;
|
||||
size_t max_upload_part_size = 5ULL * 1024 * 1024 * 1024;
|
||||
size_t max_part_number = 10000;
|
||||
bool use_native_copy = false;
|
||||
};
|
||||
|
||||
using AzureClient = Azure::Storage::Blobs::BlobContainerClient;
|
||||
@ -134,10 +137,7 @@ public:
|
||||
|
||||
bool isRemote() const override { return true; }
|
||||
|
||||
MultiVersion<Azure::Storage::Blobs::BlobContainerClient> & getClient()
|
||||
{
|
||||
return client;
|
||||
}
|
||||
MultiVersion<Azure::Storage::Blobs::BlobContainerClient> & getClient() { return client; }
|
||||
|
||||
private:
|
||||
const String name;
|
||||
|
@ -30,6 +30,7 @@ namespace ErrorCodes
|
||||
{
|
||||
extern const int LOGICAL_ERROR;
|
||||
extern const int INVALID_CONFIG_PARAMETER;
|
||||
extern const int AZURE_BLOB_STORAGE_ERROR;
|
||||
}
|
||||
|
||||
|
||||
@ -358,15 +359,34 @@ void copyAzureBlobStorageFile(
|
||||
bool for_disk_azure_blob_storage)
|
||||
{
|
||||
|
||||
if (size < max_single_operation_copy_size)
|
||||
if (settings->use_native_copy )
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::AzureCopyObject);
|
||||
if (for_disk_azure_blob_storage)
|
||||
ProfileEvents::increment(ProfileEvents::DiskAzureCopyObject);
|
||||
|
||||
auto block_blob_client_src = src_client.get()->GetBlockBlobClient(src_blob);
|
||||
auto block_blob_client_dest = dest_client.get()->GetBlockBlobClient(dest_blob);
|
||||
auto uri = block_blob_client_src.GetUrl();
|
||||
block_blob_client_dest.CopyFromUri(uri);
|
||||
auto source_uri = block_blob_client_src.GetUrl();
|
||||
|
||||
if (size < max_single_operation_copy_size)
|
||||
{
|
||||
block_blob_client_dest.CopyFromUri(source_uri);
|
||||
}
|
||||
else
|
||||
{
|
||||
Azure::Storage::Blobs::StartBlobCopyOperation operation = block_blob_client_dest.StartCopyFromUri(source_uri);
|
||||
|
||||
// Wait for the operation to finish, polling the status every 100 milliseconds.
|
||||
auto copy_response = operation.PollUntilDone(std::chrono::milliseconds(100));
|
||||
auto properties_model = copy_response.Value;
|
||||
|
||||
if (properties_model.CopySource.HasValue())
|
||||
{
|
||||
throw Exception(ErrorCodes::AZURE_BLOB_STORAGE_ERROR, "Copy failed");
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
|
Loading…
Reference in New Issue
Block a user