Renamed Bucket-Key to Container-Blob
This commit is contained in:
parent 4122de9721
commit df221f7db6
@@ -134,8 +134,8 @@ void BackupReaderAzureBlobStorage::copyFileToDisk(const String & path_in_backup,
         fs::path(configuration.blob_path) / path_in_backup,
         0,
         file_size,
-        /* dest_bucket= */ blob_path[1],
-        /* dest_key= */ blob_path[0],
+        /* dest_container */ blob_path[1],
+        /* dest_path */ blob_path[0],
         settings,
         read_settings,
         object_attributes,
@@ -178,7 +178,7 @@ void BackupWriterAzureBlobStorage::copyFileFromDisk(const String & path_in_backu
     auto source_data_source_description = src_disk->getDataSourceDescription();
     if (source_data_source_description.sameKind(data_source_description) && (source_data_source_description.is_encrypted == copy_encrypted))
     {
-        /// getBlobPath() can return more than 3 elements if the file is stored as multiple objects in AzureBlobStorage bucket.
+        /// getBlobPath() can return more than 3 elements if the file is stored as multiple objects in AzureBlobStorage container.
         /// In this case we can't use the native copy.
         if (auto blob_path = src_disk->getBlobPath(src_path); blob_path.size() == 2)
         {
@@ -200,8 +200,8 @@ void BackupWriterAzureBlobStorage::copyFileFromDisk(const String & path_in_backu
             copyAzureBlobStorageFile(
                 src_client,
                 client,
-                /* src_bucket */ blob_path[1],
-                /* src_key= */ blob_path[0],
+                /* src_container */ blob_path[1],
+                /* src_path */ blob_path[0],
                 start_pos,
                 length,
                 configuration.container,
@@ -238,8 +238,8 @@ void BackupWriterAzureBlobStorage::copyFile(const String & destination, const St
         fs::path(configuration.blob_path),
         0,
         size,
-        /* dest_bucket= */ destination,
-        /* dest_key= */ configuration.blob_path,
+        /* dest_container */ destination,
+        /* dest_path */ configuration.blob_path,
         settings,
         read_settings,
         {},
@@ -46,8 +46,8 @@ namespace
             std::shared_ptr<Azure::Storage::Blobs::BlobContainerClient> client_,
             size_t offset_,
             size_t total_size_,
-            const String & dest_bucket_,
-            const String & dest_key_,
+            const String & dest_container_,
+            const String & dest_blob_,
             std::shared_ptr<AzureObjectStorageSettings> settings_,
             const std::optional<std::map<String, String>> & object_metadata_,
             ThreadPoolCallbackRunner<void> schedule_,
@@ -57,8 +57,8 @@ namespace
             , client(client_)
             , offset (offset_)
             , total_size (total_size_)
-            , dest_bucket(dest_bucket_)
-            , dest_key(dest_key_)
+            , dest_container(dest_container_)
+            , dest_blob(dest_blob_)
             , settings(settings_)
             , object_metadata(object_metadata_)
             , schedule(schedule_)
@@ -75,8 +75,8 @@ namespace
         std::shared_ptr<Azure::Storage::Blobs::BlobContainerClient> client;
         size_t offset;
         size_t total_size;
-        const String & dest_bucket;
-        const String & dest_key;
+        const String & dest_container;
+        const String & dest_blob;
         std::shared_ptr<AzureObjectStorageSettings> settings;
         const std::optional<std::map<String, String>> & object_metadata;
         ThreadPoolCallbackRunner<void> schedule;
@@ -170,7 +170,7 @@ namespace

         void completeMultipartUpload()
         {
-            auto block_blob_client = client->GetBlockBlobClient(dest_key);
+            auto block_blob_client = client->GetBlockBlobClient(dest_blob);
             block_blob_client.CommitBlockList(block_ids);
         }

@@ -207,7 +207,7 @@ namespace

         void uploadPart(size_t part_offset, size_t part_size)
         {
-            LOG_TRACE(log, "Writing part. Bucket: {}, Key: {}, Size: {}", dest_bucket, dest_key, part_size);
+            LOG_TRACE(log, "Writing part. Container: {}, Blob: {}, Size: {}", dest_container, dest_blob, part_size);

             if (!part_size)
             {
@@ -286,7 +286,7 @@ namespace

             std::lock_guard lock(bg_tasks_mutex); /// Protect bg_tasks from race
             task.block_id = block_id;
-            LOG_TRACE(log, "Writing part finished. Bucket: {}, Key: {}, block_id: {}, Parts: {}", dest_bucket, dest_key, block_id, bg_tasks.size());
+            LOG_TRACE(log, "Writing part finished. Container: {}, Blob: {}, block_id: {}, Parts: {}", dest_container, dest_blob, block_id, bg_tasks.size());
         }

         String processUploadPartRequest(UploadPartTask & task)
@@ -295,7 +295,7 @@ namespace
             if (for_disk_azure_blob_storage)
                 ProfileEvents::increment(ProfileEvents::DiskAzureUploadPart);

-            auto block_blob_client = client->GetBlockBlobClient(dest_key);
+            auto block_blob_client = client->GetBlockBlobClient(dest_blob);
             task.block_id = getRandomASCIIString(64);
             Azure::Core::IO::MemoryBodyStream memory(reinterpret_cast<const uint8_t *>(task.data), task.size);
             block_blob_client.StageBlock(task.block_id, memory);
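The uploadPart / completeMultipartUpload methods above follow the Azure block-blob upload pattern: each part is staged under a unique block id (StageBlock) and the destination blob is assembled by committing the block list (CommitBlockList). Below is a minimal standalone sketch of that pattern with the Azure C++ SDK, not part of this commit; the connection string and container/blob names are placeholders.

#include <azure/storage/blobs.hpp>
#include <azure/core/base64.hpp>
#include <azure/core/io/body_stream.hpp>
#include <string>
#include <vector>

/// Sketch only: stage each part as a block, then commit the full block list
/// so the destination blob becomes visible in one step.
void uploadBlobInBlocks(
    const std::string & connection_string,
    const std::string & dest_container,
    const std::string & dest_blob,
    const std::vector<std::vector<uint8_t>> & parts)
{
    auto container_client = Azure::Storage::Blobs::BlobContainerClient::CreateFromConnectionString(
        connection_string, dest_container);
    auto block_blob_client = container_client.GetBlockBlobClient(dest_blob);

    std::vector<std::string> block_ids;
    for (const auto & part : parts)
    {
        /// Block ids must be unique within the blob and of equal length;
        /// here a fixed-width counter is base64-encoded.
        std::string raw_id = std::to_string(1000000 + block_ids.size());
        std::string block_id = Azure::Core::Convert::Base64Encode(
            std::vector<uint8_t>(raw_id.begin(), raw_id.end()));

        Azure::Core::IO::MemoryBodyStream stream(part.data(), part.size());
        block_blob_client.StageBlock(block_id, stream);
        block_ids.push_back(block_id);
    }

    /// Nothing is visible on the service until the block list is committed.
    block_blob_client.CommitBlockList(block_ids);
}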
@@ -330,14 +330,14 @@ void copyDataToAzureBlobStorageFile(
     size_t offset,
     size_t size,
     std::shared_ptr<Azure::Storage::Blobs::BlobContainerClient> & dest_client,
-    const String & dest_bucket,
-    const String & dest_key,
+    const String & dest_container,
+    const String & dest_blob,
     std::shared_ptr<AzureObjectStorageSettings> settings,
     const std::optional<std::map<String, String>> & object_metadata,
     ThreadPoolCallbackRunner<void> schedule,
     bool for_disk_azure_blob_storage)
 {
-    UploadHelper helper{create_read_buffer, dest_client, offset, size, dest_bucket, dest_key, settings, object_metadata, schedule, for_disk_azure_blob_storage, &Poco::Logger::get("copyDataToAzureBlobStorageFile")};
+    UploadHelper helper{create_read_buffer, dest_client, offset, size, dest_container, dest_blob, settings, object_metadata, schedule, for_disk_azure_blob_storage, &Poco::Logger::get("copyDataToAzureBlobStorageFile")};
     helper.performCopy();
 }

@@ -345,12 +345,12 @@ void copyDataToAzureBlobStorageFile(
 void copyAzureBlobStorageFile(
     std::shared_ptr<Azure::Storage::Blobs::BlobContainerClient> src_client,
     std::shared_ptr<Azure::Storage::Blobs::BlobContainerClient> dest_client,
-    const String & src_bucket,
-    const String & src_key,
+    const String & src_container,
+    const String & src_blob,
     size_t offset,
     size_t size,
-    const String & dest_bucket,
-    const String & dest_key,
+    const String & dest_container,
+    const String & dest_blob,
     std::shared_ptr<AzureObjectStorageSettings> settings,
     const ReadSettings & read_settings,
     const std::optional<std::map<String, String>> & object_metadata,
@@ -363,21 +363,21 @@ void copyAzureBlobStorageFile(
         ProfileEvents::increment(ProfileEvents::AzureCopyObject);
         if (for_disk_azure_blob_storage)
             ProfileEvents::increment(ProfileEvents::DiskAzureCopyObject);
-        auto block_blob_client_src = src_client->GetBlockBlobClient(src_key);
-        auto block_blob_client_dest = dest_client->GetBlockBlobClient(dest_key);
+        auto block_blob_client_src = src_client->GetBlockBlobClient(src_blob);
+        auto block_blob_client_dest = dest_client->GetBlockBlobClient(dest_blob);
         auto uri = block_blob_client_src.GetUrl();
         block_blob_client_dest.CopyFromUri(uri);
     }
     else
     {
-        LOG_TRACE(&Poco::Logger::get("copyAzureBlobStorageFile"), "Reading from Bucket: {}, Key: {}", src_bucket, src_key);
+        LOG_TRACE(&Poco::Logger::get("copyAzureBlobStorageFile"), "Reading from Container: {}, Blob: {}", src_container, src_blob);
         auto create_read_buffer = [&]
         {
-            return std::make_unique<ReadBufferFromAzureBlobStorage>(src_client, src_key, read_settings, settings->max_single_read_retries,
+            return std::make_unique<ReadBufferFromAzureBlobStorage>(src_client, src_blob, read_settings, settings->max_single_read_retries,
                 settings->max_single_download_retries);
         };

-        UploadHelper helper{create_read_buffer, dest_client, offset, size, dest_bucket, dest_key, settings, object_metadata, schedule, for_disk_azure_blob_storage, &Poco::Logger::get("copyAzureBlobStorageFile")};
+        UploadHelper helper{create_read_buffer, dest_client, offset, size, dest_container, dest_blob, settings, object_metadata, schedule, for_disk_azure_blob_storage, &Poco::Logger::get("copyAzureBlobStorageFile")};
         helper.performCopy();
     }
 }

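The first branch above performs a native, server-side copy when both source and destination are Azure Blob Storage. A minimal sketch of that path with the Azure C++ SDK follows; it is not part of this commit, the two container clients are assumed to be already constructed, and the source URL must be readable with the destination's credentials (otherwise a SAS token would be required).

#include <azure/storage/blobs.hpp>
#include <string>

/// Sketch only: copy one blob to another entirely on the service side.
void nativeCopyBlob(
    Azure::Storage::Blobs::BlobContainerClient & src_client,
    Azure::Storage::Blobs::BlobContainerClient & dest_client,
    const std::string & src_blob,
    const std::string & dest_blob)
{
    auto block_blob_client_src = src_client.GetBlockBlobClient(src_blob);
    auto block_blob_client_dest = dest_client.GetBlockBlobClient(dest_blob);

    /// CopyFromUri issues a synchronous server-side copy;
    /// no blob data passes through this process.
    block_blob_client_dest.CopyFromUri(block_blob_client_src.GetUrl());
}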
@@ -23,12 +23,12 @@ using CreateReadBuffer = std::function<std::unique_ptr<SeekableReadBuffer>()>;
 void copyAzureBlobStorageFile(
     std::shared_ptr<Azure::Storage::Blobs::BlobContainerClient> src_client,
     std::shared_ptr<Azure::Storage::Blobs::BlobContainerClient> dest_client,
-    const String & src_bucket,
-    const String & src_key,
+    const String & src_container,
+    const String & src_path,
     size_t src_offset,
     size_t src_size,
-    const String & dest_bucket,
-    const String & dest_key,
+    const String & dest_container,
+    const String & dest_path,
     std::shared_ptr<AzureObjectStorageSettings> settings,
     const ReadSettings & read_settings,
     const std::optional<std::map<String, String>> & object_metadata = std::nullopt,
@@ -46,8 +46,8 @@ void copyDataToAzureBlobStorageFile(
     size_t offset,
     size_t size,
     std::shared_ptr<Azure::Storage::Blobs::BlobContainerClient> & client,
-    const String & dest_bucket,
-    const String & dest_key,
+    const String & dest_container,
+    const String & dest_blob,
     std::shared_ptr<AzureObjectStorageSettings> settings,
     const std::optional<std::map<String, String>> & object_metadata = std::nullopt,
     ThreadPoolCallbackRunner<void> schedule_ = {},
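When the native copy is not possible, the functions declared above fall back to streaming: the source blob is read in ranges and re-uploaded part by part (ClickHouse uses ReadBufferFromAzureBlobStorage for the read side). Below is a minimal sketch of such a ranged read with the Azure C++ SDK, not part of this commit; the connection string and container/blob names are placeholders.

#include <azure/storage/blobs.hpp>
#include <string>
#include <vector>

/// Sketch only: download a byte range of a blob, roughly what the streamed
/// copy path does per part on the source side.
std::vector<uint8_t> readBlobRange(
    const std::string & connection_string,
    const std::string & src_container,
    const std::string & src_blob,
    int64_t offset,
    int64_t length)
{
    auto container_client = Azure::Storage::Blobs::BlobContainerClient::CreateFromConnectionString(
        connection_string, src_container);
    auto blob_client = container_client.GetBlobClient(src_blob);

    /// Restrict the download to [offset, offset + length).
    Azure::Storage::Blobs::DownloadBlobToOptions options;
    options.Range = Azure::Core::Http::HttpRange{offset, length};

    std::vector<uint8_t> buffer(static_cast<size_t>(length));
    blob_client.DownloadTo(buffer.data(), buffer.size(), options);
    return buffer;
}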