Merge pull request #53326 from azat/backups/base-backup-credentials
Add ability to override credentials for accessing base backup in S3
Commit 90b174d3dc
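In short: the new BACKUP/RESTORE setting use_same_s3_credentials_for_base_backup makes the server access the base backup with the credentials of the outer S3() engine, overriding whatever credentials are passed or recorded for the base backup. A minimal usage sketch, modelled on the test added at the end of this diff (endpoint, paths, keys and table names are placeholders):

    -- Full backup with explicit S3 credentials.
    BACKUP TABLE data TO S3('http://localhost:11111/test/backups/base', 'key', 'secret');

    -- Incremental backup: with the setting enabled, the base backup is read with the
    -- outer 'key'/'secret', even if different credentials are given for it here.
    BACKUP TABLE data TO S3('http://localhost:11111/test/backups/inc_1', 'key', 'secret')
        SETTINGS base_backup = S3('http://localhost:11111/test/backups/base', 'old-key', 'old-secret'),
                 use_same_s3_credentials_for_base_backup = 1;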
@@ -39,6 +39,7 @@ public:
         std::optional<UUID> backup_uuid;
         bool deduplicate_files = true;
         bool allow_s3_native_copy = true;
+        bool use_same_s3_credentials_for_base_backup = false;
         ReadSettings read_settings;
         WriteSettings write_settings;
     };
@@ -77,12 +77,14 @@ namespace


 BackupImpl::BackupImpl(
-    const String & backup_name_for_logging_,
+    const BackupInfo & backup_info_,
     const ArchiveParams & archive_params_,
     const std::optional<BackupInfo> & base_backup_info_,
     std::shared_ptr<IBackupReader> reader_,
-    const ContextPtr & context_)
-    : backup_name_for_logging(backup_name_for_logging_)
+    const ContextPtr & context_,
+    bool use_same_s3_credentials_for_base_backup_)
+    : backup_info(backup_info_)
+    , backup_name_for_logging(backup_info.toStringForLogging())
     , use_archive(!archive_params_.archive_name.empty())
     , archive_params(archive_params_)
     , open_mode(OpenMode::READ)
@@ -90,13 +92,14 @@ BackupImpl::BackupImpl(
     , is_internal_backup(false)
     , version(INITIAL_BACKUP_VERSION)
     , base_backup_info(base_backup_info_)
+    , use_same_s3_credentials_for_base_backup(use_same_s3_credentials_for_base_backup_)
 {
     open(context_);
 }


 BackupImpl::BackupImpl(
-    const String & backup_name_for_logging_,
+    const BackupInfo & backup_info_,
     const ArchiveParams & archive_params_,
     const std::optional<BackupInfo> & base_backup_info_,
     std::shared_ptr<IBackupWriter> writer_,
@@ -104,8 +107,10 @@ BackupImpl::BackupImpl(
     bool is_internal_backup_,
     const std::shared_ptr<IBackupCoordination> & coordination_,
     const std::optional<UUID> & backup_uuid_,
-    bool deduplicate_files_)
-    : backup_name_for_logging(backup_name_for_logging_)
+    bool deduplicate_files_,
+    bool use_same_s3_credentials_for_base_backup_)
+    : backup_info(backup_info_)
+    , backup_name_for_logging(backup_info.toStringForLogging())
     , use_archive(!archive_params_.archive_name.empty())
     , archive_params(archive_params_)
     , open_mode(OpenMode::WRITE)
@@ -116,6 +121,7 @@ BackupImpl::BackupImpl(
     , version(CURRENT_BACKUP_VERSION)
     , base_backup_info(base_backup_info_)
     , deduplicate_files(deduplicate_files_)
+    , use_same_s3_credentials_for_base_backup(use_same_s3_credentials_for_base_backup_)
     , log(&Poco::Logger::get("BackupImpl"))
 {
     open(context_);
@@ -162,10 +168,16 @@ void BackupImpl::open(const ContextPtr & context)

     if (base_backup_info)
     {
+        if (use_same_s3_credentials_for_base_backup)
+            backup_info.copyS3CredentialsTo(*base_backup_info);
+
         BackupFactory::CreateParams params;
         params.backup_info = *base_backup_info;
         params.open_mode = OpenMode::READ;
         params.context = context;
+        /// use_same_s3_credentials_for_base_backup should be inherited for base backups
+        params.use_same_s3_credentials_for_base_backup = use_same_s3_credentials_for_base_backup;
+
         base_backup = BackupFactory::instance().createBackup(params);

         if (open_mode == OpenMode::WRITE)
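Note that the flag is also propagated into the CreateParams used to open the base backup, so it applies recursively when a base backup has a base backup of its own. The test below relies on this: inc_3 is created on top of inc_1, whose stored reference to its own base carries an invalid password, and the backup still succeeds (a sketch, placeholders as above):

    BACKUP TABLE data TO S3('http://localhost:11111/test/backups/inc_3', 'key', 'secret')
        SETTINGS base_backup = S3('http://localhost:11111/test/backups/inc_1', 'key', 'secret'),
                 use_same_s3_credentials_for_base_backup = 1;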
@@ -35,14 +35,15 @@ public:
     };

     BackupImpl(
-        const String & backup_name_for_logging_,
+        const BackupInfo & backup_info_,
         const ArchiveParams & archive_params_,
         const std::optional<BackupInfo> & base_backup_info_,
         std::shared_ptr<IBackupReader> reader_,
-        const ContextPtr & context_);
+        const ContextPtr & context_,
+        bool use_same_s3_credentials_for_base_backup_);

     BackupImpl(
-        const String & backup_name_for_logging_,
+        const BackupInfo & backup_info_,
         const ArchiveParams & archive_params_,
         const std::optional<BackupInfo> & base_backup_info_,
         std::shared_ptr<IBackupWriter> writer_,
@@ -50,7 +51,8 @@ public:
         bool is_internal_backup_,
         const std::shared_ptr<IBackupCoordination> & coordination_,
         const std::optional<UUID> & backup_uuid_,
-        bool deduplicate_files_);
+        bool deduplicate_files_,
+        bool use_same_s3_credentials_for_base_backup_);

     ~BackupImpl() override;

@@ -109,6 +111,7 @@ private:

     std::unique_ptr<SeekableReadBuffer> readFileImpl(const SizeAndChecksum & size_and_checksum, bool read_encrypted) const;

+    BackupInfo backup_info;
     const String backup_name_for_logging;
     const bool use_archive;
     const ArchiveParams archive_params;
@@ -145,6 +148,7 @@ private:

     bool writing_finalized = false;
     bool deduplicate_files = true;
+    bool use_same_s3_credentials_for_base_backup = false;
     const Poco::Logger * log;
 };

@@ -98,4 +98,23 @@ String BackupInfo::toStringForLogging() const
     return toAST()->formatForLogging();
 }

+void BackupInfo::copyS3CredentialsTo(BackupInfo & dest) const
+{
+    /// named_collection case, no need to update
+    if (!dest.id_arg.empty() || !id_arg.empty())
+        throw Exception(ErrorCodes::BAD_ARGUMENTS, "use_same_s3_credentials_for_base_backup is not compatible with named_collections");
+
+    if (backup_engine_name != "S3")
+        throw Exception(ErrorCodes::BAD_ARGUMENTS, "use_same_s3_credentials_for_base_backup supported only for S3, got {}", toStringForLogging());
+    if (dest.backup_engine_name != "S3")
+        throw Exception(ErrorCodes::BAD_ARGUMENTS, "use_same_s3_credentials_for_base_backup supported only for S3, got {}", dest.toStringForLogging());
+    if (args.size() != 3)
+        throw Exception(ErrorCodes::BAD_ARGUMENTS, "use_same_s3_credentials_for_base_backup requires access_key_id, secret_access_key, got {}", toStringForLogging());
+
+    auto & dest_args = dest.args;
+    dest_args.resize(3);
+    dest_args[1] = args[1];
+    dest_args[2] = args[2];
+}
+
 }
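copyS3CredentialsTo() copies only access_key_id and secret_access_key (args[1] and args[2]); the base backup keeps its own URL. Configurations it cannot handle are rejected with BAD_ARGUMENTS: named collections on either side, non-S3 engines, or an outer backup that was not given an explicit key/secret pair. For example, combining the setting with Disk() engines fails, as the test below checks (a sketch, disk and path are placeholders):

    BACKUP TABLE data TO Disk('default', 'backups/backup_2')
        SETTINGS base_backup = Disk('default', 'backups/backup_1'),
                 use_same_s3_credentials_for_base_backup = 1;  -- fails with BAD_ARGUMENTS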
@@ -23,6 +23,8 @@ struct BackupInfo
     static BackupInfo fromAST(const IAST & ast);

     String toStringForLogging() const;
+
+    void copyS3CredentialsTo(BackupInfo & dest) const;
 };

 }
@@ -27,6 +27,7 @@ namespace ErrorCodes
     M(Bool, decrypt_files_from_encrypted_disks) \
     M(Bool, deduplicate_files) \
     M(Bool, allow_s3_native_copy) \
+    M(Bool, use_same_s3_credentials_for_base_backup) \
     M(Bool, read_from_filesystem_cache) \
     M(UInt64, shard_num) \
     M(UInt64, replica_num) \
@@ -44,6 +44,9 @@ struct BackupSettings
     /// Whether native copy is allowed (optimization for cloud storages, that sometimes could have bugs)
     bool allow_s3_native_copy = true;

+    /// Whether base backup to S3 should inherit credentials from the BACKUP query.
+    bool use_same_s3_credentials_for_base_backup = false;
+
     /// Allow to use the filesystem cache in passive mode - benefit from the existing cache entries,
     /// but don't put more entries into the cache.
     bool read_from_filesystem_cache = true;
@@ -386,6 +386,7 @@ void BackupsWorker::doBackup(
     backup_create_params.backup_uuid = backup_settings.backup_uuid;
     backup_create_params.deduplicate_files = backup_settings.deduplicate_files;
     backup_create_params.allow_s3_native_copy = backup_settings.allow_s3_native_copy;
+    backup_create_params.use_same_s3_credentials_for_base_backup = backup_settings.use_same_s3_credentials_for_base_backup;
     backup_create_params.read_settings = getReadSettingsForBackup(context, backup_settings);
     backup_create_params.write_settings = getWriteSettingsForBackup(context);
     BackupMutablePtr backup = BackupFactory::instance().createBackup(backup_create_params);
@@ -693,6 +694,7 @@ void BackupsWorker::doRestore(
     backup_open_params.base_backup_info = restore_settings.base_backup_info;
     backup_open_params.password = restore_settings.password;
     backup_open_params.allow_s3_native_copy = restore_settings.allow_s3_native_copy;
+    backup_open_params.use_same_s3_credentials_for_base_backup = restore_settings.use_same_s3_credentials_for_base_backup;
     backup_open_params.read_settings = getReadSettingsForRestore(context);
     backup_open_params.write_settings = getWriteSettingsForRestore(context);
     BackupPtr backup = BackupFactory::instance().createBackup(backup_open_params);
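The same setting is honoured on the RESTORE path, so valid credentials can be supplied at restore time even when the credentials recorded inside the backup for its base are no longer valid. From the test below (placeholders as above):

    RESTORE TABLE data AS data_1
    FROM S3('http://localhost:11111/test/backups/inc_1', 'key', 'secret')
    SETTINGS use_same_s3_credentials_for_base_backup = 1;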
@@ -163,6 +163,7 @@ namespace
     M(Bool, allow_unresolved_access_dependencies) \
     M(RestoreUDFCreationMode, create_function) \
     M(Bool, allow_s3_native_copy) \
+    M(Bool, use_same_s3_credentials_for_base_backup) \
     M(Bool, internal) \
     M(String, host_id) \
     M(OptionalString, storage_policy) \
@@ -110,6 +110,9 @@ struct RestoreSettings
     /// Whether native copy is allowed (optimization for cloud storages, that sometimes could have bugs)
     bool allow_s3_native_copy = true;

+    /// Whether base backup from S3 should inherit credentials from the RESTORE query.
+    bool use_same_s3_credentials_for_base_backup = false;
+
     /// Internal, should not be specified by user.
     bool internal = false;

@@ -47,7 +47,6 @@ void registerBackupEngineS3(BackupFactory & factory)
     auto creator_fn = []([[maybe_unused]] const BackupFactory::CreateParams & params) -> std::unique_ptr<IBackup>
     {
 #if USE_AWS_S3
-        String backup_name_for_logging = params.backup_info.toStringForLogging();
         const String & id_arg = params.backup_info.id_arg;
         const auto & args = params.backup_info.args;

@@ -115,7 +114,13 @@ void registerBackupEngineS3(BackupFactory & factory)
                 params.write_settings,
                 params.context);

-            return std::make_unique<BackupImpl>(backup_name_for_logging, archive_params, params.base_backup_info, reader, params.context);
+            return std::make_unique<BackupImpl>(
+                params.backup_info,
+                archive_params,
+                params.base_backup_info,
+                reader,
+                params.context,
+                params.use_same_s3_credentials_for_base_backup);
         }
         else
         {
@@ -129,7 +134,7 @@ void registerBackupEngineS3(BackupFactory & factory)
                 params.context);

             return std::make_unique<BackupImpl>(
-                backup_name_for_logging,
+                params.backup_info,
                 archive_params,
                 params.base_backup_info,
                 writer,
@@ -137,7 +142,8 @@ void registerBackupEngineS3(BackupFactory & factory)
                 params.is_internal_backup,
                 params.backup_coordination,
                 params.backup_uuid,
-                params.deduplicate_files);
+                params.deduplicate_files,
+                params.use_same_s3_credentials_for_base_backup);
         }
 #else
         throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "S3 support is disabled");
@@ -103,7 +103,6 @@ void registerBackupEnginesFileAndDisk(BackupFactory & factory)
 {
     auto creator_fn = [](const BackupFactory::CreateParams & params) -> std::unique_ptr<IBackup>
     {
-        String backup_name_for_logging = params.backup_info.toStringForLogging();
         const String & engine_name = params.backup_info.backup_engine_name;

         if (!params.backup_info.id_arg.empty())
@@ -172,7 +171,13 @@ void registerBackupEnginesFileAndDisk(BackupFactory & factory)
                 reader = std::make_shared<BackupReaderFile>(path, params.read_settings, params.write_settings);
             else
                 reader = std::make_shared<BackupReaderDisk>(disk, path, params.read_settings, params.write_settings);
-            return std::make_unique<BackupImpl>(backup_name_for_logging, archive_params, params.base_backup_info, reader, params.context);
+            return std::make_unique<BackupImpl>(
+                params.backup_info,
+                archive_params,
+                params.base_backup_info,
+                reader,
+                params.context,
+                params.use_same_s3_credentials_for_base_backup);
         }
         else
         {
@@ -182,7 +187,7 @@ void registerBackupEnginesFileAndDisk(BackupFactory & factory)
             else
                 writer = std::make_shared<BackupWriterDisk>(disk, path, params.read_settings, params.write_settings);
             return std::make_unique<BackupImpl>(
-                backup_name_for_logging,
+                params.backup_info,
                 archive_params,
                 params.base_backup_info,
                 writer,
@@ -190,7 +195,8 @@ void registerBackupEnginesFileAndDisk(BackupFactory & factory)
                 params.is_internal_backup,
                 params.backup_coordination,
                 params.backup_uuid,
-                params.deduplicate_files);
+                params.deduplicate_files,
+                params.use_same_s3_credentials_for_base_backup);
         }
     };

@@ -0,0 +1,14 @@
use_same_s3_credentials_for_base_backup for S3
BACKUP_CREATED
BACKUP_CREATED
The request signature we calculated does not match the signature you provided. Check your key and signing method. (S3_ERROR)
BACKUP_CREATED
The request signature we calculated does not match the signature you provided. Check your key and signing method. (S3_ERROR)
RESTORED
RESTORED
use_same_s3_credentials_for_base_backup for S3 (invalid arguments)
BACKUP_CREATED
NUMBER_OF_ARGUMENTS_DOESNT_MATCH
use_same_s3_credentials_for_base_backup for Disk
BACKUP_CREATED
BAD_ARGUMENTS
@@ -0,0 +1,45 @@
#!/usr/bin/env bash
# Tags: no-fasttest
# Tag: no-fasttest - requires S3

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh

$CLICKHOUSE_CLIENT -nm -q "
    drop table if exists data;
    create table data (key Int) engine=MergeTree() order by tuple();
    insert into data select * from numbers(10);
"

function write_invalid_password_to_base_backup()
{
    local name=$1 && shift
    local content
    content=$($CLICKHOUSE_CLIENT -q "select replace(line, 'testtest', 'INVALID_PASSWORD') from s3($(s3_location $name/.backup), 'LineAsString')") || return 1
    $CLICKHOUSE_CLIENT --param_content="$content" -q "insert into function s3($(s3_location $name/.backup), 'LineAsString') settings s3_truncate_on_insert=1 values ({content:String})"
}

# Returns the arguments for the BACKUP TO S3() function, i.e. (url, access_key_id, secret_access_key)
function s3_location() { echo "'http://localhost:11111/test/backups/$CLICKHOUSE_DATABASE/use_same_s3_credentials_for_base_backup_base_$*', 'test', 'testtest'"; }

echo 'use_same_s3_credentials_for_base_backup for S3'
$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO S3($(s3_location base))" | cut -f2
$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO S3($(s3_location inc_1)) SETTINGS base_backup=S3($(s3_location base))" | cut -f2
write_invalid_password_to_base_backup inc_1
$CLICKHOUSE_CLIENT --format Null -q "BACKUP TABLE data TO S3($(s3_location inc_2)) SETTINGS base_backup=S3($(s3_location inc_1))" |& grep -m1 -o 'The request signature we calculated does not match the signature you provided. Check your key and signing method. (S3_ERROR)'
$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO S3($(s3_location inc_3)) SETTINGS base_backup=S3($(s3_location inc_1)), use_same_s3_credentials_for_base_backup=1" | cut -f2

$CLICKHOUSE_CLIENT --format Null -q "RESTORE TABLE data AS data FROM S3($(s3_location inc_1))" |& grep -m1 -o 'The request signature we calculated does not match the signature you provided. Check your key and signing method. (S3_ERROR)'
$CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_1 FROM S3($(s3_location inc_1)) SETTINGS use_same_s3_credentials_for_base_backup=1" | cut -f2
$CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_2 FROM S3($(s3_location inc_3)) SETTINGS use_same_s3_credentials_for_base_backup=1" | cut -f2

echo 'use_same_s3_credentials_for_base_backup for S3 (invalid arguments)'
$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO S3($(s3_location inc_4_bad)) SETTINGS base_backup=S3($(s3_location inc_1), 'foo'), use_same_s3_credentials_for_base_backup=1" |& cut -f2
$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO S3($(s3_location inc_5_bad), 'foo') SETTINGS base_backup=S3($(s3_location inc_1)), use_same_s3_credentials_for_base_backup=1" |& grep -o -m1 NUMBER_OF_ARGUMENTS_DOESNT_MATCH

echo 'use_same_s3_credentials_for_base_backup for Disk'
$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('default', '$CLICKHOUSE_DATABASE/backup_1') SETTINGS use_same_s3_credentials_for_base_backup=1" | cut -f2
$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('default', '$CLICKHOUSE_DATABASE/backup_2') SETTINGS use_same_s3_credentials_for_base_backup=1, base_backup=Disk('default', '$CLICKHOUSE_DATABASE/backup_1')" |& grep -o -m1 BAD_ARGUMENTS

exit 0