Add max_ prefix for backup_bandwidth settings

Signed-off-by: Azat Khuzhin <a.khuzhin@semrush.com>
This commit is contained in:
Azat Khuzhin 2023-04-05 10:54:25 +02:00
parent f0f58de79c
commit 4008155a6e
5 changed files with 7 additions and 7 deletions

View File

@@ -29,7 +29,7 @@ namespace DB
M(UInt64, max_backups_io_thread_pool_free_size, 0, "Max free size for backups IO thread pool.", 0) \
M(UInt64, backups_io_thread_pool_queue_size, 0, "Queue size for backups IO thread pool.", 0) \
M(UInt64, backup_threads, 16, "The maximum number of threads to execute BACKUP requests.", 0) \
M(UInt64, backup_bandwidth_for_server, 0, "The maximum read speed in bytes per second for all backups on server. Zero means unlimited.", 0) \
M(UInt64, max_backup_bandwidth_for_server, 0, "The maximum read speed in bytes per second for all backups on server. Zero means unlimited.", 0) \
M(UInt64, restore_threads, 16, "The maximum number of threads to execute RESTORE requests.", 0) \
M(Int32, max_connections, 1024, "Max server connections.", 0) \
M(UInt32, asynchronous_metrics_update_period_s, 1, "Period in seconds for updating asynchronous metrics.", 0) \

View File

@@ -424,7 +424,7 @@ class IColumn;
M(UInt64, backup_restore_keeper_fault_injection_seed, 0, "0 - random seed, otherwise the setting value", 0) \
M(UInt64, backup_restore_keeper_value_max_size, 1048576, "Maximum size of data of a [Zoo]Keeper's node during backup", 0) \
M(UInt64, backup_restore_batch_size_for_keeper_multiread, 10000, "Maximum size of batch for multiread request to [Zoo]Keeper during backup or restore", 0) \
M(UInt64, backup_bandwidth, 0, "The maximum read speed in bytes per second for particular backup on server. Zero means unlimited.", 0) \
M(UInt64, max_backup_bandwidth, 0, "The maximum read speed in bytes per second for particular backup on server. Zero means unlimited.", 0) \
\
M(Bool, log_profile_events, true, "Log query performance statistics into the query_log, query_thread_log and query_views_log.", 0) \
M(Bool, log_query_settings, true, "Log query settings into the query_log.", 0) \

View File

@@ -2430,20 +2430,20 @@ ThrottlerPtr Context::getBackupsThrottler() const
{
ThrottlerPtr throttler;
if (shared->server_settings.backup_bandwidth_for_server)
if (shared->server_settings.max_backup_bandwidth_for_server)
{
auto lock = getLock();
if (!shared->backups_server_throttler)
shared->backups_server_throttler = std::make_shared<Throttler>(shared->server_settings.backup_bandwidth_for_server);
shared->backups_server_throttler = std::make_shared<Throttler>(shared->server_settings.max_backup_bandwidth_for_server);
throttler = shared->backups_server_throttler;
}
const auto & query_settings = getSettingsRef();
if (query_settings.backup_bandwidth)
if (query_settings.max_backup_bandwidth)
{
auto lock = getLock();
if (!backups_query_throttler)
backups_query_throttler = std::make_shared<Throttler>(query_settings.backup_bandwidth, throttler);
backups_query_throttler = std::make_shared<Throttler>(query_settings.max_backup_bandwidth, throttler);
throttler = backups_query_throttler;
}

View File

@@ -14,7 +14,7 @@ $CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -q "insert into data select * from numbers(1e6)"
query_id=$(random_str 10)
$CLICKHOUSE_CLIENT --query_id "$query_id" -q "backup table data to Disk('default', 'backups/$CLICKHOUSE_DATABASE/data/backup1')" --backup_bandwidth=1M > /dev/null
$CLICKHOUSE_CLIENT --query_id "$query_id" -q "backup table data to Disk('default', 'backups/$CLICKHOUSE_DATABASE/data/backup1')" --max_backup_bandwidth=1M > /dev/null
$CLICKHOUSE_CLIENT -nm -q "
SYSTEM FLUSH LOGS;
SELECT