This commit is contained in:
Igor Nikonov 2024-09-18 18:55:21 -04:00 committed by GitHub
commit 5c6b57538d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 39 additions and 5 deletions

View File

@@ -454,6 +454,7 @@ void executeQueryWithParallelReplicas(
     auto not_optimized_cluster = context->getClusterForParallelReplicas();

     auto new_context = Context::createCopy(context);
+    const auto & new_settings = new_context->getSettingsRef();

     /// check hedged connections setting
     if (settings.use_hedged_requests.value)
@@ -477,6 +478,14 @@ void executeQueryWithParallelReplicas(
         new_context->setSetting("use_hedged_requests", Field{false});
     }

+    if (settings.max_execution_time_leaf.value > 0)
+    {
+        /// Replace 'max_execution_time' of this sub-query with 'max_execution_time_leaf' and 'timeout_overflow_mode'
+        /// with 'timeout_overflow_mode_leaf'
+        new_context->setSetting("max_execution_time", Field{new_settings.max_execution_time_leaf});
+        new_context->setSetting("timeout_overflow_mode", Field{new_settings.timeout_overflow_mode_leaf});
+    }
+
     auto scalars = new_context->hasQueryContext() ? new_context->getQueryContext()->getScalars() : Scalars{};

     UInt64 shard_num = 0; /// shard_num is 1-based, so 0 - no shard specified

View File

@@ -9,18 +9,19 @@ SET log_queries=1;
 CREATE TEMPORARY TABLE times (t DateTime);

 INSERT INTO times SELECT now();
-SELECT count('special query for 01290_max_execution_speed_distributed') FROM remote('127.0.0.{2,3}', numbers(1000000));
+SELECT count() FROM remote('127.0.0.{2,3}', numbers(1000000)) SETTINGS log_comment='01290_8ca5d52f-8582-4ee3-8674-351c76d67b8c';
 INSERT INTO times SELECT now();

 SELECT max(t) - min(t) >= 1 FROM times;

 -- Check that the query was also throttled on "remote" servers.
 SYSTEM FLUSH LOGS;
-SELECT DISTINCT query_duration_ms >= 500
+SELECT COUNT()
 FROM system.query_log
 WHERE
     current_database = currentDatabase() AND
     event_date >= yesterday() AND
-    query LIKE '%special query for 01290_max_execution_speed_distributed%' AND
-    query NOT LIKE '%system.query_log%' AND
-    type = 2;
+    log_comment = '01290_8ca5d52f-8582-4ee3-8674-351c76d67b8c' AND
+    type = 'QueryFinish' AND
+    query_duration_ms >= 500
+SETTINGS max_threads = 0;

View File

@@ -0,0 +1,24 @@
-- Tags: no-fasttest
-- Regression test for 'max_execution_time_leaf' / 'timeout_overflow_mode_leaf' with
-- parallel replicas: the leaf settings must be propagated to the sub-queries sent to
-- remote replicas (replacing 'max_execution_time' / 'timeout_overflow_mode' there),
-- so a leaf timeout behaves the same as a plain timeout from the caller's view.

DROP TABLE IF EXISTS 03231_max_execution_time_t SYNC;

-- Replicated table so the query can actually be executed with parallel replicas.
CREATE TABLE 03231_max_execution_time_t
(
key UInt64,
value String,
)
ENGINE = ReplicatedMergeTree('/clickhouse/{database}/03231_max_execution_time', 'r1')
ORDER BY (key, value);

SET max_rows_to_read = 20_000_000;

-- Stop merges so the data layout (and thus query cost/timing) stays stable.
SYSTEM STOP MERGES 03231_max_execution_time_t;

-- 20M rows: enough work that a 1-second execution limit is reliably exceeded below.
INSERT INTO 03231_max_execution_time_t SELECT number, toString(number) FROM numbers_mt(20_000_000) SETTINGS max_threads=0, max_insert_threads=0;

-- Enable parallel replicas (3 replicas on the localhost test cluster).
SET allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost';

-- Make sure results are computed, not served from the query cache.
SET use_query_cache = false;

-- Both the regular and the leaf timeout must abort the query with TIMEOUT_EXCEEDED...
SELECT key, SUM(length(value)) FROM 03231_max_execution_time_t GROUP BY key FORMAT Null SETTINGS max_execution_time=1; -- { serverError TIMEOUT_EXCEEDED }
SELECT key, SUM(length(value)) FROM 03231_max_execution_time_t GROUP BY key FORMAT Null SETTINGS max_execution_time_leaf=1; -- { serverError TIMEOUT_EXCEEDED }

-- ...while 'break' overflow mode (regular and leaf variant) succeeds with a partial result.
-- Can return partial result
SELECT key, SUM(length(value)) FROM 03231_max_execution_time_t GROUP BY key FORMAT Null SETTINGS max_execution_time=1, timeout_overflow_mode='break';
SELECT key, SUM(length(value)) FROM 03231_max_execution_time_t GROUP BY key FORMAT Null SETTINGS max_execution_time_leaf=1, timeout_overflow_mode_leaf='break';

DROP TABLE 03231_max_execution_time_t SYNC;