diff --git a/tests/queries/0_stateless/01290_max_execution_speed_distributed.sql b/tests/queries/0_stateless/01290_max_execution_speed_distributed.sql
index a6f67a433d2..c1fc8ffa2a4 100644
--- a/tests/queries/0_stateless/01290_max_execution_speed_distributed.sql
+++ b/tests/queries/0_stateless/01290_max_execution_speed_distributed.sql
@@ -9,19 +9,19 @@ SET log_queries=1;
 CREATE TEMPORARY TABLE times (t DateTime);
 
 INSERT INTO times SELECT now();
-SELECT count('special query for 01290_max_execution_speed_distributed') FROM remote('127.0.0.{2,3}', numbers(1000000));
+SELECT count() FROM remote('127.0.0.{2,3}', numbers(1000000)) SETTINGS log_comment='01290_8ca5d52f-8582-4ee3-8674-351c76d67b8c';
 INSERT INTO times SELECT now();
 
 SELECT max(t) - min(t) >= 1 FROM times;
 
 -- Check that the query was also throttled on "remote" servers.
 SYSTEM FLUSH LOGS;
-SELECT DISTINCT query_duration_ms >= 500
+SELECT COUNT()
 FROM system.query_log
 WHERE
     current_database = currentDatabase() AND
     event_date >= yesterday() AND
-    query LIKE '%special query for 01290_max_execution_speed_distributed%' AND
-    query NOT LIKE '%system.query_log%' AND
-    type = 'QueryFinish'
-SETTINGS max_threads = 0; -- to avoid TOO_SLOW error while executing the query
+    log_comment = '01290_8ca5d52f-8582-4ee3-8674-351c76d67b8c' AND
+    type = 'QueryFinish' AND
+    query_duration_ms >= 500
+SETTINGS max_threads = 0;
diff --git a/tests/queries/0_stateless/03231_pr_max_execution_time.sql b/tests/queries/0_stateless/03231_pr_max_execution_time.sql
index b7b6d436f26..09e01673727 100644
--- a/tests/queries/0_stateless/03231_pr_max_execution_time.sql
+++ b/tests/queries/0_stateless/03231_pr_max_execution_time.sql
@@ -8,8 +8,9 @@ CREATE TABLE 03231_max_execution_time_t
 ENGINE = ReplicatedMergeTree('/clickhouse/{database}/03231_max_execution_time', 'r1')
 ORDER BY (key, value);
 
-SET max_rows_to_read = 100_000_000; -- to avoid DB::Exception: Limit for rows (controlled by 'max_rows_to_read' setting) exceeded, max rows: 20.00 million, current rows: 100.00 million. (TOO_MANY_ROWS)
-INSERT INTO 03231_max_execution_time_t SELECT number, toString(number) FROM numbers_mt(100_000_000) SETTINGS max_threads=0, max_insert_threads=0;
+SET max_rows_to_read = 20_000_000;
+SYSTEM STOP MERGES 03231_max_execution_time_t;
+INSERT INTO 03231_max_execution_time_t SELECT number, toString(number) FROM numbers_mt(20_000_000) SETTINGS max_threads=0, max_insert_threads=0;
 
 SET allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost';
 SET use_query_cache = false;