Another try to fix flakiness (probably useless)

Igor Nikonov 2024-09-13 18:12:31 +00:00
parent e1706225b4
commit bbc730fac0
2 changed files with 9 additions and 8 deletions

@@ -9,19 +9,19 @@ SET log_queries=1;
 CREATE TEMPORARY TABLE times (t DateTime);
 INSERT INTO times SELECT now();
-SELECT count('special query for 01290_max_execution_speed_distributed') FROM remote('127.0.0.{2,3}', numbers(1000000));
+SELECT count() FROM remote('127.0.0.{2,3}', numbers(1000000)) SETTINGS log_comment='01290_8ca5d52f-8582-4ee3-8674-351c76d67b8c';
 INSERT INTO times SELECT now();
 SELECT max(t) - min(t) >= 1 FROM times;
 -- Check that the query was also throttled on "remote" servers.
 SYSTEM FLUSH LOGS;
-SELECT DISTINCT query_duration_ms >= 500
+SELECT COUNT()
 FROM system.query_log
 WHERE
     current_database = currentDatabase() AND
     event_date >= yesterday() AND
-    query LIKE '%special query for 01290_max_execution_speed_distributed%' AND
-    query NOT LIKE '%system.query_log%' AND
-    type = 'QueryFinish'
-SETTINGS max_threads = 0; -- to avoid TOO_SLOW error while executing the query
+    log_comment = '01290_8ca5d52f-8582-4ee3-8674-351c76d67b8c' AND
+    type = 'QueryFinish' AND
+    query_duration_ms >= 500
+SETTINGS max_threads = 0;
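This hunk replaces fragile substring matching against the query text with an exact lookup by log_comment, which also makes the old `query NOT LIKE '%system.query_log%'` self-exclusion guard unnecessary: the introspection query carries no such tag, so it can never match itself. A minimal sketch of the pattern, with an illustrative tag name that is not from the commit:

-- Tag the query under test with a unique comment; the tag is recorded in query_log.
SELECT count() FROM numbers(1000) SETTINGS log_comment = 'my_unique_test_tag';
SYSTEM FLUSH LOGS;
-- Exact match on log_comment: no LIKE patterns, no risk of matching this query itself.
SELECT count()
FROM system.query_log
WHERE current_database = currentDatabase()
    AND log_comment = 'my_unique_test_tag'
    AND type = 'QueryFinish';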

@@ -8,8 +8,9 @@ CREATE TABLE 03231_max_execution_time_t
 ENGINE = ReplicatedMergeTree('/clickhouse/{database}/03231_max_execution_time', 'r1')
 ORDER BY (key, value);
-SET max_rows_to_read = 100_000_000; -- to avoid DB::Exception: Limit for rows (controlled by 'max_rows_to_read' setting) exceeded, max rows: 20.00 million, current rows: 100.00 million. (TOO_MANY_ROWS)
-INSERT INTO 03231_max_execution_time_t SELECT number, toString(number) FROM numbers_mt(100_000_000) SETTINGS max_threads=0, max_insert_threads=0;
+SET max_rows_to_read = 20_000_000;
+SYSTEM STOP MERGES 03231_max_execution_time_t;
+INSERT INTO 03231_max_execution_time_t SELECT number, toString(number) FROM numbers_mt(20_000_000) SETTINGS max_threads=0, max_insert_threads=0;
 SET allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost';
 SET use_query_cache = false;
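The second hunk shrinks the inserted dataset from 100 million to 20 million rows, aligns max_rows_to_read with the new size (so the large escape-hatch value and its TOO_MANY_ROWS comment go away), and stops merges on the table before inserting, so the set of data parts the parallel replicas read stays fixed for the duration of the test. A minimal sketch of that stabilization pattern, with an illustrative table name that is not from the commit:

-- Freeze the part layout before populating, so background merges cannot
-- reshuffle data mid-test.
CREATE TABLE t_sketch (key UInt64) ENGINE = MergeTree ORDER BY key;
SYSTEM STOP MERGES t_sketch;
INSERT INTO t_sketch SELECT number FROM numbers(1000000);
-- ... timing-sensitive queries run here against a fixed set of parts ...
SYSTEM START MERGES t_sketch;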