Update tests

Alexey Milovidov 2024-07-26 02:54:11 +02:00
parent f81e8aa345
commit 9c7078bcf7
6 changed files with 15 additions and 13 deletions


@@ -15,6 +15,7 @@ SELECT count() > 0 FROM system.trace_log t WHERE query_id = (SELECT query_id FRO
SET query_profiler_real_time_period_ns = 0;
SET query_profiler_cpu_time_period_ns = 1000000;
SET log_queries = 1;
+SET max_rows_to_read = 0;
SELECT count(), ignore('test cpu time query profiler') FROM numbers_mt(10000000000);
SET log_queries = 0;
SYSTEM FLUSH LOGS;
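For context: numbers_mt(10000000000) reads ten billion rows, far above any reasonable per-query cap, so the test now lifts max_rows_to_read explicitly. A minimal standalone sketch of how that setting behaves, not taken from this diff (TOO_MANY_ROWS as the overflow error name is an assumption based on ClickHouse defaults):

SET max_rows_to_read = 100;
SELECT count() FROM numbers(1000);  -- expected to fail: row-read limit of 100 exceeded (TOO_MANY_ROWS)
SET max_rows_to_read = 0;           -- 0 disables the limit, matching the line added above
SELECT count() FROM numbers(1000);  -- succeeds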


@@ -12,7 +12,8 @@ select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by n
-- and the query with GROUP BY on remote servers will first do GROUP BY and then send the block,
-- so the initiator will first receive all blocks from remotes and only after start merging,
-- and will hit the memory limit.
-select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by number order by number limit 1e6 settings distributed_group_by_no_merge=2, max_memory_usage='20Mi', max_block_size=4294967296, max_rows_to_read=0; -- { serverError MEMORY_LIMIT_EXCEEDED }
+SET max_rows_to_read = 0;
+select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by number order by number limit 1e6 settings distributed_group_by_no_merge=2, max_memory_usage='20Mi', max_block_size=4294967296; -- { serverError MEMORY_LIMIT_EXCEEDED }
-- with optimize_aggregation_in_order=1 remote servers will produce blocks more frequently,
-- since they don't need to wait until the aggregation will be finished,
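The trailing -- { serverError MEMORY_LIMIT_EXCEEDED } annotation is the functional-test convention for "this query must fail with that error". A minimal single-server sketch of the same memory-limit pattern, not taken from this diff (the row count needed to exceed 20 MiB is an assumption):

SET max_memory_usage = '20Mi';
SELECT groupArray(number) FROM numbers(10000000); -- { serverError MEMORY_LIMIT_EXCEEDED }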


@@ -14,7 +14,7 @@ MAX_PROCESS_WAIT=5
# TCP CLIENT: As of today (02/12/21) uses PullingAsyncPipelineExecutor
### Should be cancelled after 1 second and return a 159 exception (timeout)
-timeout -s KILL $MAX_PROCESS_WAIT $CLICKHOUSE_CLIENT --max_execution_time 1 -q \
+timeout -s KILL $MAX_PROCESS_WAIT $CLICKHOUSE_CLIENT --max_result_rows 0 --max_result_bytes 0 --max_execution_time 1 -q \
"SELECT * FROM
(
SELECT a.name as n
@@ -31,7 +31,7 @@ timeout -s KILL $MAX_PROCESS_WAIT $CLICKHOUSE_CLIENT --max_execution_time 1 -q \
FORMAT Null" 2>&1 | grep -o "Code: 159" | sort | uniq
### Should stop pulling data and return what has been generated already (return code 0)
-timeout -s KILL $MAX_PROCESS_WAIT $CLICKHOUSE_CLIENT -q \
+timeout -s KILL $MAX_PROCESS_WAIT $CLICKHOUSE_CLIENT --max_result_rows 0 --max_result_bytes 0 -q \
"SELECT a.name as n
FROM
(
@@ -48,7 +48,7 @@ echo $?
# HTTP CLIENT: As of today (02/12/21) uses PullingPipelineExecutor
### Should be cancelled after 1 second and return a 159 exception (timeout)
-${CLICKHOUSE_CURL} -q --max-time $MAX_PROCESS_WAIT -sS "$CLICKHOUSE_URL&max_execution_time=1" -d \
+${CLICKHOUSE_CURL} -q --max-time $MAX_PROCESS_WAIT -sS "$CLICKHOUSE_URL&max_result_rows=0&max_result_bytes=0&max_execution_time=1" -d \
"SELECT * FROM
(
SELECT a.name as n
@@ -66,7 +66,7 @@ ${CLICKHOUSE_CURL} -q --max-time $MAX_PROCESS_WAIT -sS "$CLICKHOUSE_URL&max_exec
### Should stop pulling data and return what has been generated already (return code 0)
-${CLICKHOUSE_CURL} -q --max-time $MAX_PROCESS_WAIT -sS "$CLICKHOUSE_URL" -d \
+${CLICKHOUSE_CURL} -q --max-time $MAX_PROCESS_WAIT -sS "$CLICKHOUSE_URL&max_result_rows=0&max_result_bytes=0" -d \
"SELECT a.name as n
FROM
(
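For reference, the "159 exception" these cases grep for is the execution-timeout error; the added --max_result_rows 0 / --max_result_bytes 0 flags (and the matching URL parameters for the HTTP client) only lift result-size limits so that cancellation behaviour is the only thing under test. A minimal SQL sketch of the timeout path, not taken from this diff (TIMEOUT_EXCEEDED as the name behind code 159 is an assumption):

SET max_execution_time = 1;                    -- seconds
SELECT count() FROM numbers_mt(100000000000);  -- expected to be cancelled with Code: 159 (TIMEOUT_EXCEEDED)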


@@ -1,6 +1,6 @@
-- Tags: long, no-tsan, no-msan, no-asan, no-ubsan, no-debug, no-object-storage
-SET max_rows_to_read = '51M';
+SET max_rows_to_read = '101M';
DROP TABLE IF EXISTS t_2354_dist_with_external_aggr;
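The quoted values use readable-number suffixes for numeric settings, so the cap only needed a string change from '51M' to '101M'. A small sketch for checking what the suffix expands to, not taken from this diff (the decimal expansion of the M suffix is an assumption):

SET max_rows_to_read = '101M';
SELECT value FROM system.settings WHERE name = 'max_rows_to_read'; -- presumably shows 101000000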


@@ -12,6 +12,6 @@ DATA_FILE=$CUR_DIR/data_parquet/list_monotonically_increasing_offsets.parquet
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_load"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_load (list Array(Int64), json Nullable(String)) ENGINE = Memory"
cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} --max_memory_usage 10G -q "INSERT INTO parquet_load FORMAT Parquet"
-${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_load" | md5sum
+${CLICKHOUSE_CLIENT} --max_result_rows 0 --max_result_bytes 0 --query="SELECT * FROM parquet_load" | md5sum
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM parquet_load"
${CLICKHOUSE_CLIENT} --query="drop table parquet_load"


@@ -12,7 +12,7 @@ FROM
AVG(wg) AS WR,
ac,
nw
-FROM window_funtion_threading
+FROM window_function_threading
GROUP BY ac, nw
)
GROUP BY nw
@@ -32,7 +32,7 @@ FROM
AVG(wg) AS WR,
ac,
nw
-FROM window_funtion_threading
+FROM window_function_threading
GROUP BY ac, nw
)
GROUP BY nw
@@ -53,7 +53,7 @@ FROM
AVG(wg) AS WR,
ac,
nw
-FROM window_funtion_threading
+FROM window_function_threading
WHERE (ac % 4) = 0
GROUP BY
ac,
@@ -64,7 +64,7 @@ FROM
AVG(wg) AS WR,
ac,
nw
-FROM window_funtion_threading
+FROM window_function_threading
WHERE (ac % 4) = 1
GROUP BY
ac,
@@ -75,7 +75,7 @@ FROM
AVG(wg) AS WR,
ac,
nw
-FROM window_funtion_threading
+FROM window_function_threading
WHERE (ac % 4) = 2
GROUP BY
ac,
@@ -86,7 +86,7 @@ FROM
AVG(wg) AS WR,
ac,
nw
-FROM window_funtion_threading
+FROM window_function_threading
WHERE (ac % 4) = 3
GROUP BY
ac,
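These hunks only touch the FROM clause (fixing the table-name typo); the table definition itself is outside the shown context. A hypothetical definition that would make the queries above runnable, with every column type and the engine assumed rather than taken from the diff:

-- Hypothetical schema, not part of the diff: types and engine are assumptions.
CREATE TABLE window_function_threading
(
    ac UInt32,   -- grouping key, also filtered as (ac % 4) = 0..3
    nw UInt32,   -- second grouping key
    wg Float64   -- value aggregated as AVG(wg) AS WR
)
ENGINE = MergeTree
ORDER BY (ac, nw);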