Update tests

Alexey Milovidov 2024-07-25 22:08:32 +02:00
parent ba5a07bcc7
commit f81e8aa345
10 changed files with 12 additions and 5 deletions

@@ -1,5 +1,7 @@
-- Tags: no-parallel, no-fasttest, no-random-settings
SET max_bytes_in_join = 0;
SET max_rows_in_join = 0;
SET max_memory_usage = 32000000;
SET join_on_disk_max_files_to_merge = 4;

@@ -12,7 +12,7 @@ $CLICKHOUSE_CLIENT --max_rows_to_read 50M --multiquery "
INSERT INTO bug SELECT rand64(), '2020-06-07' FROM numbers(50000000);
OPTIMIZE TABLE bug FINAL;"
LOG="$CLICKHOUSE_TMP/err-$CLICKHOUSE_DATABASE"
$CLICKHOUSE_BENCHMARK --iterations 10 --max_threads 100 --min_bytes_to_use_direct_io 1 <<< "SELECT sum(UserID) FROM bug PREWHERE NOT ignore(Date)" 1>/dev/null 2>"$LOG"
$CLICKHOUSE_BENCHMARK --max_rows_to_read 51M --iterations 10 --max_threads 100 --min_bytes_to_use_direct_io 1 <<< "SELECT sum(UserID) FROM bug PREWHERE NOT ignore(Date)" 1>/dev/null 2>"$LOG"
cat "$LOG" | grep Exception
cat "$LOG" | grep Loaded

@@ -12,7 +12,7 @@ select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by n
-- and the query with GROUP BY on remote servers will first do GROUP BY and then send the block,
-- so the initiator will first receive all blocks from remotes and only after that start merging,
-- and will hit the memory limit.
select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by number order by number limit 1e6 settings distributed_group_by_no_merge=2, max_memory_usage='20Mi', max_block_size=4294967296; -- { serverError MEMORY_LIMIT_EXCEEDED }
select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by number order by number limit 1e6 settings distributed_group_by_no_merge=2, max_memory_usage='20Mi', max_block_size=4294967296, max_rows_to_read=0; -- { serverError MEMORY_LIMIT_EXCEEDED }
-- with optimize_aggregation_in_order=1 remote servers will produce blocks more frequently,
-- since they don't need to wait until the aggregation is finished,
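
The max_rows_to_read=0 added in this hunk presumably prevents a finite row-read cap from firing before the MEMORY_LIMIT_EXCEEDED this test expects, since the ten remotes read 1e6 rows each. A minimal standalone sketch of how that cap behaves (illustrative values, not taken from the commit):

SELECT count() FROM numbers(1000000) SETTINGS max_rows_to_read = 1000; -- a finite cap aborts the read: { serverError TOO_MANY_ROWS }
SELECT count() FROM numbers(1000000) SETTINGS max_rows_to_read = 0; -- 0 disables the check; the query returns 1000000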

@@ -3,5 +3,6 @@ drop table if exists tab_lc;
CREATE TABLE tab_lc (x UInt64, y LowCardinality(String)) engine = MergeTree order by x SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';
insert into tab_lc select number, toString(number % 10) from numbers(20000000);
optimize table tab_lc;
SET max_rows_to_read = '21M';
select count() from tab_lc where y == '0' settings local_filesystem_read_prefetch=1;
drop table if exists tab_lc;

@@ -1,4 +1,5 @@
-- Tags: no-parallel, long, no-debug, no-tsan, no-msan, no-asan
SET max_rows_to_read = 0;
create table data_02344 (key Int) engine=Null;
-- 3e9 rows is enough to fill the socket buffer and cause the INSERT to hang.

@@ -1,5 +1,7 @@
-- Tags: long, no-tsan, no-msan, no-asan, no-ubsan, no-debug, no-object-storage
SET max_rows_to_read = '51M';
DROP TABLE IF EXISTS t_2354_dist_with_external_aggr;
create table t_2354_dist_with_external_aggr(a UInt64, b String, c FixedString(100)) engine = MergeTree order by tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';

@@ -11,7 +11,7 @@ echo "Parquet"
DATA_FILE=$CUR_DIR/data_parquet/list_monotonically_increasing_offsets.parquet
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_load"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_load (list Array(Int64), json Nullable(String)) ENGINE = Memory"
cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} -q "INSERT INTO parquet_load FORMAT Parquet"
cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} --max_memory_usage 10G -q "INSERT INTO parquet_load FORMAT Parquet"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_load" | md5sum
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM parquet_load"
${CLICKHOUSE_CLIENT} --query="drop table parquet_load"

@@ -2,7 +2,7 @@
set allow_suspicious_fixed_string_types=1;
create table fat_granularity (x UInt32, fat FixedString(160000)) engine = MergeTree order by x settings storage_policy = 's3_cache';
insert into fat_granularity select number, toString(number) || '_' from numbers(100000) settings max_block_size = 8192, max_insert_threads=8;
insert into fat_granularity select number, toString(number) || '_' from numbers(100000) settings max_block_size = 3000, max_insert_threads = 8, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0;
-- Too large sizes of FixedString to deserialize
select x from fat_granularity prewhere fat like '256\_%' settings max_threads=2;

@@ -1,6 +1,6 @@
-- Tags: no-fasttest
SET max_rows_to_read = 0;
SET max_rows_to_read = 0, max_execution_time = 0, max_estimated_execution_time = 0;
-- Query stops after timeout without an error
SELECT * FROM numbers(100000000) SETTINGS max_block_size=1, max_execution_time=2, timeout_overflow_mode='break' FORMAT Null;
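
For contrast with the 'break' mode exercised above, a minimal sketch of the default 'throw' behaviour (illustrative, not part of the commit): the same slow scan fails loudly instead of stopping quietly.

SELECT * FROM numbers(100000000) SETTINGS max_block_size=1, max_execution_time=2, timeout_overflow_mode='throw' FORMAT Null; -- { serverError TIMEOUT_EXCEEDED }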

@@ -35,6 +35,7 @@ AS
3 AS key
FROM numbers(0, 4000000);
SET max_rows_to_read = 0;
SELECT SUM(value), COUNT(*)
FROM skewed_probe