Merge pull request #66837 from ClickHouse/tighten-limits-functional-tests

What if we tighten limits for functional tests?
Alexey Milovidov 2024-08-19 12:01:30 +02:00 committed by GitHub
commit 46dd66bfb6
90 changed files with 276 additions and 153 deletions
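
The commit installs tight default limits for the functional-test profile (see the new users.d/limits.yaml below) and relaxes them per test wherever a test legitimately needs more. A minimal sketch of the recurring override pattern in the diffs that follow (the queries here are illustrative, not part of the commit):

# Tests that exceed the new defaults override them per query, either with clickhouse-client flags...
clickhouse-client --max_memory_usage 10G --max_rows_to_read 0 --query "SELECT count() FROM numbers_mt(100000000)"
# ...or with a SETTINGS clause inside the query text itself.
clickhouse-client --query "SELECT count() FROM numbers_mt(100000000) SETTINGS max_rows_to_read = 0"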

View File

@ -118,8 +118,8 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
clickhouse-client --query "CREATE TABLE test.hits AS datasets.hits_v1"
clickhouse-client --query "CREATE TABLE test.visits AS datasets.visits_v1"
clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1"
clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1"
clickhouse-client --max_memory_usage 10G --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1"
clickhouse-client --max_memory_usage 10G --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1"
clickhouse-client --query "DROP TABLE datasets.hits_v1"
clickhouse-client --query "DROP TABLE datasets.visits_v1"
@ -191,8 +191,8 @@ else
ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
clickhouse-client --max_memory_usage 10G --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
clickhouse-client --max_memory_usage 10G --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
clickhouse-client --query "DROP TABLE datasets.visits_v1 SYNC"
clickhouse-client --query "DROP TABLE datasets.hits_v1 SYNC"
else
@ -200,7 +200,8 @@ else
clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
fi
clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
# AWS S3 is very inefficient, so increase memory even further:
clickhouse-client --max_memory_usage 30G --max_memory_usage_for_user 30G --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
fi
clickhouse-client --query "SHOW TABLES FROM test"

View File

@ -391,8 +391,8 @@ done
# wait for minio to flush its batch if it has any
sleep 1
clickhouse-client -q "SYSTEM FLUSH ASYNC INSERT QUEUE"
clickhouse-client -q "SELECT log FROM minio_audit_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_audit_logs.jsonl.zst' FORMAT JSONEachRow"
clickhouse-client -q "SELECT log FROM minio_server_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_server_logs.jsonl.zst' FORMAT JSONEachRow"
clickhouse-client --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_bytes 0 --max_result_rows 0 --max_rows_to_read 0 --max_bytes_to_read 0 -q "SELECT log FROM minio_audit_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_audit_logs.jsonl.zst' FORMAT JSONEachRow"
clickhouse-client --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_bytes 0 --max_result_rows 0 --max_rows_to_read 0 --max_bytes_to_read 0 -q "SELECT log FROM minio_server_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_server_logs.jsonl.zst' FORMAT JSONEachRow"
# Stop server so we can safely read data with clickhouse-local.
# Why do we read data with clickhouse-local?

View File

@ -94,6 +94,7 @@ ln -sf $SRC_PATH/users.d/prefetch_settings.xml $DEST_SERVER_PATH/users.d/
ln -sf $SRC_PATH/users.d/nonconst_timezone.xml $DEST_SERVER_PATH/users.d/
ln -sf $SRC_PATH/users.d/allow_introspection_functions.yaml $DEST_SERVER_PATH/users.d/
ln -sf $SRC_PATH/users.d/replicated_ddl_entry.xml $DEST_SERVER_PATH/users.d/
ln -sf $SRC_PATH/users.d/limits.yaml $DEST_SERVER_PATH/users.d/
if [[ -n "$USE_OLD_ANALYZER" ]] && [[ "$USE_OLD_ANALYZER" -eq 1 ]]; then
ln -sf $SRC_PATH/users.d/analyzer.xml $DEST_SERVER_PATH/users.d/

View File

@ -0,0 +1,56 @@
profiles:
default:
max_memory_usage: 5G
max_rows_to_read: 20000000
# Also set every other limit to a high value, so that it does not limit anything, but the code around it is still exercised.
s3_max_get_rps: 1000000
s3_max_get_burst: 2000000
s3_max_put_rps: 1000000
s3_max_put_burst: 2000000
max_remote_read_network_bandwidth: 1T
max_remote_write_network_bandwidth: 1T
max_local_read_bandwidth: 1T
max_local_write_bandwidth: 1T
use_index_for_in_with_subqueries_max_values: 1G
max_bytes_to_read: 1T
max_bytes_to_read_leaf: 1T
max_rows_to_group_by: 10G
max_bytes_before_external_group_by: 10G
max_rows_to_sort: 10G
max_bytes_to_sort: 10G
max_bytes_before_external_sort: 10G
max_result_rows: 1G
max_result_bytes: 1G
max_execution_time: 600
max_execution_time_leaf: 600
max_execution_speed: 100G
max_execution_speed_bytes: 10T
timeout_before_checking_execution_speed: 300
max_estimated_execution_time: 600
max_columns_to_read: 20K
max_temporary_columns: 20K
max_temporary_non_const_columns: 20K
max_rows_in_set: 10G
max_bytes_in_set: 10G
max_rows_in_join: 10G
max_bytes_in_join: 10G
max_rows_to_transfer: 1G
max_bytes_to_transfer: 1G
max_rows_in_distinct: 10G
max_bytes_in_distinct: 10G
max_memory_usage_for_user: 32G
max_network_bandwidth: 100G
max_network_bytes: 1T
max_network_bandwidth_for_user: 100G
max_network_bandwidth_for_all_users: 100G
max_temporary_data_on_disk_size_for_user: 100G
max_temporary_data_on_disk_size_for_query: 100G
max_backup_bandwidth: 100G
max_hyperscan_regexp_length: 1M
max_hyperscan_regexp_total_length: 10M
query_cache_max_size_in_bytes: 10M
query_cache_max_entries: 100K
external_storage_max_read_rows: 10G
external_storage_max_read_bytes: 10G
max_streams_for_merge_tree_reading: 1000
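
Once the symlink above places this file under users.d, the tightened defaults apply to the default profile. A quick way to confirm they are in effect (a sketch, assuming a local server started with this configuration):

# Sketch: verify that the defaults from users.d/limits.yaml are applied to the session.
clickhouse-client --query "SELECT name, value FROM system.settings WHERE name IN ('max_memory_usage', 'max_rows_to_read', 'max_memory_usage_for_user')"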

View File

@ -9,6 +9,8 @@ system flush logs;
drop table if exists logs;
create view logs as select * from system.text_log where now() - toIntervalMinute(120) < event_time;
SET max_rows_to_read = 0;
-- Check that we don't have too many messages formatted with fmt::runtime or strings concatenation.
-- A 0.001 threshold should always be enough; the observed value was about 0.00025
WITH 0.001 AS threshold

View File

@ -1 +1 @@
SELECT extract(toString(number), '10000000') FROM system.numbers_mt WHERE concat(materialize('1'), '...', toString(number)) LIKE '%10000000%' LIMIT 1
SELECT extract(toString(number), '10000000') FROM system.numbers_mt WHERE concat(materialize('1'), '...', toString(number)) LIKE '%10000000%' LIMIT 1 SETTINGS max_rows_to_read = 0;

View File

@ -1,20 +1,20 @@
7040546
7040546
4327029
4327029
1613512
1613512
8947307
8947307
6233790
6233790
3520273
3520273
806756
806756
8140551
8140551
5427034
5427034
2713517
2713517
4437158
4437158
1723641
1723641
3630402
3630402
916885
916885
2823646
2823646
110129
110129
4730407
4730407
2016890
2016890
3923651
3923651
1210134
1210134

View File

@ -1,11 +1,12 @@
-- Tags: distributed
-- Tags: distributed, long, no-flaky-check
-- ^ no-flaky-check - sometimes longer than 600s with ThreadFuzzer.
SET max_memory_usage = 300000000;
SET max_bytes_before_external_sort = 20000000;
SET max_memory_usage = 150000000;
SET max_bytes_before_external_sort = 10000000;
DROP TABLE IF EXISTS numbers10m;
CREATE VIEW numbers10m AS SELECT number FROM system.numbers LIMIT 10000000;
CREATE VIEW numbers10m AS SELECT number FROM system.numbers LIMIT 5000000;
SELECT number FROM remote('127.0.0.{2,3}', currentDatabase(), numbers10m) ORDER BY number * 1234567890123456789 LIMIT 19999980, 20;
SELECT number FROM remote('127.0.0.{2,3}', currentDatabase(), numbers10m) ORDER BY number * 1234567890123456789 LIMIT 4999980, 20;
DROP TABLE numbers10m;

View File

@ -1,4 +1,4 @@
-- Tags: shard
-- Tags: shard, long
DROP TABLE IF EXISTS group_uniq_str;
CREATE TABLE group_uniq_str ENGINE = Memory AS SELECT number % 10 as id, toString(intDiv((number%10000), 10)) as v FROM system.numbers LIMIT 10000000;
@ -7,7 +7,7 @@ INSERT INTO group_uniq_str SELECT 2 as id, toString(number % 100) as v FROM syst
INSERT INTO group_uniq_str SELECT 5 as id, toString(number % 100) as v FROM system.numbers LIMIT 10000000;
SELECT length(groupUniqArray(v)) FROM group_uniq_str GROUP BY id ORDER BY id;
SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_str') GROUP BY id ORDER BY id;
SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_str') GROUP BY id ORDER BY id SETTINGS max_rows_to_read = '100M';
SELECT length(groupUniqArray(10)(v)) FROM group_uniq_str GROUP BY id ORDER BY id;
SELECT length(groupUniqArray(10000)(v)) FROM group_uniq_str GROUP BY id ORDER BY id;

View File

@ -1,4 +1,6 @@
-- Tags: shard
-- Tags: long
SET max_rows_to_read = '55M';
DROP TABLE IF EXISTS group_uniq_arr_int;
CREATE TABLE group_uniq_arr_int ENGINE = Memory AS

View File

@ -1,4 +1,5 @@
-- Tags: shard
-- Tags: shard, long
SET max_rows_to_read = '55M';
DROP TABLE IF EXISTS group_uniq_arr_str;
CREATE TABLE group_uniq_arr_str ENGINE = Memory AS

View File

@ -74,7 +74,7 @@ ${CLICKHOUSE_CLIENT} --query "DROP TABLE t"
echo "A session cannot be used by concurrent connections:"
${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_9&query_id=${CLICKHOUSE_DATABASE}_9" --data-binary "SELECT count() FROM system.numbers" >/dev/null &
${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_9&query_id=${CLICKHOUSE_DATABASE}_9&max_rows_to_read=0" --data-binary "SELECT count() FROM system.numbers" >/dev/null &
# An infinite loop is required to make the test reliable: we ensure that the query on the line above has started at least once before this check
while true

View File

@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
TEST_PREFIX="${CLICKHOUSE_DATABASE}"
${CLICKHOUSE_CLIENT} -q "drop user if exists u_00600${TEST_PREFIX}"
${CLICKHOUSE_CLIENT} -q "create user u_00600${TEST_PREFIX} settings max_execution_time=60, readonly=1"
${CLICKHOUSE_CLIENT} -q "create user u_00600${TEST_PREFIX} settings max_execution_time=60, readonly=1, max_rows_to_read=0"
${CLICKHOUSE_CLIENT} -q "grant select on system.numbers to u_00600${TEST_PREFIX}"
function wait_for_query_to_start()
@ -28,7 +28,7 @@ function wait_for_queries_to_finish()
}
$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=${CLICKHOUSE_DATABASE}hello&replace_running_query=1" -d 'SELECT 1, count() FROM system.numbers' > /dev/null 2>&1 &
$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=${CLICKHOUSE_DATABASE}hello&replace_running_query=1&max_rows_to_read=0" -d 'SELECT 1, count() FROM system.numbers' > /dev/null 2>&1 &
wait_for_query_to_start "${CLICKHOUSE_DATABASE}hello"
# Replace it
@ -51,7 +51,7 @@ $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "KILL QUERY WHERE query_id = '${CLICKH
wait
wait_for_queries_to_finish
${CLICKHOUSE_CLIENT} --query_id="${CLICKHOUSE_DATABASE}42" --query='SELECT 3, count() FROM system.numbers' 2>&1 | grep -cF 'QUERY_WAS_CANCELLED' &
${CLICKHOUSE_CLIENT} --query_id="${CLICKHOUSE_DATABASE}42" --max_rows_to_read=0 --query='SELECT 3, count() FROM system.numbers' 2>&1 | grep -cF 'QUERY_WAS_CANCELLED' &
wait_for_query_to_start "${CLICKHOUSE_DATABASE}42"
${CLICKHOUSE_CLIENT} --query_id="${CLICKHOUSE_DATABASE}42" --replace_running_query=1 --replace_running_query_max_wait_ms=500 --query='SELECT 43' 2>&1 | grep -F "can't be stopped" > /dev/null
wait

View File

@ -1 +1 @@
waiting test_00601_default default SELECT sum(ignore(*)) FROM (SELECT number % 1000 AS k, groupArray(number) FROM numbers(50000000) GROUP BY k)
waiting test_00601_default default SELECT sum(ignore(*)) FROM (SELECT number % 1000 AS k, groupArray(number) FROM numbers(50000000) GROUP BY k) SETTINGS max_rows_to_read = 0

View File

@ -11,7 +11,7 @@ function wait_for_query_to_start()
while [[ $($CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "SELECT count() FROM system.processes WHERE query_id = '$1'") == 0 ]]; do sleep 0.1; done
}
${CLICKHOUSE_CURL_COMMAND} -q --max-time 30 -sS "$CLICKHOUSE_URL&query_id=test_00601_$CLICKHOUSE_DATABASE" -d 'SELECT sum(ignore(*)) FROM (SELECT number % 1000 AS k, groupArray(number) FROM numbers(50000000) GROUP BY k)' > /dev/null &
${CLICKHOUSE_CURL_COMMAND} -q --max-time 30 -sS "$CLICKHOUSE_URL&query_id=test_00601_$CLICKHOUSE_DATABASE" -d 'SELECT sum(ignore(*)) FROM (SELECT number % 1000 AS k, groupArray(number) FROM numbers(50000000) GROUP BY k) SETTINGS max_rows_to_read = 0' > /dev/null &
wait_for_query_to_start "test_00601_$CLICKHOUSE_DATABASE"
$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "KILL QUERY WHERE query_id = 'test_00601_$CLICKHOUSE_DATABASE'"
wait

File diff suppressed because one or more lines are too long

View File

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CURL} --max-time 1 -sS "${CLICKHOUSE_URL}&query_id=cancel_http_readonly_queries_on_client_close&cancel_http_readonly_queries_on_client_close=1&query=SELECT+count()+FROM+system.numbers" 2>&1 | grep -cF 'curl: (28)'
${CLICKHOUSE_CURL} --max-time 1 -sS "${CLICKHOUSE_URL}&query_id=cancel_http_readonly_queries_on_client_close&cancel_http_readonly_queries_on_client_close=1&max_rows_to_read=0&query=SELECT+count()+FROM+system.numbers" 2>&1 | grep -cF 'curl: (28)'
i=0 retries=300
while [[ $i -lt $retries ]]; do

View File

@ -1,3 +1,6 @@
-- Tags: long
SET max_rows_to_read = '100M';
drop table if exists lc_00906;
create table lc_00906 (b LowCardinality(String)) engine=MergeTree order by b SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';
insert into lc_00906 select '0123456789' from numbers(100000000);

View File

@ -17,6 +17,7 @@ SELECT count() > 0 FROM system.trace_log t WHERE query_id = (SELECT query_id FRO
SET query_profiler_real_time_period_ns = 0;
SET query_profiler_cpu_time_period_ns = 1000000;
SET log_queries = 1;
SET max_rows_to_read = 0;
SELECT count(), ignore('test cpu time query profiler') FROM numbers_mt(10000000000);
SET log_queries = 0;
SYSTEM FLUSH LOGS;

View File

@ -1,2 +1,2 @@
SET max_execution_speed = 1, max_execution_time = 3;
SET max_execution_speed = 1, max_execution_time = 3, max_rows_to_read = 0;
SELECT count() FROM system.numbers; -- { serverError TIMEOUT_EXCEEDED }

View File

@ -1,5 +1,7 @@
-- Tags: no-parallel, no-fasttest, no-random-settings
SET max_bytes_in_join = 0;
SET max_rows_in_join = 0;
SET max_memory_usage = 32000000;
SET join_on_disk_max_files_to_merge = 4;

View File

@ -1,4 +1,4 @@
-- Tags: no-tsan, no-asan, no-ubsan, no-msan, no-debug
SET query_profiler_cpu_time_period_ns = 1;
SET query_profiler_cpu_time_period_ns = 1, max_rows_to_read = 0;
SELECT count() FROM numbers_mt(1000000000);

View File

@ -1,8 +1,9 @@
drop table if exists t;
create table t(n int, a Int64, s String) engine = MergeTree() order by a;
set enable_positional_arguments=0;
set optimize_trivial_insert_select=1;
set enable_positional_arguments = 0;
set optimize_trivial_insert_select = 1;
set max_rows_to_read = 0;
-- due to aggregate functions, optimize_trivial_insert_select will not be applied
insert into t select 1, sum(number) as c, getSetting('max_threads') from numbers_mt(100000000) settings max_insert_threads=4, max_threads=2;

View File

@ -19,7 +19,7 @@ function run_selects()
thread_num=$1
readarray -t tables_arr < <(${CLICKHOUSE_CLIENT} -q "SELECT database || '.' || name FROM system.tables
WHERE database in ('system', 'information_schema', 'INFORMATION_SCHEMA') and name != 'zookeeper' and name != 'models'
AND sipHash64(name || toString($RAND)) % $THREADS = $thread_num AND name NOT LIKE '%\\_sender' AND name NOT LIKE '%\\_watcher'")
AND sipHash64(name || toString($RAND)) % $THREADS = $thread_num AND name NOT LIKE '%\\_sender' AND name NOT LIKE '%\\_watcher' AND name != 'coverage_log'")
for t in "${tables_arr[@]}"
do

View File

@ -9,3 +9,4 @@ FROM
)
WHERE number = 1
LIMIT 1
SETTINGS max_rows_to_read = 0;

View File

@ -14,10 +14,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
function test()
{
timeout 5 ${CLICKHOUSE_LOCAL} --max_execution_time 10 --query "
timeout 5 ${CLICKHOUSE_LOCAL} --max_execution_time 10 --max_rows_to_read 0 --query "
SELECT DISTINCT number % 5 FROM system.numbers" ||:
echo -e '---'
timeout 5 ${CLICKHOUSE_CURL} -sS --no-buffer "${CLICKHOUSE_URL}&max_execution_time=10" --data-binary "
timeout 5 ${CLICKHOUSE_CURL} -sS --no-buffer "${CLICKHOUSE_URL}&max_execution_time=10&max_rows_to_read=0" --data-binary "
SELECT DISTINCT number % 5 FROM system.numbers" ||:
echo -e '---'
}

View File

@ -5,5 +5,7 @@ connect_timeout_with_failover_secure_ms Milliseconds 3000
external_storage_connect_timeout_sec UInt64 10
s3_connect_timeout_ms UInt64 1000
filesystem_prefetch_max_memory_usage UInt64 1073741824
max_memory_usage UInt64 5000000000
max_memory_usage_for_user UInt64 32000000000
max_untracked_memory UInt64 1048576
memory_profiler_step UInt64 1048576

View File

@ -1,2 +1,2 @@
Memory limit (for query) exceeded
Memory limit exceeded
Ok

View File

@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
start=$SECONDS
# If the memory leak exists, it will lead to OOM fairly quickly.
for _ in {1..1000}; do
$CLICKHOUSE_CLIENT --max_memory_usage 1G <<< "SELECT uniqExactState(number) FROM system.numbers_mt GROUP BY number % 10";
$CLICKHOUSE_CLIENT --max_memory_usage 1G --max_rows_to_read 0 <<< "SELECT uniqExactState(number) FROM system.numbers_mt GROUP BY number % 10";
# NOTE: we cannot use timeout here since this will not guarantee that the query will be executed at least once.
# (since graceful wait of clickhouse-client had been reverted)
@ -16,5 +16,5 @@ for _ in {1..1000}; do
if [[ $elapsed -gt 30 ]]; then
break
fi
done 2>&1 | grep -o -F 'Memory limit (for query) exceeded' | uniq
done 2>&1 | grep -o -P 'Memory limit .+ exceeded' | sed -r -e 's/(Memory limit)(.+)( exceeded)/\1\3/' | uniq
echo 'Ok'
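
For reference, this is roughly what the new grep/sed normalization does to a server error message; the exact exception text below is an assumption used only for illustration:

# Normalize 'Memory limit (for query/for user) exceeded' to 'Memory limit exceeded'.
echo 'Code: 241. DB::Exception: Memory limit (for query) exceeded: would use 1.00 GiB' \
    | grep -o -P 'Memory limit .+ exceeded' \
    | sed -r -e 's/(Memory limit)(.+)( exceeded)/\1\3/'
# Prints: Memory limit exceeded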

View File

@ -1,18 +1,19 @@
#!/usr/bin/env bash
# Tags: long, no-object-storage-with-slow-build
# Tags: long, no-object-storage-with-slow-build, no-flaky-check
# It can be too long with ThreadFuzzer
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --query "
$CLICKHOUSE_CLIENT --max_rows_to_read 50M --query "
DROP TABLE IF EXISTS bug;
CREATE TABLE bug (UserID UInt64, Date Date) ENGINE = MergeTree ORDER BY Date
SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi', merge_max_block_size = 8192;
INSERT INTO bug SELECT rand64(), '2020-06-07' FROM numbers(50000000);
OPTIMIZE TABLE bug FINAL;"
LOG="$CLICKHOUSE_TMP/err-$CLICKHOUSE_DATABASE"
$CLICKHOUSE_BENCHMARK --iterations 10 --max_threads 100 --min_bytes_to_use_direct_io 1 <<< "SELECT sum(UserID) FROM bug PREWHERE NOT ignore(Date)" 1>/dev/null 2>"$LOG"
$CLICKHOUSE_BENCHMARK --max_rows_to_read 51M --iterations 10 --max_threads 100 --min_bytes_to_use_direct_io 1 <<< "SELECT sum(UserID) FROM bug PREWHERE NOT ignore(Date)" 1>/dev/null 2>"$LOG"
cat "$LOG" | grep Exception
cat "$LOG" | grep Loaded

View File

@ -1,5 +1,7 @@
-- Tags: no-random-settings, no-asan, no-msan, no-tsan, no-ubsan, no-debug
SET max_rows_to_read = '100M';
select count() from
(
select toInt128(number) * number x, toInt256(number) * number y from numbers_mt(100000000) where x != y

View File

@ -1,15 +1,17 @@
-- Tags: long, no-tsan, no-distributed-cache
-- Tag no-tsan: Too long for TSan
-- Tags: long, no-tsan, no-msan, no-distributed-cache
-- Too long for TSan and MSan
set enable_filesystem_cache=0;
set enable_filesystem_cache_on_write_operations=0;
set max_rows_to_read = '30M';
drop table if exists t;
create table t (x UInt64, s String) engine = MergeTree order by x SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';
INSERT INTO t SELECT
number,
if(number < (8129 * 1024), arrayStringConcat(arrayMap(x -> toString(x), range(number % 128)), ' '), '')
FROM numbers_mt((8129 * 1024) * 3) settings max_insert_threads=8;
FROM numbers_mt((8129 * 1024) * 3) settings max_insert_threads=8, max_rows_to_read=0;
-- optimize table t final;

View File

@ -1,6 +1,7 @@
-- Tags: long, distributed, no-random-settings
drop table if exists data_01730;
SET max_rows_to_read = 0, max_result_rows = 0, max_bytes_before_external_group_by = 0;
-- does not use 127.1 due to prefer_localhost_replica

View File

@ -1,5 +1,6 @@
#!/usr/bin/env bash
# Tags: no-fasttest
# Tags: no-fasttest, no-s3-storage, long
# ^ no-s3-storage: too memory hungry
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
@ -8,7 +9,8 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata"
${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata (data JSON) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" --allow_experimental_json_type 1
cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} -q "INSERT INTO ghdata FORMAT JSONAsObject"
cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} \
--max_memory_usage 10G --query "INSERT INTO ghdata FORMAT JSONAsObject"
${CLICKHOUSE_CLIENT} -q "SELECT count() FROM ghdata WHERE NOT ignore(*)"
@ -16,7 +18,7 @@ ${CLICKHOUSE_CLIENT} -q \
"SELECT data.repo.name, count() AS stars FROM ghdata \
WHERE data.type = 'WatchEvent' GROUP BY data.repo.name ORDER BY stars DESC, data.repo.name LIMIT 5"
${CLICKHOUSE_CLIENT} --allow_experimental_analyzer=1 -q \
${CLICKHOUSE_CLIENT} --enable_analyzer=1 -q \
"SELECT data.payload.commits[].author.name AS name, count() AS c FROM ghdata \
ARRAY JOIN data.payload.commits[].author.name \
GROUP BY name ORDER BY c DESC, name LIMIT 5"

View File

@ -1,5 +1,6 @@
#!/usr/bin/env bash
# Tags: no-fasttest, long
# Tags: no-fasttest, no-s3-storage, long
# ^ no-s3-storage: it is memory-hungry
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
@ -13,10 +14,10 @@ ${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2 (data JSON) ENGINE = MergeTree OR
${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2_string (data String) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'"
${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2_from_string (data JSON) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" --allow_experimental_json_type 1
cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} -q "INSERT INTO ghdata_2 FORMAT JSONAsObject"
cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} --max_memory_usage 10G -q "INSERT INTO ghdata_2 FORMAT JSONAsObject"
cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} -q "INSERT INTO ghdata_2_string FORMAT JSONAsString"
${CLICKHOUSE_CLIENT} -q "INSERT INTO ghdata_2_from_string SELECT data FROM ghdata_2_string"
${CLICKHOUSE_CLIENT} --max_memory_usage 10G -q "INSERT INTO ghdata_2_from_string SELECT data FROM ghdata_2_string"
${CLICKHOUSE_CLIENT} -q "SELECT \
(SELECT mapSort(groupUniqArrayMap(JSONAllPathsWithTypes(data))), sum(cityHash64(toString(data))) FROM ghdata_2_from_string) = \

View File

@ -1,5 +1,6 @@
#!/usr/bin/env bash
# Tags: no-fasttest
# Tags: no-fasttest, no-s3-storage, long
# ^ no-s3-storage: it is memory-hungry
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh

View File

@ -1,5 +1,6 @@
#!/usr/bin/env bash
# Tags: no-fasttest
# Tags: no-fasttest, no-s3-storage, long
# ^ no-s3-storage: too memory hungry
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh

View File

@ -2,5 +2,5 @@
SET max_bytes_before_external_group_by = 0;
SET max_memory_usage = '100M';
SET max_memory_usage = '100M', max_rows_to_read = '1G';
SELECT cityHash64(rand() % 1000) as n, groupBitmapState(number) FROM numbers_mt(200000000) GROUP BY n FORMAT Null; -- { serverError MEMORY_LIMIT_EXCEEDED }

View File

@ -4,11 +4,11 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --max_memory_usage_in_client=1 -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }"
$CLICKHOUSE_CLIENT --max_result_bytes 0 --max_memory_usage_in_client=1 -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }"
$CLICKHOUSE_CLIENT --max_memory_usage_in_client=0 -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000"
$CLICKHOUSE_CLIENT --max_memory_usage_in_client='5K' -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }"
$CLICKHOUSE_CLIENT --max_memory_usage_in_client='5k' -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }"
$CLICKHOUSE_CLIENT --max_result_bytes 0 --max_memory_usage_in_client='5K' -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }"
$CLICKHOUSE_CLIENT --max_result_bytes 0 --max_memory_usage_in_client='5k' -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }"
$CLICKHOUSE_CLIENT --max_memory_usage_in_client='1M' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000"
$CLICKHOUSE_CLIENT --max_memory_usage_in_client='23G' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000"
$CLICKHOUSE_CLIENT --max_memory_usage_in_client='11T' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000"

View File

@ -5,4 +5,3 @@
0.0009775171065493646
0.0009775171065493646
0.0009775171065493646
0.0009775171065493646

View File

@ -6,4 +6,3 @@ WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)
WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(100000);
WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(1000000);
WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(10000000);
WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(100000000);

View File

@ -2,4 +2,3 @@
0.009775171065493644
0.009775171065493644
0.009775171065493644
0.009775171065493644

View File

@ -3,4 +3,3 @@ WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)
WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM remote('127.0.0.{1..10}', numbers_mt(10000));
WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM remote('127.0.0.{1..10}', numbers_mt(100000));
WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM remote('127.0.0.{1..10}', numbers_mt(1000000));
WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM remote('127.0.0.{1..10}', numbers_mt(10000000));

View File

@ -17,7 +17,7 @@ fi
### Should be cancelled after 1 second and return a 159 exception (timeout)
### However, in the test, the server can be overloaded, so we assert query duration in the interval of 1 to 60 seconds.
query_id=$(random_str 12)
$CLICKHOUSE_CLIENT --query_id "$query_id" --max_execution_time 1 -q "
$CLICKHOUSE_CLIENT --max_result_rows 0 --max_result_bytes 0 --query_id "$query_id" --max_execution_time 1 -q "
SELECT * FROM
(
SELECT a.name as n
@ -39,7 +39,7 @@ ${CLICKHOUSE_CURL} -q -sS "$CLICKHOUSE_URL" -d "select 'query_duration', round(q
### Should stop pulling data and return what has been generated already (return code 0)
query_id=$(random_str 12)
$CLICKHOUSE_CLIENT --query_id "$query_id" -q "
$CLICKHOUSE_CLIENT --max_result_rows 0 --max_result_bytes 0 --query_id "$query_id" -q "
SELECT a.name as n
FROM
(
@ -58,7 +58,7 @@ ${CLICKHOUSE_CURL} -q -sS "$CLICKHOUSE_URL" -d "select 'query_duration', round(q
# HTTP CLIENT: As of today (02/12/21) uses PullingPipelineExecutor
### Should be cancelled after 1 second and return a 159 exception (timeout)
${CLICKHOUSE_CURL} -q --max-time $TIMEOUT -sS "$CLICKHOUSE_URL&max_execution_time=1" -d "
${CLICKHOUSE_CURL} -q --max-time $TIMEOUT -sS "$CLICKHOUSE_URL&max_execution_time=1&max_result_rows=0&max_result_bytes=0" -d "
SELECT * FROM
(
SELECT a.name as n
@ -77,7 +77,7 @@ ${CLICKHOUSE_CURL} -q --max-time $TIMEOUT -sS "$CLICKHOUSE_URL&max_execution_tim
### Should stop pulling data and return what has been generated already (return code 0)
${CLICKHOUSE_CURL} -q --max-time $TIMEOUT -sS "$CLICKHOUSE_URL" -d "
${CLICKHOUSE_CURL} -q --max-time $TIMEOUT -sS "$CLICKHOUSE_URL&max_result_rows=0&max_result_bytes=0" -d "
SELECT a.name as n
FROM
(

View File

@ -10,7 +10,7 @@ function wait_for_query_to_start()
}
QUERY_1_ID="${CLICKHOUSE_DATABASE}_TEST02132KILL_QUERY1"
(${CLICKHOUSE_CLIENT} --query_id="${QUERY_1_ID}" --query='select (SELECT max(number) from system.numbers) + 1;' 2>&1 | grep -q "Code: 394." || echo 'FAIL') &
(${CLICKHOUSE_CLIENT} --max_rows_to_read 0 --query_id="${QUERY_1_ID}" --query='select (SELECT max(number) from system.numbers) + 1;' 2>&1 | grep -q "Code: 394." || echo 'FAIL') &
wait_for_query_to_start "${QUERY_1_ID}"
${CLICKHOUSE_CLIENT} --query="KILL QUERY WHERE query_id='${QUERY_1_ID}' SYNC"

View File

@ -3,5 +3,6 @@ drop table if exists tab_lc;
CREATE TABLE tab_lc (x UInt64, y LowCardinality(String)) engine = MergeTree order by x SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';
insert into tab_lc select number, toString(number % 10) from numbers(20000000);
optimize table tab_lc;
SET max_rows_to_read = '21M';
select count() from tab_lc where y == '0' settings local_filesystem_read_prefetch=1;
drop table if exists tab_lc;

View File

@ -6,7 +6,7 @@ SELECT addressToLineWithInlines(1); -- { serverError FUNCTION_NOT_ALLOWED }
SET allow_introspection_functions = 1;
SET query_profiler_real_time_period_ns = 0;
SET query_profiler_cpu_time_period_ns = 1000000;
SET log_queries = 1;
SET log_queries = 1, max_rows_to_read = 0;
SELECT count() FROM numbers_mt(10000000000) SETTINGS log_comment='02161_test_case';
SET log_queries = 0;
SET query_profiler_cpu_time_period_ns = 0;

View File

@ -8,6 +8,8 @@ DROP TABLE IF EXISTS right;
CREATE TABLE left ( key UInt32, value String ) ENGINE = MergeTree ORDER BY key SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';
CREATE TABLE right ( key UInt32, value String ) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';
SET max_rows_to_read = '50M';
INSERT INTO left SELECT number, toString(number) FROM numbers(25367182);
INSERT INTO right SELECT number, toString(number) FROM numbers(23124707);

View File

@ -1,4 +1,5 @@
-- Tags: long
-- Tags: long, no-flaky-check
-- It can be too long with ThreadFuzzer
DROP TABLE IF EXISTS left;
DROP TABLE IF EXISTS right;

View File

@ -1,3 +1,5 @@
SET allow_hyperscan = 1, max_hyperscan_regexp_length = 0, max_hyperscan_regexp_total_length = 0;
EXPLAIN SYNTAX SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 0;
EXPLAIN QUERY TREE run_passes=1 SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 0, enable_analyzer = 1;
EXPLAIN SYNTAX SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 1;

View File

@ -37,7 +37,7 @@ IPv6 functions
::ffff:127.0.0.1
::
\N
100000000
20000000
--
::ffff:127.0.0.1
--

View File

@ -67,11 +67,11 @@ SELECT toIPv6('::.1.2.3'); --{serverError CANNOT_PARSE_IPV6}
SELECT toIPv6OrDefault('::.1.2.3');
SELECT toIPv6OrNull('::.1.2.3');
SELECT count() FROM numbers_mt(100000000) WHERE NOT ignore(toIPv6OrZero(randomString(8)));
SELECT count() FROM numbers_mt(20000000) WHERE NOT ignore(toIPv6OrZero(randomString(8)));
SELECT '--';
SELECT cast('test' , 'IPv6'); --{serverError CANNOT_PARSE_IPV6}
SELECT cast('test' , 'IPv6'); -- { serverError CANNOT_PARSE_IPV6 }
SELECT cast('::ffff:127.0.0.1', 'IPv6');
SELECT '--';

View File

@ -1,3 +1,5 @@
-- Tags: long
SELECT roundBankers(result.1, 5), roundBankers(result.2, 5) FROM (
SELECT
studentTTest(sample, variant) as result
@ -15,6 +17,8 @@ SELECT
FROM system.numbers limit 500000));
SET max_rows_to_read = 0;
SELECT roundBankers(result.1, 5), roundBankers(result.2, 5 ) FROM (
SELECT
studentTTest(sample, variant) as result

View File

@ -23,16 +23,16 @@ function check_output() {
# TCP CLIENT
echo "TCP CLIENT"
OUTPUT=$($CLICKHOUSE_CLIENT --max_execution_time $MAX_TIMEOUT -q "SELECT count() FROM system.numbers" 2>&1 || true)
OUTPUT=$($CLICKHOUSE_CLIENT --max_rows_to_read 0 --max_execution_time $MAX_TIMEOUT -q "SELECT count() FROM system.numbers" 2>&1 || true)
check_output "${OUTPUT}"
echo "TCP CLIENT WITH SETTINGS IN QUERY"
OUTPUT=$($CLICKHOUSE_CLIENT -q "SELECT count() FROM system.numbers SETTINGS max_execution_time=$MAX_TIMEOUT" 2>&1 || true)
OUTPUT=$($CLICKHOUSE_CLIENT --max_rows_to_read 0 -q "SELECT count() FROM system.numbers SETTINGS max_execution_time=$MAX_TIMEOUT" 2>&1 || true)
check_output "${OUTPUT}"
# HTTP CLIENT
echo "HTTP CLIENT"
OUTPUT=$(${CLICKHOUSE_CURL_COMMAND} -q -sS "$CLICKHOUSE_URL&max_execution_time=$MAX_TIMEOUT" -d \
OUTPUT=$(${CLICKHOUSE_CURL_COMMAND} -q -sS "$CLICKHOUSE_URL&max_execution_time=${MAX_TIMEOUT}&max_rows_to_read=0" -d \
"SELECT count() FROM system.numbers" || true)
check_output "${OUTPUT}"

View File

@ -1,6 +1,6 @@
-- { echoOn }
explain pipeline select * from (select * from numbers(1e8) group by number) group by number;
explain pipeline select * from (select * from numbers(1e8) group by number) group by number settings max_rows_to_read = 0;
(Expression)
ExpressionTransform × 16
(Aggregating)
@ -16,7 +16,7 @@ ExpressionTransform × 16
ExpressionTransform
(ReadFromSystemNumbers)
NumbersRange 0 → 1
explain pipeline select * from (select * from numbers_mt(1e8) group by number) group by number;
explain pipeline select * from (select * from numbers_mt(1e8) group by number) group by number settings max_rows_to_read = 0;
(Expression)
ExpressionTransform × 16
(Aggregating)
@ -32,7 +32,7 @@ ExpressionTransform × 16
ExpressionTransform × 16
(ReadFromSystemNumbers)
NumbersRange × 16 0 → 1
explain pipeline select * from (select * from numbers_mt(1e8) group by number) order by number;
explain pipeline select * from (select * from numbers_mt(1e8) group by number) order by number settings max_rows_to_read = 0;
(Expression)
ExpressionTransform
(Sorting)

View File

@ -13,11 +13,9 @@ set allow_prefetched_read_pool_for_local_filesystem = 0;
-- { echoOn }
explain pipeline select * from (select * from numbers(1e8) group by number) group by number;
explain pipeline select * from (select * from numbers_mt(1e8) group by number) group by number;
explain pipeline select * from (select * from numbers_mt(1e8) group by number) order by number;
explain pipeline select * from (select * from numbers(1e8) group by number) group by number settings max_rows_to_read = 0;
explain pipeline select * from (select * from numbers_mt(1e8) group by number) group by number settings max_rows_to_read = 0;
explain pipeline select * from (select * from numbers_mt(1e8) group by number) order by number settings max_rows_to_read = 0;
explain pipeline select number from remote('127.0.0.{1,2,3}', system, numbers_mt) group by number settings distributed_aggregation_memory_efficient = 1;

View File

@ -1,4 +1,5 @@
-- Tags: no-parallel, long, no-debug, no-tsan, no-msan, no-asan
SET max_rows_to_read = 0;
create table data_02344 (key Int) engine=Null;
-- 3e9 rows is enough to fill the socket buffer and cause the INSERT to hang.

View File

@ -4,6 +4,8 @@ drop table if exists table_2;
drop table if exists v_numbers;
drop table if exists mv_table;
SET max_rows_to_read = 0;
create table table_1 (x UInt32, y String) engine = MergeTree order by x;
insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd');

View File

@ -2,5 +2,6 @@
SET max_execution_time = 3;
SET timeout_overflow_mode = 'break';
SET max_rows_to_read = 0, max_bytes_to_read = 0;
SELECT count() FROM system.numbers_mt WHERE NOT ignore(JSONExtract('{' || repeat('"a":"b",', rand() % 10) || '"c":"d"}', 'a', 'String')) FORMAT Null;

View File

@ -1,5 +1,7 @@
-- Tags: long, no-tsan, no-msan, no-asan, no-ubsan, no-debug, no-object-storage
SET max_rows_to_read = '101M';
DROP TABLE IF EXISTS t_2354_dist_with_external_aggr;
create table t_2354_dist_with_external_aggr(a UInt64, b String, c FixedString(100)) engine = MergeTree order by tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';
@ -23,6 +25,6 @@ select a, b, c, sum(a) as s
from remote('127.0.0.{2,3}', currentDatabase(), t_2354_dist_with_external_aggr)
group by a, b, c
format Null
settings max_memory_usage = '5Gi';
settings max_memory_usage = '5Gi', max_result_rows = 0, max_result_bytes = 0;
DROP TABLE t_2354_dist_with_external_aggr;

View File

@ -1,3 +1,4 @@
SET max_rows_to_read = 0, max_bytes_to_read = 0;
SELECT count() FROM (SELECT DISTINCT nowInBlock(), nowInBlock('Pacific/Pitcairn') FROM system.numbers LIMIT 2);
SELECT nowInBlock(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT nowInBlock(NULL) IS NULL;

View File

@ -5,20 +5,24 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
# Test that running distributed query and cancel it ASAP,
# this can trigger a hung/deadlock in ProcessorList.
for i in {1..50}; do
# Test that runs a distributed query and cancels it ASAP,
# this has a chance to trigger a hung/deadlock in ProcessorList.
for i in {1..50}
do
query_id="$CLICKHOUSE_TEST_UNIQUE_NAME-$i"
$CLICKHOUSE_CLIENT --format Null --query_id "$query_id" -q "select * from remote('127.{1|2|3|4|5|6}', numbers(1e12))" 2>/dev/null &
while :; do
$CLICKHOUSE_CLIENT --format Null --query_id "$query_id" --max_rows_to_read 0 --max_bytes_to_read 0 --max_result_rows 0 --max_result_bytes 0 -q "select * from remote('127.{1|2|3|4|5|6}', numbers(1e12))" 2>/dev/null &
while true
do
killed_queries="$($CLICKHOUSE_CLIENT -q "kill query where query_id = '$query_id' sync" | wc -l)"
if [[ "$killed_queries" -ge 1 ]]; then
if [[ "$killed_queries" -ge 1 ]]
then
break
fi
done
wait -n
query_return_status=$?
if [[ $query_return_status -eq 0 ]]; then
if [[ $query_return_status -eq 0 ]]
then
echo "Query $query_id should be cancelled, however it returns successfully"
fi
done

View File

@ -12,7 +12,7 @@ echo "Parquet"
DATA_FILE=$CUR_DIR/data_parquet/list_monotonically_increasing_offsets.parquet
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_load"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_load (list Array(Int64), json Nullable(String)) ENGINE = Memory"
cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} -q "INSERT INTO parquet_load FORMAT Parquet"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_load" | md5sum
cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} --max_memory_usage 10G -q "INSERT INTO parquet_load FORMAT Parquet"
${CLICKHOUSE_CLIENT} --max_result_rows 0 --max_result_bytes 0 --query="SELECT * FROM parquet_load" | md5sum
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM parquet_load"
${CLICKHOUSE_CLIENT} --query="drop table parquet_load"

View File

@ -2,7 +2,7 @@
set allow_suspicious_fixed_string_types=1;
create table fat_granularity (x UInt32, fat FixedString(160000)) engine = MergeTree order by x settings storage_policy = 's3_cache';
insert into fat_granularity select number, toString(number) || '_' from numbers(100000) settings max_block_size = 8192, max_insert_threads=8;
insert into fat_granularity select number, toString(number) || '_' from numbers(100000) settings max_block_size = 3000, max_insert_threads = 8, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0;
-- Too large sizes of FixedString to deserialize
select x from fat_granularity prewhere fat like '256\_%' settings max_threads=2;

View File

@ -12,7 +12,7 @@ create table bug_delta_gorilla
(value_bug UInt64 codec (Delta, Gorilla))
engine = MergeTree
order by tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'
as (select 0 from numbers(30000000));
as (select 0 from numbers(20000000));
select count(*)
from bug_delta_gorilla

View File

@ -7,8 +7,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
QUERY_ID="${CLICKHOUSE_DATABASE}_test_02585_query_to_kill_id_1"
$CLICKHOUSE_CLIENT --query_id="$QUERY_ID" -n -q "
create temporary table tmp as select * from numbers(500000000);
$CLICKHOUSE_CLIENT --query_id="$QUERY_ID" --max_rows_to_read 0 -n -q "
create temporary table tmp as select * from numbers(100000000);
select * from remote('127.0.0.2', 'system.numbers_mt') where number in (select * from tmp);" &> /dev/null &
$CLICKHOUSE_CLIENT -q "SYSTEM FLUSH LOGS"
@ -19,8 +19,7 @@ do
if [ -n "$res" ]; then
break
fi
sleep 1
sleep 1
done
$CLICKHOUSE_CLIENT -q "kill query where query_id = '$QUERY_ID' sync" &> /dev/null

View File

@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Tags: no-fasttest
# Tags: no-fasttest, no-random-merge-tree-settings
# Tag no-fasttest: needs s3
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
@ -17,7 +17,9 @@ WITH '(\\w+): (\\d+)' AS pattern,
WHERE line LIKE '% S3%'
AND line NOT LIKE '%Microseconds%'
AND line NOT LIKE '%S3DiskConnections%'
AND line NOT LIKE '%S3DiskAddresses') AS pe_map
AND line NOT LIKE '%S3DiskAddresses%'
AND line NOT LIKE '%RequestThrottlerCount%'
) AS pe_map
SELECT * FROM (
SELECT untuple(arrayJoin(pe_map) AS pe)
WHERE tupleElement(pe, 1) not like '%WriteRequests%'

View File

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
QUERY_ID="${CLICKHOUSE_DATABASE}_read_with_cancel"
$CLICKHOUSE_CLIENT -n --query_id="$QUERY_ID" --query="SELECT sum(number * 0) FROM numbers(10000000000) SETTINGS partial_result_on_first_cancel=true;" &
$CLICKHOUSE_CLIENT --max_rows_to_read 0 -n --query_id="$QUERY_ID" --query="SELECT sum(number * 0) FROM numbers(10000000000) SETTINGS partial_result_on_first_cancel=true;" &
pid=$!
for _ in {0..60}

View File

@ -10,7 +10,9 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# NOTE: the .sh test is used over .sql because it needs $CLICKHOUSE_DATABASE to
# avoid truncation, since it seems that the version of MinIO that is used on CI
# is too slow with this.
$CLICKHOUSE_CLIENT -nm -q "
#
# Unfortunately, the test has to buffer it in memory.
$CLICKHOUSE_CLIENT --max_memory_usage 16G -nm -q "
INSERT INTO FUNCTION s3('http://localhost:11111/test/$CLICKHOUSE_DATABASE/test_INT_MAX.tsv', '', '', 'TSV')
SELECT repeat('a', 1024) FROM numbers((pow(2, 30) * 2) / 1024)
SETTINGS s3_max_single_part_upload_size = '5Gi';

View File

@ -1,4 +1,5 @@
-- Tags: no-fasttest
SET max_rows_to_read = 0;
SELECT count() FROM cluster('test_cluster_two_shards', view( SELECT * FROM numbers(100000000000) )) SETTINGS max_execution_time_leaf = 1; -- { serverError TIMEOUT_EXCEEDED }
-- Can return partial result
SELECT count() FROM cluster('test_cluster_two_shards', view( SELECT * FROM numbers(100000000000) )) FORMAT Null SETTINGS max_execution_time_leaf = 1, timeout_overflow_mode_leaf = 'break';

View File

@ -4,7 +4,7 @@ CREATE TABLE t (key UInt64, value UInt64, INDEX value_idx value TYPE bloom_filte
INSERT INTO t SELECT number, rand()%1000 FROM numbers(10000);
SET timeout_overflow_mode='break';
SET max_execution_time=0.1;
SET max_execution_time=0.1, max_rows_to_read=0;
SELECT * FROM t WHERE value IN (SELECT number FROM numbers(1000000000));
DROP TABLE t;

View File

@ -12,7 +12,7 @@ FROM
AVG(wg) AS WR,
ac,
nw
FROM window_funtion_threading
FROM window_function_threading
GROUP BY ac, nw
)
GROUP BY nw
@ -32,7 +32,7 @@ FROM
AVG(wg) AS WR,
ac,
nw
FROM window_funtion_threading
FROM window_function_threading
GROUP BY ac, nw
)
GROUP BY nw
@ -42,6 +42,7 @@ SETTINGS max_threads = 1;
0 2 0
1 2 0
2 2 0
SET max_rows_to_read = 40000000;
SELECT
nw,
sum(WR) AS R,
@ -53,7 +54,7 @@ FROM
AVG(wg) AS WR,
ac,
nw
FROM window_funtion_threading
FROM window_function_threading
WHERE (ac % 4) = 0
GROUP BY
ac,
@ -64,7 +65,7 @@ FROM
AVG(wg) AS WR,
ac,
nw
FROM window_funtion_threading
FROM window_function_threading
WHERE (ac % 4) = 1
GROUP BY
ac,
@ -75,7 +76,7 @@ FROM
AVG(wg) AS WR,
ac,
nw
FROM window_funtion_threading
FROM window_function_threading
WHERE (ac % 4) = 2
GROUP BY
ac,
@ -86,7 +87,7 @@ FROM
AVG(wg) AS WR,
ac,
nw
FROM window_funtion_threading
FROM window_function_threading
WHERE (ac % 4) = 3
GROUP BY
ac,

View File

@ -1,6 +1,6 @@
-- Tags: long, no-tsan, no-asan, no-ubsan, no-msan, no-debug
CREATE TABLE window_funtion_threading
CREATE TABLE window_function_threading
Engine = MergeTree
ORDER BY (ac, nw)
AS SELECT
@ -20,7 +20,7 @@ FROM
AVG(wg) AS WR,
ac,
nw
FROM window_funtion_threading
FROM window_function_threading
GROUP BY ac, nw
)
GROUP BY nw
@ -40,7 +40,7 @@ FROM
AVG(wg) AS WR,
ac,
nw
FROM window_funtion_threading
FROM window_function_threading
GROUP BY ac, nw
)
GROUP BY nw
@ -58,7 +58,7 @@ FROM
AVG(wg) AS WR,
ac,
nw
FROM window_funtion_threading
FROM window_function_threading
GROUP BY ac, nw
)
GROUP BY nw
@ -66,6 +66,8 @@ ORDER BY nw ASC, R DESC
LIMIT 10
SETTINGS max_threads = 1;
SET max_rows_to_read = 40000000;
SELECT
nw,
sum(WR) AS R,
@ -77,7 +79,7 @@ FROM
AVG(wg) AS WR,
ac,
nw
FROM window_funtion_threading
FROM window_function_threading
WHERE (ac % 4) = 0
GROUP BY
ac,
@ -88,7 +90,7 @@ FROM
AVG(wg) AS WR,
ac,
nw
FROM window_funtion_threading
FROM window_function_threading
WHERE (ac % 4) = 1
GROUP BY
ac,
@ -99,7 +101,7 @@ FROM
AVG(wg) AS WR,
ac,
nw
FROM window_funtion_threading
FROM window_function_threading
WHERE (ac % 4) = 2
GROUP BY
ac,
@ -110,7 +112,7 @@ FROM
AVG(wg) AS WR,
ac,
nw
FROM window_funtion_threading
FROM window_function_threading
WHERE (ac % 4) = 3
GROUP BY
ac,

View File

@ -1,5 +1,7 @@
-- Tags: no-fasttest
SET max_rows_to_read = 0, max_execution_time = 0, max_estimated_execution_time = 0;
-- Query stops after timeout without an error
SELECT * FROM numbers(100000000) SETTINGS max_block_size=1, max_execution_time=2, timeout_overflow_mode='break' FORMAT Null;

View File

@ -1,6 +1,7 @@
SELECT sleep(3.40282e+44); -- { serverError BAD_ARGUMENTS }
SELECT sleep((pow(2, 64) / 1000000) - 1); -- { serverError BAD_ARGUMENTS }
SELECT sleepEachRow(184467440737095516) from numbers(10000); -- { serverError BAD_ARGUMENTS }
SET max_rows_to_read = 0;
SELECT sleepEachRow(pow(2, 31)) from numbers(9007199254740992) settings function_sleep_max_microseconds_per_block = 8589934592000000000; -- { serverError TOO_SLOW }
-- Another corner case, but it requires lots of memory to run (huge block size)

View File

@ -1,2 +1,2 @@
set max_execution_time = 0.5, timeout_overflow_mode = 'break';
set max_execution_time = 0.5, timeout_overflow_mode = 'break', max_rows_to_read = 0;
SELECT number FROM remote('127.0.0.{3|2}', numbers(1)) WHERE number GLOBAL IN (SELECT number FROM numbers(10000000000.)) format Null;

View File

@ -30,7 +30,7 @@ SETTINGS enable_analyzer=1,
allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1,
cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0"
$CLICKHOUSE_CLIENT -q "
$CLICKHOUSE_CLIENT --max_rows_in_set_to_optimize_join 0 -q "
select * from (select key, value from num_1) l
inner join (select key, value from num_2 inner join
(select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=0) r
@ -55,7 +55,7 @@ SETTINGS enable_analyzer=1,
allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1,
cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0"
$CLICKHOUSE_CLIENT -q "
$CLICKHOUSE_CLIENT --max_rows_in_set_to_optimize_join 0 -q "
select * from (select key, value from num_1) l
inner join (select key, value from num_2 inner join
(select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=1) r
@ -81,7 +81,7 @@ SETTINGS enable_analyzer=1, parallel_replicas_prefer_local_join=0,
allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1,
cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', join_algorithm='full_sorting_merge'"
$CLICKHOUSE_CLIENT -q "
$CLICKHOUSE_CLIENT --max_rows_in_set_to_optimize_join 0 -q "
select * from (select key, value from num_1) l
inner join (select key, value from num_2 inner join
(select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings join_algorithm='full_sorting_merge') r
@ -106,7 +106,7 @@ SETTINGS enable_analyzer=1, parallel_replicas_prefer_local_join=0,
allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1,
cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', join_algorithm='full_sorting_merge'"
$CLICKHOUSE_CLIENT -q "
$CLICKHOUSE_CLIENT --max_rows_in_set_to_optimize_join 0 -q "
select * from (select key, value from num_1) l
inner join (select key, value from num_2 inner join
(select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings join_algorithm='hash') r
@ -131,7 +131,7 @@ SETTINGS enable_analyzer=1, parallel_replicas_prefer_local_join=0,
allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1,
cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', join_algorithm='hash'"
$CLICKHOUSE_CLIENT -q "
$CLICKHOUSE_CLIENT --max_rows_in_set_to_optimize_join 0 -q "
select * from (select key, value from num_1) l
inner join (select key, value from num_2 inner join
(select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings join_algorithm='full_sorting_merge') r

View File

@ -1,3 +1,5 @@
-- Tags: long
SET max_rows_to_read = 0;
create table test (number UInt64) engine=MergeTree order by number;
insert into test select * from numbers(50000000);
select ignore(number) from test where RAND() > 4292390314 limit 10;

View File

@ -1,2 +1,2 @@
49999983751397 10000032
49999983751397 10000032
7999995751397 4000032
7999995751397 4000032

View File

@ -12,7 +12,7 @@ AS
toDateTime('1990-03-21 13:00:00') + INTERVAL number MINUTE AS begin,
number % 4 AS key,
number AS value
FROM numbers(0, 10000000);
FROM numbers(0, 4000000);
CREATE TABLE skewed_probe ENGINE = MergeTree ORDER BY (key, begin)
AS
@ -34,8 +34,9 @@ AS
SELECT
toDateTime('1990-03-21 13:00:01') + INTERVAL number MINUTE AS begin,
3 AS key
FROM numbers(0, 10000000);
FROM numbers(0, 4000000);
SET max_rows_to_read = 0;
SELECT SUM(value), COUNT(*)
FROM skewed_probe

View File

@ -89,7 +89,7 @@ function check_replication_consistency()
# Touch all data to check that it's readable (and trigger PartCheckThread if needed)
# it's important to disable prefer warmed unmerged parts because
# otherwise it can read non-synchronized state of replicas
while ! $CLICKHOUSE_CLIENT --prefer_warmed_unmerged_parts_seconds=0 -q "SELECT * FROM merge(currentDatabase(), '$table_name_prefix') FORMAT Null" 2>/dev/null; do
while ! $CLICKHOUSE_CLIENT --prefer_warmed_unmerged_parts_seconds=0 --max_result_rows 0 --max_result_bytes 0 --max_rows_to_read 0 --max_bytes_to_read 0 -q "SELECT * FROM merge(currentDatabase(), '$table_name_prefix') FORMAT Null" 2>/dev/null; do
sleep 1;
num_tries=$((num_tries+1))
if [ $num_tries -eq 250 ]; then
@ -114,7 +114,8 @@ function check_replication_consistency()
# it's important to disable prefer warmed unmerged parts because
# otherwise it can read non-synchronized state of replicas
res=$($CLICKHOUSE_CLIENT --prefer_warmed_unmerged_parts_seconds=0 -q \
# also, disable the limit that is set for tests globally
res=$($CLICKHOUSE_CLIENT --prefer_warmed_unmerged_parts_seconds=0 --max_rows_to_read=0 -q \
"SELECT
if((countDistinct(data) as c) == 0, 1, c)
FROM

View File

@ -10,4 +10,5 @@ UNION ALL
ORDER BY id DESC
LIMIT 10
)
ORDER BY id, event;
ORDER BY id, event
SETTINGS max_rows_to_read = 40_000_000;

View File

@ -1,4 +1,4 @@
-- Tags: shard
SET output_format_write_statistics = 0;
SET output_format_write_statistics = 0, max_rows_to_read = 50_000_000;
SELECT EventDate, count() FROM remote('127.0.0.1', test.hits) WHERE UserID GLOBAL IN (SELECT UserID FROM test.hits) GROUP BY EventDate ORDER BY EventDate LIMIT 5 FORMAT JSONCompact;

View File

@ -1,4 +1,5 @@
-- Tags: global
SET max_rows_to_read = 100_000_000;
SELECT sum(UserID GLOBAL IN (SELECT UserID FROM remote('127.0.0.{1,2}', test.hits))) FROM remote('127.0.0.{1,2}', test.hits);
SELECT sum(UserID GLOBAL IN (SELECT UserID FROM test.hits)) FROM remote('127.0.0.{1,2}', test.hits);

View File

@ -1,4 +1,5 @@
-- Tags: distributed
SET max_rows_to_read = 100_000_000;
SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID);
SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID) SETTINGS optimize_aggregation_in_order = 1;

View File

@ -9,7 +9,8 @@ ORDER BY (CounterID, EventDate, intHash32(UserID))
SAMPLE BY intHash32(UserID)
SETTINGS storage_policy = 'default';
INSERT INTO test.hits_1m SELECT * FROM test.hits LIMIT 1000000;
INSERT INTO test.hits_1m SELECT * FROM test.hits LIMIT 1000000
SETTINGS min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0, max_block_size = 8192, max_insert_threads = 1, max_threads = 1;
CREATE DATABASE IF NOT EXISTS db_dict;
DROP DICTIONARY IF EXISTS db_dict.cache_hits;

View File

@ -10,6 +10,8 @@ SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'hits' PA
LIFETIME(MIN 300 MAX 600)
LAYOUT(CACHE(SIZE_IN_CELLS 100 QUERY_WAIT_TIMEOUT_MILLISECONDS 600000));
SET timeout_before_checking_execution_speed = 300;
SELECT sum(flag) FROM (SELECT dictHas('db_dict.cache_hits', toUInt64(WatchID)) as flag FROM test.hits PREWHERE WatchID % 1400 == 0 LIMIT 100);
SELECT count() from test.hits PREWHERE WatchID % 1400 == 0;
@ -20,4 +22,4 @@ SELECT sum(flag) FROM (SELECT dictHas('db_dict.cache_hits', toUInt64(WatchID)) a
SELECT count() from test.hits PREWHERE WatchID % 5 == 0;
DROP DICTIONARY IF EXISTS db_dict.cache_hits;
DROP DATABASE IF EXISTS db_dict;
DROP DATABASE IF EXISTS db_dict;

View File

@ -1,5 +1,6 @@
-- Tags: no-random-settings
SET max_memory_usage = '10G';
SELECT sum(cityHash64(*)) FROM test.hits SETTINGS max_threads=40;
-- We had a bug which led to additional compressed data being read. test.hits compressed size is about 1.2Gb, but we read more than 3Gb.

View File

@ -1,4 +1,5 @@
-- Tags: distributed
SET max_rows_to_read = '100M';
SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID) SETTINGS max_block_size = 63169;
SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID) SETTINGS optimize_aggregation_in_order = 1, max_block_size = 63169;

View File

@ -1,6 +1,7 @@
-- Tags: global
set allow_prefetched_read_pool_for_remote_filesystem=0, merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0, max_threads=2, max_block_size=65387;
set max_rows_to_read = '100M';
SELECT sum(UserID GLOBAL IN (SELECT UserID FROM remote('127.0.0.{1,2}', test.hits))) FROM remote('127.0.0.{1,2}', test.hits);
SELECT sum(UserID GLOBAL IN (SELECT UserID FROM test.hits)) FROM remote('127.0.0.{1,2}', test.hits);