Fix 01514_distributed_cancel_query_on_error flakiness

Azat Khuzhin 2020-10-08 01:58:31 +03:00
parent e465ce3d49
commit 983303243b


@@ -9,12 +9,12 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # max_block_size to fail faster
 # max_memory_usage/_shard_num/repeat() will allow failure on the first shard earlier.
 opts=(
-    --max_memory_usage=3G
+    --max_memory_usage=1G
     --max_block_size=50
     --max_threads=1
     --max_distributed_connections=2
 )
-${CLICKHOUSE_CLIENT} "${opts[@]}" -q "SELECT groupArray(repeat('a', 1000*_shard_num)), number%100000 k from remote('127.{2,3}', system.numbers) GROUP BY k LIMIT 10e6" |& {
+${CLICKHOUSE_CLIENT} "${opts[@]}" -q "SELECT groupArray(repeat('a', if(_shard_num == 2, 100000, 1))), number%100000 k from remote('127.{2,3}', system.numbers) GROUP BY k LIMIT 10e6" |& {
     # the query should fail earlier on 127.3, and 127.2 should not even reach the memory limit exceeded error.
     fgrep -q 'DB::Exception: Received from 127.3:9000. DB::Exception: Memory limit (for query) exceeded:'
     # while if this does not work correctly, it will get the exception from 127.2:9000 and fail
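Why this change makes the test deterministic: the old expression repeat('a', 1000*_shard_num) made both shards accumulate large strings (1 KB vs 2 KB per row), so either shard could hit the limit first. The new expression repeat('a', if(_shard_num == 2, 100000, 1)) builds ~100 KB strings only on shard 2 (127.3) while shard 1 (127.2) accumulates 1-byte strings, so with the tighter 1G limit only 127.3 can fail, and the fgrep above can reliably expect the error from 127.3:9000. Below is a hypothetical standalone sketch (not part of the commit) that runs each shard's per-row expression in isolation; it assumes a local clickhouse-client and replaces the infinite system.numbers with a bounded numbers(1000000) so the non-failing case terminates:

#!/usr/bin/env bash
# Hypothetical repro sketch: evaluate the per-shard expression separately and
# observe which variant trips the 1 GiB per-query memory limit.
for shard in 1 2; do
    echo "simulating _shard_num = ${shard}"
    clickhouse-client \
        --max_memory_usage=1G \
        --max_block_size=50 \
        --max_threads=1 \
        -q "SELECT groupArray(repeat('a', if(${shard} == 2, 100000, 1))), number % 100000 AS k FROM numbers(1000000) GROUP BY k FORMAT Null" \
        && echo "shard ${shard}: completed within the limit" \
        || echo "shard ${shard}: failed (expected for shard 2: memory limit exceeded)"
done

In the shard-1 case the groupArray states stay small (a million 1-byte strings spread over 100000 keys) and the query completes; in the shard-2 case each row appends a ~100 KB string, heading towards roughly 100 GB of state, so the 1 GiB limit is exceeded almost immediately. That asymmetry is what lets the test cancel the distributed query on the first error from 127.3.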