Update test

This commit is contained in:
Alexey Milovidov 2024-07-27 01:28:38 +02:00
parent b8b6188cbf
commit fd5934d0ad

View File

@@ -1,6 +1,7 @@
-- Tags: long, distributed, no-random-settings
drop table if exists data_01730;
SET max_rows_to_read = 0, max_result_rows = 0;
-- does not use 127.1 due to prefer_localhost_replica
@@ -12,7 +13,6 @@ select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by n
-- and the query with GROUP BY on remote servers will first do GROUP BY and then send the block,
-- so the initiator will first receive all blocks from remotes and only after start merging,
-- and will hit the memory limit.
SET max_rows_to_read = 0, max_result_rows = 0;
select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by number order by number limit 1e6 settings distributed_group_by_no_merge=2, max_memory_usage='20Mi', max_block_size=4294967296; -- { serverError MEMORY_LIMIT_EXCEEDED }
-- with optimize_aggregation_in_order=1 remote servers will produce blocks more frequently,