mirror of
https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-10 09:32:06 +00:00
Avoid hiding errors such as "Limit for rows or bytes to read exceeded"
raised inside scalar subqueries
This commit is contained in:
parent
6e45ed0534
commit
be95a425d6
@ -116,8 +116,6 @@ void ExecuteScalarSubqueriesMatcher::visit(const ASTSubquery & subquery, ASTPtr
|
||||
{
|
||||
auto io = interpreter.execute();
|
||||
|
||||
try
|
||||
{
|
||||
PullingAsyncPipelineExecutor executor(io.pipeline);
|
||||
while (block.rows() == 0 && executor.pull(block));
|
||||
|
||||
@ -139,14 +137,6 @@ void ExecuteScalarSubqueriesMatcher::visit(const ASTSubquery & subquery, ASTPtr
|
||||
if (tmp_block.rows() != 0)
|
||||
throw Exception("Scalar subquery returned more than one row", ErrorCodes::INCORRECT_RESULT_OF_SCALAR_SUBQUERY);
|
||||
}
|
||||
catch (const Exception & e)
|
||||
{
|
||||
if (e.code() == ErrorCodes::TOO_MANY_ROWS)
|
||||
throw Exception("Scalar subquery returned more than one row", ErrorCodes::INCORRECT_RESULT_OF_SCALAR_SUBQUERY);
|
||||
else
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
block = materializeBlock(block);
|
||||
size_t columns = block.columns();
|
||||
|
@ -0,0 +1,16 @@
|
||||
-- Regression test: an error raised while executing a scalar subquery on a
-- remote shard (here error 158, the rows-to-read limit) must propagate to the
-- client instead of being masked by a generic scalar-subquery error.

DROP TABLE IF EXISTS nums_in_mem;
DROP TABLE IF EXISTS nums_in_mem_dist;

CREATE TABLE nums_in_mem (v UInt64) ENGINE = Memory;
INSERT INTO nums_in_mem SELECT * FROM system.numbers LIMIT 1000000;

-- Distributed wrapper over the in-memory table on the local test shard.
CREATE TABLE nums_in_mem_dist AS nums_in_mem ENGINE = Distributed('test_shard_localhost', currentDatabase(), nums_in_mem);

-- Route the subquery through the remote (distributed) path, and cap readable
-- rows far below the table size so the scalar subquery trips the limit.
SET prefer_localhost_replica = 0;
SET max_rows_to_read = 100;

-- `rand() > 0` forces a real scan of the table rather than a metadata count.
SELECT
    count()
        /
    (SELECT count() FROM nums_in_mem_dist WHERE rand() > 0)
FROM system.one; -- { serverError 158 }
|
Loading…
Reference in New Issue
Block a user