Nikita Taranov 2024-10-18 21:37:52 +01:00
parent 52ea328f68
commit 32538a7046
4 changed files with 85 additions and 9 deletions

View File: src/Interpreters/InterpreterSelectQuery.cpp

@@ -1704,7 +1704,8 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional<P
executeLimitBy(query_plan);
}
-    if (query.limitLength() && !query.limitBy())
+    /// WITH TIES is simply not supported properly for preliminary steps, so let's disable it.
+    if (query.limitLength() && !query.limitBy() && !query.limit_with_ties)
executePreLimit(query_plan, true);
}
};
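Why the guard is needed, as a sketch (hypothetical table t with a single column a; the actual preliminary step is whatever executePreLimit builds): a preliminary LIMIT runs on each replica before the initiator merges streams, while WITH TIES must keep every row equal to the boundary row, so a replica-local cut can drop tied rows for good.

-- suppose one replica holds the rows a = 1, 1, 2
-- SELECT a FROM t ORDER BY a LIMIT 1 WITH TIES;   -- correct answer: 1, 1
-- a plain preliminary LIMIT 1 on that replica forwards a single 1,
-- so the tied second 1 never reaches the initiator's final LIMIT ... WITH TIES;
-- hence the preliminary limit is now skipped entirely for WITH TIES queries.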

View File: src/Planner/Planner.cpp

@@ -989,10 +989,14 @@ void addPreliminarySortOrDistinctOrLimitStepsIfNeeded(QueryPlan & query_plan,
{
auto & limit_by_analysis_result = expressions_analysis_result.getLimitBy();
addExpressionStep(query_plan, limit_by_analysis_result.before_limit_by_actions, "Before LIMIT BY", useful_sets);
+    /// We don't apply LIMIT BY on remote nodes at all in the old infrastructure.
+    /// https://github.com/ClickHouse/ClickHouse/blob/67c1e89d90ef576e62f8b1c68269742a3c6f9b1e/src/Interpreters/InterpreterSelectQuery.cpp#L1697-L1705
+    /// Let's be optimistic and try to disable only skipping offset.
addLimitByStep(query_plan, limit_by_analysis_result, query_node, true /*do_not_skip_offset*/);
}
-    if (query_node.hasLimit() && !query_node.hasLimitBy() && !query_node.isLimitWithTies())
+    /// WITH TIES is simply not supported properly for preliminary steps, so let's disable it.
+    if (query_node.hasLimit() && !query_node.hasLimitByOffset() && !query_node.isLimitWithTies())
addPreliminaryLimitStep(query_plan, query_analysis_result, planner_context, true /*do_not_skip_offset*/);
}
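The do_not_skip_offset comment deserves an illustration; a sketch with a hypothetical placement of the test's own rows (group id = 1 of the limit_by table holds vals 100, 110, 120, 130), assuming the flag makes a replica keep offset + length rows per group and skip nothing:

-- SELECT * FROM limit_by ORDER BY id, val LIMIT 2 OFFSET 1 BY id;   -- expects 110, 120
-- say replica 1 holds (1,100),(1,110) and replica 2 holds (1,120),(1,130)
-- if each replica skipped offset 1 locally, replica 1 would forward 110 and replica 2 would forward 130;
--   the initiator then re-applies LIMIT 2 OFFSET 1 BY id to 110, 130 and returns only 130 (offset skipped twice)
-- with do_not_skip_offset each replica keeps up to 3 rows per group and skips nothing,
--   so the initiator sees 100, 110, 120, 130 and correctly returns 110, 120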

View File: stateless test .reference (expected output)

@@ -1,3 +1,57 @@
1 100
1 110
2 200
2 210
3 300
1 100
1 110
1 120
2 200
2 210
2 220
3 300
1 120
1 130
2 220
1 110
1 120
2 210
2 220
1 110
1 120
2 210
1 120
2 210
2 220
1
1
1
2
2
2
2
2
2
2
2
1
1
2
2
2
2
1 100
1 110
2 200
2 210
3 300
1 100
1 110
1 120
2 200
2 210
2 220
3 300
1 120
1 130
2 220

View File: stateless test .sql (queries)

@@ -8,13 +8,6 @@ ORDER BY tuple();
insert into limit_by values(1, 100), (1, 110), (1, 120), (1, 130), (2, 200), (2, 210), (2, 220), (3, 300);
set allow_experimental_parallel_reading_from_replicas=1, cluster_for_parallel_replicas='parallel_replicas', max_parallel_replicas=100, parallel_replicas_for_non_replicated_merge_tree=1;
-select * from limit_by order by id, val limit 2, 2 by id;
-select * from limit_by order by id, val limit 2 offset 1 by id;
-select * from limit_by order by id, val limit 1, 2 by id limit 3;
-select * from limit_by order by id, val limit 1, 2 by id limit 3 offset 1;
CREATE TABLE ties
(
`a` Int
@@ -24,8 +17,32 @@ ORDER BY tuple();
INSERT INTO ties VALUES (1), (1), (2), (2), (2), (2) (3), (3);
set allow_experimental_parallel_reading_from_replicas=1, cluster_for_parallel_replicas='parallel_replicas', max_parallel_replicas=100, parallel_replicas_for_non_replicated_merge_tree=1;
+set enable_analyzer=0;
+select * from limit_by order by id, val limit 2 by id;
+select * from limit_by order by id, val limit 3 by id;
+select * from limit_by order by id, val limit 2, 2 by id;
+select * from limit_by order by id, val limit 2 offset 1 by id;
+select * from limit_by order by id, val limit 1, 2 by id limit 3;
+select * from limit_by order by id, val limit 1, 2 by id limit 3 offset 1;
SELECT a FROM ties order by a limit 1 with ties;
SELECT a FROM ties order by a limit 1, 2 with ties;
SELECT a FROM ties order by a limit 2, 3 with ties;
SELECT a FROM ties order by a limit 4 with ties;
+set enable_analyzer=1;
+select * from limit_by order by id, val limit 2 by id;
+select * from limit_by order by id, val limit 3 by id;
+select * from limit_by order by id, val limit 2, 2 by id;
+select * from limit_by order by id, val limit 2 offset 1 by id;
+select * from limit_by order by id, val limit 1, 2 by id limit 3;
+select * from limit_by order by id, val limit 1, 2 by id limit 3 offset 1;
+SELECT a FROM ties order by a limit 1 with ties;
+SELECT a FROM ties order by a limit 1, 2 with ties;
+SELECT a FROM ties order by a limit 2, 3 with ties;
+SELECT a FROM ties order by a limit 4 with ties;
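For cross-checking against the .reference hunk above, the WITH TIES semantics these queries exercise, spelled out on the ties data 1, 1, 2, 2, 2, 2, 3, 3 (identical under both analyzer settings):

-- SELECT a FROM ties ORDER BY a LIMIT 1 WITH TIES;      -- 1, 1 (the second 1 ties with the boundary row)
-- SELECT a FROM ties ORDER BY a LIMIT 1, 2 WITH TIES;   -- 1, 2, 2, 2, 2 (skip one row, take two, extend the tie on 2)
-- SELECT a FROM ties ORDER BY a LIMIT 2, 3 WITH TIES;   -- 2, 2, 2, 2
-- SELECT a FROM ties ORDER BY a LIMIT 4 WITH TIES;      -- 1, 1, 2, 2, 2, 2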