This commit is contained in:
Nikita Taranov 2024-10-18 19:26:03 +01:00
parent c1b56801b3
commit 52ea328f68
3 changed files with 75 additions and 6 deletions

View File

@ -854,9 +854,8 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan,
query_plan.addStep(std::move(filling_step));
}
void addLimitByStep(QueryPlan & query_plan,
const LimitByAnalysisResult & limit_by_analysis_result,
const QueryNode & query_node)
void addLimitByStep(
QueryPlan & query_plan, const LimitByAnalysisResult & limit_by_analysis_result, const QueryNode & query_node, bool do_not_skip_offset)
{
/// Constness of LIMIT BY limit is validated during query analysis stage
UInt64 limit_by_limit = query_node.getLimitByLimit()->as<ConstantNode &>().getValue().safeGet<UInt64>();
@ -868,6 +867,15 @@ void addLimitByStep(QueryPlan & query_plan,
limit_by_offset = query_node.getLimitByOffset()->as<ConstantNode &>().getValue().safeGet<UInt64>();
}
if (do_not_skip_offset)
{
if (limit_by_limit > std::numeric_limits<UInt64>::max() - limit_by_offset)
return;
limit_by_limit += limit_by_offset;
limit_by_offset = 0;
}
auto limit_by_step = std::make_unique<LimitByStep>(query_plan.getCurrentDataStream(),
limit_by_limit,
limit_by_offset,
@ -981,10 +989,10 @@ void addPreliminarySortOrDistinctOrLimitStepsIfNeeded(QueryPlan & query_plan,
{
auto & limit_by_analysis_result = expressions_analysis_result.getLimitBy();
addExpressionStep(query_plan, limit_by_analysis_result.before_limit_by_actions, "Before LIMIT BY", useful_sets);
addLimitByStep(query_plan, limit_by_analysis_result, query_node);
addLimitByStep(query_plan, limit_by_analysis_result, query_node, true /*do_not_skip_offset*/);
}
if (query_node.hasLimit())
if (query_node.hasLimit() && !query_node.hasLimitBy() && !query_node.isLimitWithTies())
addPreliminaryLimitStep(query_plan, query_analysis_result, planner_context, true /*do_not_skip_offset*/);
}
@ -1777,7 +1785,7 @@ void Planner::buildPlanForQueryNode()
{
auto & limit_by_analysis_result = expression_analysis_result.getLimitBy();
addExpressionStep(query_plan, limit_by_analysis_result.before_limit_by_actions, "Before LIMIT BY", useful_sets);
addLimitByStep(query_plan, limit_by_analysis_result, query_node);
addLimitByStep(query_plan, limit_by_analysis_result, query_node, false /*do_not_skip_offset*/);
}
if (query_node.hasOrderBy())

View File

@ -0,0 +1,30 @@
1 120
1 130
2 220
1 110
1 120
2 210
2 220
1 110
1 120
2 210
1 120
2 210
2 220
1
1
1
2
2
2
2
2
2
2
2
1
1
2
2
2
2

View File

@ -0,0 +1,31 @@
-- Test: LIMIT BY and LIMIT ... WITH TIES under parallel replicas.
-- Exercises the planner's preliminary LIMIT BY / LIMIT steps, where the
-- per-replica (preliminary) step must not skip the OFFSET prematurely —
-- only the final merging step may apply it.

CREATE TABLE limit_by
(
    `id` Int,
    `val` Int
)
ENGINE = MergeTree
ORDER BY tuple();

insert into limit_by values(1, 100), (1, 110), (1, 120), (1, 130), (2, 200), (2, 210), (2, 220), (3, 300);

-- Force the parallel-replicas read path so preliminary steps are planned.
set allow_experimental_parallel_reading_from_replicas=1, cluster_for_parallel_replicas='parallel_replicas', max_parallel_replicas=100, parallel_replicas_for_non_replicated_merge_tree=1;

-- LIMIT BY with offset, in both spellings, and combined with an outer LIMIT/OFFSET.
select * from limit_by order by id, val limit 2, 2 by id;
select * from limit_by order by id, val limit 2 offset 1 by id;
select * from limit_by order by id, val limit 1, 2 by id limit 3;
select * from limit_by order by id, val limit 1, 2 by id limit 3 offset 1;

CREATE TABLE ties
(
    `a` Int
)
ENGINE = MergeTree
ORDER BY tuple();

-- NOTE(review): the original statement lacked the comma between (2) and (3);
-- the reference output implies all 8 rows were inserted regardless (ClickHouse's
-- Values parser tolerates a missing row separator), but the explicit comma is
-- the correct, portable form.
INSERT INTO ties VALUES (1), (1), (2), (2), (2), (2), (3), (3);

-- WITH TIES with and without offset: a preliminary LIMIT must not be added
-- for WITH TIES queries, since a per-replica cut could drop tied rows.
SELECT a FROM ties order by a limit 1 with ties;
SELECT a FROM ties order by a limit 1, 2 with ties;
SELECT a FROM ties order by a limit 2, 3 with ties;
SELECT a FROM ties order by a limit 4 with ties;