mirror of https://github.com/ClickHouse/ClickHouse.git
fix pushdown of limit to reading stage
commit 03c785931a
parent c45a9e3bd6
@@ -1925,7 +1925,7 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc
         }
 
         /// If we don't have filtration, we can pushdown limit to reading stage for optimizations.
-        UInt64 limit = (query.where() && query.prewhere()) ? getLimitForSorting(query, context) : 0;
+        UInt64 limit = (query.where() || query.prewhere()) ? 0 : getLimitForSorting(query, context);
         if (query_info.projection)
             query_info.projection->input_order_info
                 = query_info.projection->order_optimizer->getInputOrder(query_info.projection->desc->metadata, context, limit);
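The effect of the changed condition can be illustrated with a minimal standalone sketch (the function and variable names below are illustrative, not ClickHouse identifiers): the limit may reach the reading stage only when the query has no filtration at all, i.e. neither WHERE nor PREWHERE. The old condition got this backwards for unfiltered queries and for queries with both WHERE and PREWHERE.

// Minimal sketch of the corrected pushdown rule (illustrative names, not ClickHouse code).
#include <cassert>
#include <cstdint>

// Returns the limit that may be pushed down to the reading stage:
// non-zero only when the query has no filtration (no WHERE, no PREWHERE).
uint64_t limit_for_reading(bool has_where, bool has_prewhere, uint64_t limit_for_sorting)
{
    // Old, buggy form: (has_where && has_prewhere) ? limit_for_sorting : 0
    // pushed the limit down only when *both* filters were present, and never for unfiltered queries.
    return (has_where || has_prewhere) ? 0 : limit_for_sorting;
}

int main()
{
    assert(limit_for_reading(false, false, 100) == 100); // no filtration: limit reaches the reading stage
    assert(limit_for_reading(true,  false, 100) == 0);   // WHERE present: read everything, filter later
    assert(limit_for_reading(false, true,  100) == 0);   // PREWHERE present: same
    assert(limit_for_reading(true,  true,  100) == 0);   // both present: same
    return 0;
}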
@@ -181,8 +181,8 @@ ProcessorPtr ReadFromMergeTree::createSource(
 {
     return std::make_shared<TSource>(
         data, metadata_snapshot, part.data_part, max_block_size, preferred_block_size_bytes,
-        preferred_max_column_in_block_size_bytes, required_columns, part.ranges, use_uncompressed_cache,
-        prewhere_info, actions_settings, true, reader_settings, virt_column_names, has_limit_below_one_block);
+        preferred_max_column_in_block_size_bytes, required_columns, part.ranges, use_uncompressed_cache, prewhere_info,
+        actions_settings, true, reader_settings, virt_column_names, part.part_index_in_query, has_limit_below_one_block);
 }
 
 Pipe ReadFromMergeTree::readInOrder(
@@ -22,6 +22,7 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor(
     bool check_columns,
     const MergeTreeReaderSettings & reader_settings_,
     const Names & virt_column_names_,
+    size_t part_index_in_query_,
     bool has_limit_below_one_block_)
     : MergeTreeBaseSelectProcessor{
         metadata_snapshot_->getSampleBlockForColumns(required_columns_, storage_.getVirtuals(), storage_.getStorageID()),
@@ -31,6 +32,7 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor(
     required_columns{std::move(required_columns_)},
     data_part{owned_data_part_},
     all_mark_ranges(std::move(mark_ranges_)),
+    part_index_in_query(part_index_in_query_),
     has_limit_below_one_block(has_limit_below_one_block_),
     total_rows(data_part->index_granularity.getRowsCountInRanges(all_mark_ranges))
 {
@@ -31,6 +31,7 @@ public:
         bool check_columns,
         const MergeTreeReaderSettings & reader_settings,
         const Names & virt_column_names = {},
+        size_t part_index_in_query_ = 0,
         bool has_limit_below_one_block_ = false);
 
     ~MergeTreeSelectProcessor() override;
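Since the new constructor argument is declared with a default (size_t part_index_in_query_ = 0), callers that stop at the earlier defaulted arguments keep compiling unchanged, while callers that pass the full argument list, such as ReadFromMergeTree::createSource above, are updated to supply part.part_index_in_query. A minimal standalone sketch of that pattern, with illustrative names rather than the real MergeTreeSelectProcessor interface:

// Illustrative only: adding a defaulted constructor parameter without breaking existing callers.
#include <cstddef>
#include <iostream>

struct SelectProcessorLike
{
    explicit SelectProcessorLike(size_t max_block_size, size_t part_index_in_query = 0)
        : max_block_size_(max_block_size)
        , part_index_in_query_(part_index_in_query)   // new member, defaults to 0 as in the header change above
    {
    }

    size_t max_block_size_;
    size_t part_index_in_query_;
};

int main()
{
    SelectProcessorLike old_caller(8192);      // pre-existing call site: still compiles, index defaults to 0
    SelectProcessorLike new_caller(8192, 7);   // updated call site passes the part index explicitly
    std::cout << old_caller.part_index_in_query_ << ' ' << new_caller.part_index_in_query_ << '\n';
    return 0;
}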