fix pushdown of limit to reading stage

Author: Anton Popov
Date:   2021-07-16 17:27:38 +03:00
Commit: 03c785931a (parent c45a9e3bd6)
4 changed files with 6 additions and 3 deletions

src/Interpreters/InterpreterSelectQuery.cpp

@@ -1925,7 +1925,7 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc
         }
 
         /// If we don't have filtration, we can pushdown limit to reading stage for optimizations.
-        UInt64 limit = (query.where() && query.prewhere()) ? getLimitForSorting(query, context) : 0;
+        UInt64 limit = (query.where() || query.prewhere()) ? 0 : getLimitForSorting(query, context);
         if (query_info.projection)
             query_info.projection->input_order_info
                 = query_info.projection->order_optimizer->getInputOrder(query_info.projection->desc->metadata, context, limit);
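
The old predicate was doubly inverted: it requested a limit for pushdown only when both WHERE and PREWHERE were present, which is exactly when pushing a limit below the filters is unsafe (filtering can discard rows, so the reader must not stop early). The corrected expression pushes the limit down only when no filtration exists at all. A minimal standalone sketch of the two predicates, with hypothetical has_where/has_prewhere flags standing in for query.where()/query.prewhere() and sort_limit for the result of getLimitForSorting():

#include <cstdint>
#include <cstdio>

// Buggy variant: pushes the limit down exactly when filtration IS present.
uint64_t limit_old(bool has_where, bool has_prewhere, uint64_t sort_limit)
{
    return (has_where && has_prewhere) ? sort_limit : 0;
}

// Fixed variant: any filtration disables the pushdown; otherwise keep the limit.
uint64_t limit_new(bool has_where, bool has_prewhere, uint64_t sort_limit)
{
    return (has_where || has_prewhere) ? 0 : sort_limit;
}

int main()
{
    // ORDER BY ... LIMIT 10 without filters: the old code dropped the
    // optimization (0 = read everything), the new code can stop early.
    std::printf("no filter: old=%llu new=%llu\n",
                (unsigned long long) limit_old(false, false, 10),
                (unsigned long long) limit_new(false, false, 10));

    // WHERE and PREWHERE both present: the old code pushed the limit below
    // the filters and risked returning too few rows; the new code does not.
    std::printf("filtered:  old=%llu new=%llu\n",
                (unsigned long long) limit_old(true, true, 10),
                (unsigned long long) limit_new(true, true, 10));
}

This prints "no filter: old=0 new=10" and "filtered: old=10 new=0", the two cases where the predicates diverge.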

src/Processors/QueryPlan/ReadFromMergeTree.cpp

@@ -181,8 +181,8 @@ ProcessorPtr ReadFromMergeTree::createSource(
 {
     return std::make_shared<TSource>(
         data, metadata_snapshot, part.data_part, max_block_size, preferred_block_size_bytes,
-        preferred_max_column_in_block_size_bytes, required_columns, part.ranges, use_uncompressed_cache,
-        prewhere_info, actions_settings, true, reader_settings, virt_column_names, has_limit_below_one_block);
+        preferred_max_column_in_block_size_bytes, required_columns, part.ranges, use_uncompressed_cache, prewhere_info,
+        actions_settings, true, reader_settings, virt_column_names, part.part_index_in_query, has_limit_below_one_block);
 }
 
 Pipe ReadFromMergeTree::readInOrder(
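
Apart from the re-wrapped argument list, the only functional change in this hunk is that part.part_index_in_query is now forwarded into the source's constructor. A plausible reading (an assumption; the commit message does not spell it out) is that each selected part carries its position within the query's list of parts to read, and the per-part processor needs that index to identify which part it serves. A toy sketch of the threading pattern, with hypothetical stand-in types rather than ClickHouse's:

#include <cstdio>
#include <vector>

// Hypothetical stand-ins: a selected part remembers its position in the
// query's part list, and that index travels into the per-part source,
// mirroring the createSource() call above.
struct PartToRead
{
    const char * name;
    size_t part_index_in_query;
};

struct PartSource
{
    explicit PartSource(const PartToRead & part)
    {
        std::printf("reading %s as part #%zu of this query\n",
                    part.name, part.part_index_in_query);
    }
};

int main()
{
    std::vector<PartToRead> parts{{"all_1_1_0", 0}, {"all_2_2_0", 1}};
    for (const auto & part : parts)
        PartSource source(part);  // one source per part, index carried along
}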

src/Storages/MergeTree/MergeTreeSelectProcessor.cpp

@@ -22,6 +22,7 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor(
     bool check_columns,
     const MergeTreeReaderSettings & reader_settings_,
     const Names & virt_column_names_,
+    size_t part_index_in_query_,
     bool has_limit_below_one_block_)
     : MergeTreeBaseSelectProcessor{
         metadata_snapshot_->getSampleBlockForColumns(required_columns_, storage_.getVirtuals(), storage_.getStorageID()),
@@ -31,6 +32,7 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor(
     required_columns{std::move(required_columns_)},
     data_part{owned_data_part_},
     all_mark_ranges(std::move(mark_ranges_)),
+    part_index_in_query(part_index_in_query_),
     has_limit_below_one_block(has_limit_below_one_block_),
     total_rows(data_part->index_granularity.getRowsCountInRanges(all_mark_ranges))
 {

src/Storages/MergeTree/MergeTreeSelectProcessor.h

@@ -31,6 +31,7 @@ public:
         bool check_columns,
         const MergeTreeReaderSettings & reader_settings,
         const Names & virt_column_names = {},
+        size_t part_index_in_query_ = 0,
         bool has_limit_below_one_block_ = false);
 
     ~MergeTreeSelectProcessor() override;
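
One design note on this header change: the new size_t parameter is inserted in front of an existing defaulted bool, and both carry defaults. An old call site that passed has_limit_below_one_block_ positionally would still compile, but the bool would silently convert into a part index while the limit flag reverted to false, which is why the createSource() call in ReadFromMergeTree.cpp is updated in the same commit. A minimal sketch of the pitfall, using hypothetical types rather than the real class:

#include <cstdio>

// v1: simplified trailing signature before this commit.
struct SelectorV1
{
    explicit SelectorV1(bool has_limit_below_one_block = false)
    {
        std::printf("v1: has_limit=%d\n", has_limit_below_one_block);
    }
};

// v2: a defaulted size_t is inserted in front of the defaulted bool.
struct SelectorV2
{
    explicit SelectorV2(size_t part_index_in_query = 0,
                        bool has_limit_below_one_block = false)
    {
        std::printf("v2: part_index=%zu has_limit=%d\n",
                    part_index_in_query, has_limit_below_one_block);
    }
};

int main()
{
    SelectorV1 a(true);  // prints: v1: has_limit=1
    // The same positional call still compiles against v2, but `true`
    // now lands in part_index_in_query and the limit flag defaults off.
    SelectorV2 b(true);  // prints: v2: part_index=1 has_limit=0
}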