diff --git a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h
index b598ad87aab..3615127ea34 100644
--- a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h
+++ b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h
@@ -169,16 +169,14 @@ protected:
     std::deque<MergeTreeReadTaskPtr> delayed_tasks;
     std::deque<MarkRanges> buffered_ranges;
 
-    /// This setting is used in base algorithm only an an additionally limit the number of granules to read.
+    /// This setting is used in the base algorithm only to additionally limit the number of granules to read.
     /// It is changed in ctor of MergeTreeThreadSelectAlgorithm.
     ///
    /// The reason why we have it here is because MergeTreeReadPool takes the full task
     /// ignoring min_marks_to_read setting in case of remote disk (see MergeTreeReadPool::getTask).
-    /// Which makes a logic of adaptive granulatity
-    /// (merge_tree_min_rows_for_concurrent_read_for_remote_filesystem and
-    /// merge_tree_min_bytes_for_concurrent_read_for_remote_filesystem settings) a bit useless.
+    /// In this case, we won't limit the number of rows to read based on adaptive granularity settings.
     ///
-    /// On the other hand, big tasks are better for remote disk and prefetches.
+    /// Big reading tasks are better for remote disk and prefetches.
     /// So, for now it's easier to limit max_rows_to_read.
     /// Somebody need to refactor this later.
     size_t min_marks_to_read = 0;
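
Note (illustrative, not part of the patch): the comment above explains that MergeTreeReadPool may hand back the full task on remote disks, so the base algorithm keeps min_marks_to_read as a per-step cap on how many granules are processed at once. The following is a minimal, self-contained C++ sketch of that idea only; MarkRange, ReadTask and takeMarks are simplified stand-ins invented for this example, not the real ClickHouse types or functions.

    // Simplified sketch: apply a per-step cap (min_marks_to_read) to a task
    // that was handed out whole, splitting ranges when necessary.
    #include <cstddef>
    #include <deque>
    #include <iostream>

    struct MarkRange
    {
        size_t begin = 0;
        size_t end = 0;
        size_t size() const { return end - begin; }
    };

    struct ReadTask
    {
        std::deque<MarkRange> ranges;  // full set of ranges returned by the pool
    };

    // Take at most `min_marks_to_read` marks from the front of the task,
    // returning the ranges selected for this read step.
    std::deque<MarkRange> takeMarks(ReadTask & task, size_t min_marks_to_read)
    {
        std::deque<MarkRange> selected;
        size_t taken = 0;

        while (!task.ranges.empty() && taken < min_marks_to_read)
        {
            MarkRange & front = task.ranges.front();
            size_t need = min_marks_to_read - taken;

            if (front.size() <= need)
            {
                // The whole range fits into this step.
                taken += front.size();
                selected.push_back(front);
                task.ranges.pop_front();
            }
            else
            {
                // Split the range: read only the first `need` marks now.
                selected.push_back({front.begin, front.begin + need});
                front.begin += need;
                taken += need;
            }
        }
        return selected;
    }

    int main()
    {
        ReadTask task{{{0, 100}, {150, 180}}};  // 130 marks, returned as one task
        size_t min_marks_to_read = 48;          // per-step cap set by the algorithm

        while (!task.ranges.empty())
        {
            auto step = takeMarks(task, min_marks_to_read);
            size_t marks = 0;
            for (const auto & r : step)
                marks += r.size();
            std::cout << "read step with " << marks << " marks\n";
        }
    }

Running the sketch splits the 130-mark task into steps of 48, 48 and 34 marks, which mirrors how a per-step granule cap keeps individual read steps bounded even when the pool itself ignores min_marks_to_read.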