2015-11-19 21:34:53 +00:00
# include <boost/rational.hpp> /// Для вычислений, связанных с коэффициентами сэмплирования.
2015-10-12 07:05:54 +00:00
# include <DB/Core/FieldVisitors.h>
2014-03-13 12:48:07 +00:00
# include <DB/Storages/MergeTree/MergeTreeDataSelectExecutor.h>
2015-04-12 04:39:20 +00:00
# include <DB/Storages/MergeTree/MergeTreeBlockInputStream.h>
2015-06-24 11:03:53 +00:00
# include <DB/Storages/MergeTree/MergeTreeReadPool.h>
# include <DB/Storages/MergeTree/MergeTreeThreadBlockInputStream.h>
2014-03-13 12:48:07 +00:00
# include <DB/Parsers/ASTIdentifier.h>
2015-11-19 21:34:53 +00:00
# include <DB/Parsers/ASTSampleRatio.h>
2014-03-13 12:48:07 +00:00
# include <DB/DataStreams/ExpressionBlockInputStream.h>
# include <DB/DataStreams/FilterBlockInputStream.h>
# include <DB/DataStreams/CollapsingFinalBlockInputStream.h>
2014-12-30 18:04:53 +00:00
# include <DB/DataStreams/AddingConstColumnBlockInputStream.h>
2015-02-15 04:16:11 +00:00
# include <DB/DataStreams/CreatingSetsBlockInputStream.h>
# include <DB/DataStreams/NullBlockInputStream.h>
2015-07-08 04:38:46 +00:00
# include <DB/DataStreams/SummingSortedBlockInputStream.h>
# include <DB/DataStreams/AggregatingSortedBlockInputStream.h>
2014-03-13 12:48:07 +00:00
# include <DB/DataTypes/DataTypesNumberFixed.h>
2015-11-29 08:06:29 +00:00
# include <DB/DataTypes/DataTypeDate.h>
2014-07-28 10:36:11 +00:00
# include <DB/Common/VirtualColumnUtils.h>
2014-09-17 09:59:21 +00:00
2015-02-03 14:37:35 +00:00
2014-03-13 12:48:07 +00:00
namespace DB
{
2015-02-15 02:31:48 +00:00
/// `data_` is the storage to execute SELECTs against; kept by reference for the executor's lifetime.
/// The logger name is derived from the table's log name so messages are attributable to the table.
MergeTreeDataSelectExecutor::MergeTreeDataSelectExecutor(MergeTreeData & data_)
	: data(data_), log(&Logger::get(data.getLogName() + " (SelectExecutor)"))
{
}
2015-11-18 21:37:28 +00:00
2014-07-28 10:36:11 +00:00
/// Build a block containing only the possible values of the virtual columns:
/// currently a single "_part" string column with one row per data part name.
static Block getBlockWithVirtualColumns(const MergeTreeData::DataPartsVector & parts)
{
	ColumnWithTypeAndName part_column(new ColumnString, new DataTypeString, "_part");

	for (const auto & part : parts)
		part_column.column->insert(part->name);

	Block block;
	block.insert(part_column);
	return block;
}
2015-11-18 21:37:28 +00:00
/// Estimate (as a lower bound) how many rows would be read without sampling,
/// by scanning the primary-key index of every part under `key_condition`.
size_t MergeTreeDataSelectExecutor::getApproximateTotalRowsToRead(
	const MergeTreeData::DataPartsVector & parts, const PKCondition & key_condition, const Settings & settings) const
{
	LOG_DEBUG(log, "Preliminary index scan with condition: " << key_condition.toString());

	size_t full_marks_count = 0;

	for (const auto & part : parts)
	{
		MarkRanges ranges = markRangesFromPKRange(part->index, key_condition, settings);

		/** To obtain a lower-bound estimate of the number of rows matching the PK condition,
		  *  count only marks that are guaranteed to be full.
		  * That is, do not count the first and last mark of each range, which may be incomplete.
		  */
		for (const auto & range : ranges)
			if (range.end - range.begin > 2)
				full_marks_count += range.end - range.begin - 2;
	}

	return full_marks_count * data.index_granularity;
}
2015-11-19 21:34:53 +00:00
using RelativeSize = boost : : rational < ASTSampleRatio : : BigNum > ;
static std : : ostream & operator < < ( std : : ostream & ostr , const RelativeSize & x )
{
ostr < < ASTSampleRatio : : toString ( x . numerator ( ) ) < < " / " < < ASTSampleRatio : : toString ( x . denominator ( ) ) ;
return ostr ;
}
2015-11-18 21:37:28 +00:00
/// Convert an absolute sample size (SAMPLE 1000000 — an approximate number of rows to read)
/// into a relative one (SAMPLE 0.1 — a share of the data), capped at 1.
/// If the total cannot be estimated (approx_total_rows == 0), read everything.
static RelativeSize convertAbsoluteSampleSizeToRelative(const ASTPtr & node, size_t approx_total_rows)
{
	if (approx_total_rows == 0)
		return 1;

	const ASTSampleRatio & node_sample = typeid_cast<const ASTSampleRatio &>(*node);

	/// Build the ratio exactly instead of computing `numerator / denominator` with integer
	/// division, which silently truncated any fractional absolute sample size.
	/// For the common whole-number case (denominator == 1) the result is identical.
	RelativeSize absolute_sample_size(node_sample.ratio.numerator, node_sample.ratio.denominator);

	return std::min(RelativeSize(1), absolute_sample_size / approx_total_rows);
}
2014-03-13 12:48:07 +00:00
/** Execute a SELECT over the MergeTree table and return one block input stream per thread.
  * Steps: filter parts by the "_part" virtual column and by the date index; apply SAMPLE/OFFSET
  *  by restricting the sampling-key range; compute mark ranges per part from the PK index;
  *  then spread the ranges among threads (FINAL gets the merging variant).
  * Returns an empty list when sampling or the index leaves nothing to read.
  */
BlockInputStreams MergeTreeDataSelectExecutor::read(
	const Names & column_names_to_return,
	ASTPtr query,
	const Context & context,
	const Settings & settings,
	QueryProcessingStage::Enum & processed_stage,
	const size_t max_block_size,
	const unsigned threads,
	size_t * part_index,				/// optional in-out counter of parts across calls; may be nullptr
	Int64 max_block_number_to_read) const	/// 0 disables the upper bound on part block numbers
{
	size_t part_index_var = 0;
	if (!part_index)
		part_index = &part_index_var;

	MergeTreeData::DataPartsVector parts = data.getDataPartsVector();

	/// If the query constrains the virtual column _part, select only the matching parts.
	Names virt_column_names, real_column_names;
	for (const String & name : column_names_to_return)
		if (name != "_part" &&
			name != "_part_index")
			real_column_names.push_back(name);
		else
			virt_column_names.push_back(name);

	/// If the query asks only for virtual columns, we must still read at least one real column.
	if (real_column_names.empty())
		real_column_names.push_back(ExpressionActions::getSmallestColumn(data.getColumnsList()));

	Block virtual_columns_block = getBlockWithVirtualColumns(parts);

	/// If at least one virtual column was requested, try filtering the candidate part names by the query.
	if (!virt_column_names.empty())
		VirtualColumnUtils::filterBlockWithQuery(query, virtual_columns_block, context);

	std::multiset<String> values = VirtualColumnUtils::extractSingleValueFromBlock<String>(virtual_columns_block, "_part");

	data.check(real_column_names);
	processed_stage = QueryProcessingStage::FetchColumns;

	PKCondition key_condition(query, context, data.getColumnsList(), data.getSortDescription());
	PKCondition date_condition(query, context, data.getColumnsList(), SortDescription(1, SortColumnDescription(data.date_column_name, 1)));

	if (settings.force_primary_key && key_condition.alwaysUnknown())
		throw Exception("Primary key is not used and setting 'force_primary_key' is set.", ErrorCodes::INDEX_NOT_USED);

	if (settings.force_index_by_date && date_condition.alwaysUnknown())
		throw Exception("Index by date is not used and setting 'force_index_by_date' is set.", ErrorCodes::INDEX_NOT_USED);

	/// Select the parts that may contain data satisfying date_condition, match the _part filter,
	///  and do not exceed max_block_number_to_read.
	{
		const DataTypes data_types_date { new DataTypeDate };

		auto prev_parts = parts;
		parts.clear();

		for (const auto & part : prev_parts)
		{
			if (values.find(part->name) == values.end())
				continue;

			Field left = static_cast<UInt64>(part->left_date);
			Field right = static_cast<UInt64>(part->right_date);

			if (!date_condition.mayBeTrueInRange(&left, &right, data_types_date))
				continue;

			if (max_block_number_to_read && part->right > max_block_number_to_read)
				continue;

			parts.push_back(part);
		}
	}

	/// Sampling.
	Names column_names_to_read = real_column_names;
	typedef Poco::SharedPtr<ASTFunction> ASTFunctionPtr;
	ASTFunctionPtr filter_function;
	ExpressionActionsPtr filter_expression;

	RelativeSize relative_sample_size = 0;
	RelativeSize relative_sample_offset = 0;

	ASTSelectQuery & select = *typeid_cast<ASTSelectQuery *>(&*query);

	if (select.sample_size)
	{
		relative_sample_size.assign(
			typeid_cast<const ASTSampleRatio &>(*select.sample_size).ratio.numerator,
			typeid_cast<const ASTSampleRatio &>(*select.sample_size).ratio.denominator);

		if (relative_sample_size < 0)
			throw Exception("Negative sample size", ErrorCodes::ARGUMENT_OUT_OF_BOUND);

		relative_sample_offset = 0;
		if (select.sample_offset)
			relative_sample_offset.assign(
				typeid_cast<const ASTSampleRatio &>(*select.sample_offset).ratio.numerator,
				typeid_cast<const ASTSampleRatio &>(*select.sample_offset).ratio.denominator);

		if (relative_sample_offset < 0)
			throw Exception("Negative sample offset", ErrorCodes::ARGUMENT_OUT_OF_BOUND);

		/// Convert an absolute sampling amount (SAMPLE 1000000 — how many rows to read)
		///  into a relative one (what share of the data to read).
		size_t approx_total_rows = 0;
		if (relative_sample_size > 1 || relative_sample_offset > 1)
			approx_total_rows = getApproximateTotalRowsToRead(parts, key_condition, settings);

		if (relative_sample_size > 1)
		{
			relative_sample_size = convertAbsoluteSampleSizeToRelative(select.sample_size, approx_total_rows);
			LOG_DEBUG(log, "Selected relative sample size: " << relative_sample_size);
		}

		/// SAMPLE 1 is the same as the absence of SAMPLE.
		if (relative_sample_size == 1)
			relative_sample_size = 0;

		if (relative_sample_offset > 0 && 0 == relative_sample_size)
			throw Exception("Sampling offset is incorrect because no sampling", ErrorCodes::ARGUMENT_OUT_OF_BOUND);

		if (relative_sample_offset > 1)
		{
			relative_sample_offset = convertAbsoluteSampleSizeToRelative(select.sample_offset, approx_total_rows);
			LOG_DEBUG(log, "Selected relative sample offset: " << relative_sample_offset);
		}
	}

	/** Which range of sampling-key values needs to be read?
	  * First, over the whole range (the "universum") we select an interval
	  *  of relative size relative_sample_size, shifted from the start by relative_sample_offset.
	  *
	  * Example: SAMPLE 0.4 OFFSET 0.3:
	  *
	  * [------********------]
	  *        ^ - offset
	  *        <------> - size
	  *
	  * If the interval runs past the end of the universum, its right part is cut off.
	  *
	  * Example: SAMPLE 0.4 OFFSET 0.8:
	  *
	  * [----------------****]
	  *                  ^ - offset
	  *                  <------> - size
	  *
	  * Further, if the settings parallel_replicas_count, parallel_replica_offset are set,
	  *  the obtained interval must be split into parallel_replicas_count pieces,
	  *  and the piece with number parallel_replica_offset (counting from zero) is selected.
	  *
	  * Example: SAMPLE 0.4 OFFSET 0.3, parallel_replicas_count = 2, parallel_replica_offset = 1:
	  *
	  * [----------****------]
	  *        ^ - offset
	  *        <------> - size
	  *        <--><--> - pieces for the different parallel_replica_offset; we pick the second one.
	  *
	  * It is very important that the intervals for different parallel_replica_offset cover the whole range without gaps or overlaps.
	  * It is also important that the whole universum can be covered using SAMPLE 0.1 OFFSET 0, ... OFFSET 0.9 and similar decimal fractions.
	  */
	bool use_sampling = relative_sample_size > 0 || settings.parallel_replicas_count > 1;
	bool no_data = false;	/// Nothing is left after sampling.

	if (use_sampling)
	{
		RelativeSize size_of_universum = 0;
		DataTypePtr type = data.getPrimaryExpression()->getSampleBlock().getByName(data.sampling_expression->getColumnName()).type;

		/// The universum size is the cardinality of the sampling column's unsigned integer type.
		if (typeid_cast<const DataTypeUInt64 *>(type.get()))
			size_of_universum = RelativeSize(std::numeric_limits<UInt64>::max()) + 1;
		else if (typeid_cast<const DataTypeUInt32 *>(type.get()))
			size_of_universum = RelativeSize(std::numeric_limits<UInt32>::max()) + 1;
		else if (typeid_cast<const DataTypeUInt16 *>(type.get()))
			size_of_universum = RelativeSize(std::numeric_limits<UInt16>::max()) + 1;
		else if (typeid_cast<const DataTypeUInt8 *>(type.get()))
			size_of_universum = RelativeSize(std::numeric_limits<UInt8>::max()) + 1;
		else
			throw Exception("Invalid sampling column type in storage parameters: " + type->getName() + ". Must be unsigned integer type.", ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER);

		if (settings.parallel_replicas_count > 1)
		{
			/// With no explicit SAMPLE, each replica reads its 1/parallel_replicas_count share.
			if (relative_sample_size == 0)
				relative_sample_size = 1;

			relative_sample_size /= settings.parallel_replicas_count;
			relative_sample_offset += relative_sample_size * settings.parallel_replica_offset;
		}

		if (relative_sample_offset >= 1)
			no_data = true;

		/// Compute the half-open interval [lower, upper) of sampling-column values.
		bool has_lower_limit = false;
		bool has_upper_limit = false;

		RelativeSize lower_limit_rational = relative_sample_offset * size_of_universum;
		RelativeSize upper_limit_rational = (relative_sample_offset + relative_sample_size) * size_of_universum;

		UInt64 lower = boost::rational_cast<ASTSampleRatio::BigNum>(lower_limit_rational);
		UInt64 upper = boost::rational_cast<ASTSampleRatio::BigNum>(upper_limit_rational);

		if (lower > 0)
			has_lower_limit = true;

		if (upper_limit_rational < size_of_universum)
			has_upper_limit = true;

		/*std::cerr << std::fixed << std::setprecision(100)
			<< "relative_sample_size: " << relative_sample_size << "\n"
			<< "relative_sample_offset: " << relative_sample_offset << "\n"
			<< "lower_limit_float: " << lower_limit_rational << "\n"
			<< "upper_limit_float: " << upper_limit_rational << "\n"
			<< "lower: " << lower << "\n"
			<< "upper: " << upper << "\n";*/

		if ((has_upper_limit && upper == 0)
			|| (has_lower_limit && has_upper_limit && lower == upper))
			no_data = true;

		if (no_data || (!has_lower_limit && !has_upper_limit))
		{
			use_sampling = false;
		}
		else
		{
			/// Add the limits as conditions, to prune more data on the repeated index scan
			///  and while processing the query.

			ASTFunctionPtr lower_function;
			ASTFunctionPtr upper_function;

			if (has_lower_limit)
			{
				if (!key_condition.addCondition(data.sampling_expression->getColumnName(), Range::createLeftBounded(lower, true)))
					throw Exception("Sampling column not in primary key", ErrorCodes::ILLEGAL_COLUMN);

				/// Build AST: sampling_expression >= lower
				ASTPtr args = new ASTExpressionList;
				args->children.push_back(data.sampling_expression);
				args->children.push_back(new ASTLiteral(StringRange(), lower));

				lower_function = new ASTFunction;
				lower_function->name = "greaterOrEquals";
				lower_function->arguments = args;
				lower_function->children.push_back(lower_function->arguments);

				filter_function = lower_function;
			}

			if (has_upper_limit)
			{
				if (!key_condition.addCondition(data.sampling_expression->getColumnName(), Range::createRightBounded(upper, false)))
					throw Exception("Sampling column not in primary key", ErrorCodes::ILLEGAL_COLUMN);

				/// Build AST: sampling_expression < upper
				ASTPtr args = new ASTExpressionList;
				args->children.push_back(data.sampling_expression);
				args->children.push_back(new ASTLiteral(StringRange(), upper));

				upper_function = new ASTFunction;
				upper_function->name = "less";
				upper_function->arguments = args;
				upper_function->children.push_back(upper_function->arguments);

				filter_function = upper_function;
			}

			if (has_lower_limit && has_upper_limit)
			{
				/// Combine both bounds: lower_function AND upper_function
				ASTPtr args = new ASTExpressionList;
				args->children.push_back(lower_function);
				args->children.push_back(upper_function);

				filter_function = new ASTFunction;
				filter_function->name = "and";
				filter_function->arguments = args;
				filter_function->children.push_back(filter_function->arguments);
			}

			filter_expression = ExpressionAnalyzer(filter_function, context, nullptr, data.getColumnsList()).getActions(false);

			/// Add the columns needed to evaluate sampling_expression.
			std::vector<String> add_columns = filter_expression->getRequiredColumns();
			column_names_to_read.insert(column_names_to_read.end(), add_columns.begin(), add_columns.end());
			std::sort(column_names_to_read.begin(), column_names_to_read.end());
			column_names_to_read.erase(std::unique(column_names_to_read.begin(), column_names_to_read.end()), column_names_to_read.end());
		}
	}

	if (no_data)
	{
		LOG_DEBUG(log, "Sampling yields no data.");
		return {};
	}

	LOG_DEBUG(log, "Key condition: " << key_condition.toString());
	LOG_DEBUG(log, "Date condition: " << date_condition.toString());

	/// PREWHERE
	ExpressionActionsPtr prewhere_actions;
	String prewhere_column;
	if (select.prewhere_expression)
	{
		ExpressionAnalyzer analyzer(select.prewhere_expression, context, nullptr, data.getColumnsList());
		prewhere_actions = analyzer.getActions(false);
		prewhere_column = select.prewhere_expression->getColumnName();
		SubqueriesForSets prewhere_subqueries = analyzer.getSubqueriesForSets();

		/** Evaluate the subqueries right now.
		  * NOTE Drawback — these computations do not fit into the query execution pipeline.
		  * They are done before the pipeline starts; they cannot be cancelled; no progress packets are sent during them.
		  */
		if (!prewhere_subqueries.empty())
			CreatingSetsBlockInputStream(new NullBlockInputStream, prewhere_subqueries, settings.limits).read();
	}

	RangesInDataParts parts_with_ranges;

	/// Determine which range of marks to read from each part.
	size_t sum_marks = 0;
	size_t sum_ranges = 0;
	for (auto & part : parts)
	{
		RangesInDataPart ranges(part, (*part_index)++);

		if (data.mode != MergeTreeData::Unsorted)
			ranges.ranges = markRangesFromPKRange(part->index, key_condition, settings);
		else
			ranges.ranges = MarkRanges{MarkRange{0, part->size}};

		if (!ranges.ranges.empty())
		{
			parts_with_ranges.push_back(ranges);

			sum_ranges += ranges.ranges.size();
			for (const auto & range : ranges.ranges)
				sum_marks += range.end - range.begin;
		}
	}

	LOG_DEBUG(log, "Selected " << parts.size() << " parts by date, " << parts_with_ranges.size() << " parts by key, "
		<< sum_marks << " marks to read from " << sum_ranges << " ranges");

	if (parts_with_ranges.empty())
		return {};

	BlockInputStreams res;

	if (select.final)
	{
		/// Add the columns needed to evaluate the primary key and the sign.
		std::vector<String> add_columns = data.getPrimaryExpression()->getRequiredColumns();
		column_names_to_read.insert(column_names_to_read.end(), add_columns.begin(), add_columns.end());

		if (!data.sign_column.empty())
			column_names_to_read.push_back(data.sign_column);

		std::sort(column_names_to_read.begin(), column_names_to_read.end());
		column_names_to_read.erase(std::unique(column_names_to_read.begin(), column_names_to_read.end()), column_names_to_read.end());

		res = spreadMarkRangesAmongThreadsFinal(
			parts_with_ranges,
			threads,
			column_names_to_read,
			max_block_size,
			settings.use_uncompressed_cache,
			prewhere_actions,
			prewhere_column,
			virt_column_names,
			settings,
			context);
	}
	else
	{
		res = spreadMarkRangesAmongThreads(
			parts_with_ranges,
			threads,
			column_names_to_read,
			max_block_size,
			settings.use_uncompressed_cache,
			prewhere_actions,
			prewhere_column,
			virt_column_names,
			settings);
	}

	/// Apply the sampling filter on top of every resulting stream.
	if (use_sampling)
		for (auto & stream : res)
			stream = new FilterBlockInputStream(new ExpressionBlockInputStream(stream, filter_expression), filter_function->getColumnName());

	return res;
}
2015-02-15 04:16:11 +00:00
2014-03-13 12:48:07 +00:00
/** Distribute the mark ranges of the selected parts among `threads` input streams.
  * Two strategies: a shared MergeTreeReadPool (uniform distribution, work stealing between threads)
  *  when merge_tree_uniform_read_distribution == 1, or a static up-front split of marks otherwise.
  */
BlockInputStreams MergeTreeDataSelectExecutor::spreadMarkRangesAmongThreads(
	RangesInDataParts parts,
	size_t threads,
	const Names & column_names,
	size_t max_block_size,
	bool use_uncompressed_cache,
	ExpressionActionsPtr prewhere_actions,
	const String & prewhere_column,
	const Names & virt_columns,
	const Settings & settings) const
{
	/// Convert row-based settings into mark counts, rounding up.
	const std::size_t min_marks_for_concurrent_read =
		(settings.merge_tree_min_rows_for_concurrent_read + data.index_granularity - 1) / data.index_granularity;
	const std::size_t max_marks_to_use_cache =
		(settings.merge_tree_max_rows_to_use_cache + data.index_granularity - 1) / data.index_granularity;

	/// Count the marks for each part.
	std::vector<size_t> sum_marks_in_parts(parts.size());
	size_t sum_marks = 0;
	for (size_t i = 0; i < parts.size(); ++i)
	{
		/// Let the ranges be listed from right to left, so the leftmost range can be dropped with pop_back().
		std::reverse(parts[i].ranges.begin(), parts[i].ranges.end());

		for (const auto & range : parts[i].ranges)
			sum_marks_in_parts[i] += range.end - range.begin;

		sum_marks += sum_marks_in_parts[i];
	}

	/// Large scans would evict everything useful from the uncompressed cache.
	if (sum_marks > max_marks_to_use_cache)
		use_uncompressed_cache = false;

	BlockInputStreams res;

	if (sum_marks > 0 && settings.merge_tree_uniform_read_distribution == 1)
	{
		/// Reduce the number of threads if there is too little data.
		if (sum_marks < threads * min_marks_for_concurrent_read && parts.size() < threads)
			threads = std::max((sum_marks + min_marks_for_concurrent_read - 1) / min_marks_for_concurrent_read, parts.size());

		MergeTreeReadPoolPtr pool = std::make_shared<MergeTreeReadPool>(
			threads, sum_marks, min_marks_for_concurrent_read, parts, data, prewhere_actions, prewhere_column, true,
			column_names, MergeTreeReadPool::BackoffSettings(settings));

		/// Estimate the total number of rows — for the progress bar.
		const std::size_t total_rows = data.index_granularity * sum_marks;
		LOG_TRACE(log, "Reading approx. " << total_rows << " rows");

		for (std::size_t i = 0; i < threads; ++i)
		{
			res.emplace_back(new MergeTreeThreadBlockInputStream{
				i, pool, min_marks_for_concurrent_read, max_block_size, data, use_uncompressed_cache,
				prewhere_actions,
				prewhere_column, settings, virt_columns
			});

			if (i == 0)
				/// Set the approximate row count only on the first source (to avoid double counting).
				static_cast<IProfilingBlockInputStream &>(*res.front()).setTotalRowsApprox(total_rows);
		}
	}
	else if (sum_marks > 0)
	{
		const size_t min_marks_per_thread = (sum_marks - 1) / threads + 1;

		for (size_t i = 0; i < threads && !parts.empty(); ++i)
		{
			size_t need_marks = min_marks_per_thread;

			/// Loop over the parts.
			while (need_marks > 0 && !parts.empty())
			{
				RangesInDataPart & part = parts.back();
				size_t & marks_in_part = sum_marks_in_parts.back();

				/// Do not take too few rows from a part.
				if (marks_in_part >= min_marks_for_concurrent_read &&
					need_marks < min_marks_for_concurrent_read)
					need_marks = min_marks_for_concurrent_read;

				/// Do not leave too few rows in a part.
				if (marks_in_part > need_marks &&
					marks_in_part - need_marks < min_marks_for_concurrent_read)
					need_marks = marks_in_part;

				MarkRanges ranges_to_get_from_part;

				/// Take the whole part if it is small enough.
				if (marks_in_part <= need_marks)
				{
					/// Restore the original order of the ranges (they were reversed above).
					std::reverse(part.ranges.begin(), part.ranges.end());

					ranges_to_get_from_part = part.ranges;

					need_marks -= marks_in_part;
					parts.pop_back();
					sum_marks_in_parts.pop_back();
				}
				else
				{
					/// Loop over the ranges of the part, consuming marks from the left.
					while (need_marks > 0)
					{
						if (part.ranges.empty())
							throw Exception("Unexpected end of ranges while spreading marks among threads", ErrorCodes::LOGICAL_ERROR);

						MarkRange & range = part.ranges.back();

						const size_t marks_in_range = range.end - range.begin;
						const size_t marks_to_get_from_range = std::min(marks_in_range, need_marks);

						ranges_to_get_from_part.emplace_back(range.begin, range.begin + marks_to_get_from_range);
						range.begin += marks_to_get_from_range;
						marks_in_part -= marks_to_get_from_range;
						need_marks -= marks_to_get_from_range;
						if (range.begin == range.end)
							part.ranges.pop_back();
					}
				}

				BlockInputStreamPtr source_stream = new MergeTreeBlockInputStream(
					data.getFullPath() + part.data_part->name + '/', max_block_size, column_names, data,
					part.data_part, ranges_to_get_from_part, use_uncompressed_cache,
					prewhere_actions, prewhere_column, true, settings.min_bytes_to_use_direct_io, settings.max_read_buffer_size, true);

				res.push_back(source_stream);

				/// Wrap the stream to materialize any requested virtual columns as constants.
				for (const String & virt_column : virt_columns)
				{
					if (virt_column == "_part")
						res.back() = new AddingConstColumnBlockInputStream<String>(
							res.back(), new DataTypeString, part.data_part->name, "_part");
					else if (virt_column == "_part_index")
						res.back() = new AddingConstColumnBlockInputStream<UInt64>(
							res.back(), new DataTypeUInt64, part.part_index_in_query, "_part_index");
				}
			}
		}

		if (!parts.empty())
			throw Exception("Couldn't spread marks among threads", ErrorCodes::LOGICAL_ERROR);
	}

	return res;
}
/** Build streams for reading with FINAL semantics: all selected ranges are read and then
  * merged according to the table engine mode (collapsing / summing / aggregating / plain merge),
  * so that the result looks as if a full merge had already happened.
  *
  * Depending on settings.merge_tree_uniform_read_distribution, reading is either driven by a
  * shared MergeTreeReadPool (work-stealing among threads) or by one stream per data part.
  */
BlockInputStreams MergeTreeDataSelectExecutor::spreadMarkRangesAmongThreadsFinal(
    RangesInDataParts parts,
    size_t threads,
    const Names & column_names,
    size_t max_block_size,
    bool use_uncompressed_cache,
    ExpressionActionsPtr prewhere_actions,
    const String & prewhere_column,
    const Names & virt_columns,
    const Settings & settings,
    const Context & context) const
{
    const size_t max_marks_to_use_cache =
        (settings.merge_tree_max_rows_to_use_cache + data.index_granularity - 1) / data.index_granularity;
    const size_t min_marks_for_read_task =
        (settings.merge_tree_min_rows_for_concurrent_read + data.index_granularity - 1) / data.index_granularity;

    /// Total number of marks across every selected range of every part.
    size_t sum_marks = 0;
    for (const auto & part : parts)
        for (const auto & range : part.ranges)
            sum_marks += range.end - range.begin;

    /// Reading too much data would evict everything useful from the uncompressed cache.
    if (sum_marks > max_marks_to_use_cache)
        use_uncompressed_cache = false;

    BlockInputStreams to_merge;

    if (settings.merge_tree_uniform_read_distribution == 1)
    {
        /// List the ranges right-to-left so the leftmost one can be dropped via pop_back().
        for (auto & part : parts)
            std::reverse(std::begin(part.ranges), std::end(part.ranges));

        MergeTreeReadPoolPtr pool = std::make_shared<MergeTreeReadPool>(
            parts.size(), sum_marks, min_marks_for_read_task, parts, data, prewhere_actions, prewhere_column, true,
            column_names, MergeTreeReadPool::BackoffSettings{}, true);

        /// Estimate the total number of rows — used for the progress bar.
        const std::size_t total_rows = data.index_granularity * sum_marks;
        LOG_TRACE(log, "Reading approx. " << total_rows << " rows");

        for (const auto i : ext::range(0, parts.size()))
        {
            BlockInputStreamPtr source_stream{
                new MergeTreeThreadBlockInputStream{
                    i, pool, min_marks_for_read_task, max_block_size, data, use_uncompressed_cache, prewhere_actions,
                    prewhere_column, settings, virt_columns
                }
            };

            /// Set the approximate row count on the first source only.
            if (i == 0)
                static_cast<IProfilingBlockInputStream &>(*source_stream).setTotalRowsApprox(total_rows);

            to_merge.push_back(new ExpressionBlockInputStream(source_stream, data.getPrimaryExpression()));
        }
    }
    else
    {
        /// One reading stream per data part; virtual columns are attached as constants.
        for (auto & part : parts)
        {
            BlockInputStreamPtr source_stream = new MergeTreeBlockInputStream(
                data.getFullPath() + part.data_part->name + '/', max_block_size, column_names, data,
                part.data_part, part.ranges, use_uncompressed_cache,
                prewhere_actions, prewhere_column, true, settings.min_bytes_to_use_direct_io, settings.max_read_buffer_size, true);

            for (const String & virt_column : virt_columns)
            {
                if (virt_column == "_part")
                    source_stream = new AddingConstColumnBlockInputStream<String>(
                        source_stream, new DataTypeString, part.data_part->name, "_part");
                else if (virt_column == "_part_index")
                    source_stream = new AddingConstColumnBlockInputStream<UInt64>(
                        source_stream, new DataTypeUInt64, part.part_index_in_query, "_part_index");
            }

            to_merge.push_back(new ExpressionBlockInputStream(source_stream, data.getPrimaryExpression()));
        }
    }

    BlockInputStreams res;

    if (to_merge.size() == 1)
    {
        if (!data.sign_column.empty())
        {
            /// A lone collapsing stream still needs the rows with negative sign filtered out.
            ExpressionActionsPtr sign_filter_expression;
            String sign_filter_column;

            createPositiveSignCondition(sign_filter_expression, sign_filter_column, context);

            res.push_back(new FilterBlockInputStream(new ExpressionBlockInputStream(to_merge[0], sign_filter_expression), sign_filter_column));
        }
        else
            res = to_merge;
    }
    else if (to_merge.size() > 1)
    {
        /// Several streams: merge them with the algorithm matching the engine mode.
        BlockInputStreamPtr merged;
        switch (data.mode)
        {
            case MergeTreeData::Ordinary:
                merged = new MergingSortedBlockInputStream(to_merge, data.getSortDescription(), max_block_size);
                break;

            case MergeTreeData::Collapsing:
                merged = new CollapsingFinalBlockInputStream(to_merge, data.getSortDescription(), data.sign_column);
                break;

            case MergeTreeData::Summing:
                merged = new SummingSortedBlockInputStream(to_merge, data.getSortDescription(), data.columns_to_sum, max_block_size);
                break;

            case MergeTreeData::Aggregating:
                merged = new AggregatingSortedBlockInputStream(to_merge, data.getSortDescription(), max_block_size);
                break;

            case MergeTreeData::Unsorted:
                throw Exception("UnsortedMergeTree doesn't support FINAL", ErrorCodes::LOGICAL_ERROR);
        }
        res.push_back(merged);
    }

    return res;
}
2015-11-18 21:37:28 +00:00
void MergeTreeDataSelectExecutor : : createPositiveSignCondition ( ExpressionActionsPtr & out_expression , String & out_column , const Context & context ) const
2014-03-13 12:48:07 +00:00
{
ASTFunction * function = new ASTFunction ;
ASTPtr function_ptr = function ;
ASTExpressionList * arguments = new ASTExpressionList ;
ASTPtr arguments_ptr = arguments ;
ASTIdentifier * sign = new ASTIdentifier ;
ASTPtr sign_ptr = sign ;
ASTLiteral * one = new ASTLiteral ;
ASTPtr one_ptr = one ;
function - > name = " equals " ;
function - > arguments = arguments_ptr ;
function - > children . push_back ( arguments_ptr ) ;
arguments - > children . push_back ( sign_ptr ) ;
arguments - > children . push_back ( one_ptr ) ;
sign - > name = data . sign_column ;
sign - > kind = ASTIdentifier : : Column ;
one - > value = Field ( static_cast < Int64 > ( 1 ) ) ;
2015-07-15 04:50:48 +00:00
out_expression = ExpressionAnalyzer ( function_ptr , context , { } , data . getColumnsList ( ) ) . getActions ( false ) ;
2014-03-13 12:48:07 +00:00
out_column = function - > getColumnName ( ) ;
}
/// Computes the set of mark ranges outside of which no key from the given key range can occur.
/** Narrow down the part's marks to those that may contain matching keys, by a coarse-to-fine
  * descent over the primary index: a candidate range is repeatedly subdivided while the
  * PK condition can still be true inside it, down to single-mark granularity.
  */
MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange(
    const MergeTreeData::DataPart::Index & index, const PKCondition & key_condition, const Settings & settings) const
{
    MarkRanges res;

    size_t min_marks_for_seek = (settings.merge_tree_min_rows_for_seek + data.index_granularity - 1) / data.index_granularity;
    size_t key_size = data.getSortDescription().size();
    size_t marks_count = index.size() / key_size;

    if (key_condition.alwaysUnknown())
    {
        /// The condition cannot use the index at all — take the whole part.
        res.push_back(MarkRange(0, marks_count));
        return res;
    }

    /** The stack always holds non-overlapping candidate ranges, leftmost on top (back()).
      * At each step, take the left range and check whether it can match.
      * If it can, split it into smaller pieces and push them; otherwise drop it.
      * A range one mark long is appended to the answer instead of being split further.
      */
    std::vector<MarkRange> pending;
    pending.push_back(MarkRange(0, marks_count));

    while (!pending.empty())
    {
        MarkRange current = pending.back();
        pending.pop_back();

        /// For the rightmost range there is no right boundary in the index — check the open interval.
        bool may_be_true = (current.end == marks_count)
            ? key_condition.mayBeTrueAfter(&index[current.begin * key_size], data.primary_key_data_types)
            : key_condition.mayBeTrueInRange(&index[current.begin * key_size], &index[current.end * key_size], data.primary_key_data_types);

        if (!may_be_true)
            continue;

        if (current.end == current.begin + 1)
        {
            /// A useful single-mark interval: either extend the last range or start a new one,
            /// depending on whether the gap is worth a separate seek.
            if (res.empty() || current.begin - res.back().end > min_marks_for_seek)
                res.push_back(current);
            else
                res.back().end = current.end;
        }
        else
        {
            /// Split the range and push the pieces right-to-left, so the leftmost ends up on top.
            size_t step = (current.end - current.begin - 1) / settings.merge_tree_coarse_index_granularity + 1;
            size_t end;

            for (end = current.end; end > current.begin + step; end -= step)
                pending.push_back(MarkRange(end - step, end));

            pending.push_back(MarkRange(current.begin, end));
        }
    }

    return res;
}
}