#pragma once

#include <DB/DataStreams/IProfilingBlockInputStream.h>
#include <DB/Storages/MergeTree/MergeTreeData.h>
#include <DB/Storages/MergeTree/PKCondition.h>
#include <DB/Storages/MergeTree/MergeTreeReader.h>
#include <DB/Storages/MergeTree/MergeTreeReadPool.h>
#include <DB/Columns/ColumnNullable.h>


namespace DB
{


/** Used in conjunction with MergeTreeReadPool, asking it for more work to do and performing
  * whatever reads it is asked to perform. */
class MergeTreeThreadBlockInputStream : public IProfilingBlockInputStream
{
	/// "thread" index (there are N threads and each thread is assigned an index in the interval [0..N-1])
	std::size_t thread;

public:
	MergeTreeThreadBlockInputStream(
		const std::size_t thread,
		const MergeTreeReadPoolPtr & pool, const std::size_t min_marks_to_read, const std::size_t block_size,
		MergeTreeData & storage, const bool use_uncompressed_cache, const ExpressionActionsPtr & prewhere_actions,
		const String & prewhere_column, const Settings & settings, const Names & virt_column_names)
		: thread{thread}, pool{pool}, block_size_marks{block_size / storage.index_granularity},
		/// round min_marks_to_read up to nearest multiple of block_size expressed in marks
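		/// (for illustration only: assuming index_granularity = 8192 and block_size = 65536 rows,
		///  one block covers 8 marks, so e.g. min_marks_to_read = 10 is rounded up to 16 marks)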
		min_marks_to_read{block_size
			? (min_marks_to_read * storage.index_granularity + block_size - 1)
				/ block_size * block_size / storage.index_granularity
			: min_marks_to_read
		},
		storage{storage}, use_uncompressed_cache{use_uncompressed_cache}, prewhere_actions{prewhere_actions},
		prewhere_column{prewhere_column}, min_bytes_to_use_direct_io{settings.min_bytes_to_use_direct_io},
		max_read_buffer_size{settings.max_read_buffer_size}, virt_column_names{virt_column_names},
		log{&Logger::get("MergeTreeThreadBlockInputStream")}
	{}

	String getName() const override { return "MergeTreeThread"; }

	String getID() const override
	{
		std::stringstream res;
		/// @todo print some meaningful information
//		res << "MergeTreeThread(columns";
//
//		for (const auto & column : columns)
//			res << ", " << column.name;
//
//		if (prewhere_actions)
//			res << ", prewhere, " << prewhere_actions->getID();
//
//		res << ", marks";
//
//		for (size_t i = 0; i < all_mark_ranges.size(); ++i)
//			res << ", " << all_mark_ranges[i].begin << ", " << all_mark_ranges[i].end;
//
//		res << ")";
		res << static_cast<const void *>(this);
		return res.str();
	}

protected:
	/// We will call progressImpl ourselves.
	void progress(const Progress & value) override {}

	Block readImpl() override
	{
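		/// Keep asking the shared pool for work until a non-empty block is produced,
		/// reading is cancelled, or the pool runs out of tasks.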
		Block res;

		while (!res && !isCancelled())
		{
			if (!task && !getNewTask())
				break;

			res = readFromPart();

			if (res)
				injectVirtualColumns(res);

			if (task->mark_ranges.empty())
				task = {};
		}

		return res;
	}

private:
	/// Requests a read task from MergeTreeReadPool and signals whether it got one
	bool getNewTask()
	{
		task = pool->getTask(min_marks_to_read, thread);

		if (!task)
		{
			/** Close the files (even before the object is destroyed), so that when many sources are created,
			  * but only a few of them are read from at the same time, the buffers do not stay in memory. */
			reader = {};
			pre_reader = {};
			return false;
		}

		const auto path = storage.getFullPath() + task->data_part->name + '/';

		/// Lets the pool reduce the number of threads if reads are too slow.
		auto profile_callback = [this](ReadBufferFromFileBase::ProfileInfo info) { pool->profileFeedback(info); };
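
		/// A reader is created anew for each task, since every task may refer to a different data part.
		/// If a previous reader exists, its average value size hints are passed on so that read buffers
		/// for variable-sized columns start with realistic size estimates.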

		if (!reader)
		{
			if (use_uncompressed_cache)
				owned_uncompressed_cache = storage.context.getUncompressedCache();

			owned_mark_cache = storage.context.getMarkCache();

			reader = std::make_unique<MergeTreeReader>(
				path, task->data_part, task->columns, owned_uncompressed_cache.get(),
				owned_mark_cache.get(), true,
				storage, task->mark_ranges, min_bytes_to_use_direct_io, max_read_buffer_size, MergeTreeReader::ValueSizeMap{}, profile_callback);

			if (prewhere_actions)
				pre_reader = std::make_unique<MergeTreeReader>(
					path, task->data_part, task->pre_columns, owned_uncompressed_cache.get(),
					owned_mark_cache.get(), true,
					storage, task->mark_ranges, min_bytes_to_use_direct_io,
					max_read_buffer_size, MergeTreeReader::ValueSizeMap{}, profile_callback);
		}
		else
		{
			/// retain avg_value_size_hints
			reader = std::make_unique<MergeTreeReader>(
				path, task->data_part, task->columns, owned_uncompressed_cache.get(),
				owned_mark_cache.get(), true,
				storage, task->mark_ranges, min_bytes_to_use_direct_io, max_read_buffer_size,
				reader->getAvgValueSizeHints(), profile_callback);

			if (prewhere_actions)
				pre_reader = std::make_unique<MergeTreeReader>(
					path, task->data_part, task->pre_columns, owned_uncompressed_cache.get(),
					owned_mark_cache.get(), true,
					storage, task->mark_ranges, min_bytes_to_use_direct_io,
					max_read_buffer_size, pre_reader->getAvgValueSizeHints(), profile_callback);
		}

		return true;
	}

	Block readFromPart()
	{
		Block res;

		if (prewhere_actions)
		{
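			/** Sketch of the PREWHERE path implemented below:
			  *  1. read only the PREWHERE columns for up to block_size_marks marks;
			  *  2. evaluate the PREWHERE expression and take the resulting filter column;
			  *  3. if the filter is constant: either drop the block (constant 0) or read all remaining
			  *     columns for the same mark ranges (constant non-zero);
			  *  4. if the filter is a regular UInt8 column: read the remaining columns only for marks that
			  *     contain at least one non-zero filter value, then filter every column. */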
			do
			{
				/// Read a whole block of the columns needed to evaluate the expression in PREWHERE.
				size_t space_left = std::max(1LU, block_size_marks);
				MarkRanges ranges_to_read;

				while (!task->mark_ranges.empty() && space_left && !isCancelled())
				{
					auto & range = task->mark_ranges.back();

					size_t marks_to_read = std::min(range.end - range.begin, space_left);
					pre_reader->readRange(range.begin, range.begin + marks_to_read, res);

					ranges_to_read.emplace_back(range.begin, range.begin + marks_to_read);
					space_left -= marks_to_read;
					range.begin += marks_to_read;
					if (range.begin == range.end)
						task->mark_ranges.pop_back();
				}

				/// In case of isCancelled.
				if (!res)
					return res;

				progressImpl({ res.rowsInFirstColumn(), res.bytes() });
				pre_reader->fillMissingColumns(res, task->ordered_names, task->should_reorder);

				/// Evaluate the expression in PREWHERE.
				prewhere_actions->execute(res);

				ColumnPtr column = res.getByName(prewhere_column).column;
				if (task->remove_prewhere_column)
					res.erase(prewhere_column);

				const auto pre_bytes = res.bytes();
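				/// pre_bytes is the block size after reading only the PREWHERE columns; later progress calls
				/// report res.bytes() - pre_bytes, i.e. only the additionally read data.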

				ColumnPtr observed_column;
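				/// The PREWHERE column may be Nullable; in that case filtering is done on its nested UInt8 column.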
				if (column->isNullable())
				{
					ColumnNullable & nullable_col = static_cast<ColumnNullable &>(*column);
					observed_column = nullable_col.getNestedColumn();
				}
				else
					observed_column = column;

				/** If the filter is a constant (for example, PREWHERE 1 was written),
				  * then we either return an empty block or return the block unchanged.
				  */
				if (const auto column_const = typeid_cast<const ColumnConstUInt8 *>(observed_column.get()))
				{
					if (!column_const->getData())
					{
						res.clear();
						return res;
					}

					for (const auto & range : ranges_to_read)
						reader->readRange(range.begin, range.end, res);

					progressImpl({ 0, res.bytes() - pre_bytes });
				}
				else if (const auto column_vec = typeid_cast<const ColumnUInt8 *>(observed_column.get()))
				{
					size_t index_granularity = storage.index_granularity;

					const auto & pre_filter = column_vec->getData();
					IColumn::Filter post_filter(pre_filter.size());
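
					/// pre_filter holds one value per row of the PREWHERE columns already read.
					/// post_filter is assembled only from rows of the marks that are re-read below,
					/// so it lines up with the rows of the non-PREWHERE columns.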

					/// Read the remaining columns in the required ranges and build a separate filter for them.
					size_t pre_filter_pos = 0;
					size_t post_filter_pos = 0;

					for (const auto & range : ranges_to_read)
					{
						auto begin = range.begin;
						auto pre_filter_begin_pos = pre_filter_pos;

						for (auto mark = range.begin; mark <= range.end; ++mark)
						{
							UInt8 nonzero = 0;

							if (mark != range.end)
							{
								const size_t limit = std::min(pre_filter.size(), pre_filter_pos + index_granularity);
								for (size_t row = pre_filter_pos; row < limit; ++row)
									nonzero |= pre_filter[row];
							}

							if (!nonzero)
							{
								if (mark > begin)
								{
									memcpy(
										&post_filter[post_filter_pos],
										&pre_filter[pre_filter_begin_pos],
										pre_filter_pos - pre_filter_begin_pos);
									post_filter_pos += pre_filter_pos - pre_filter_begin_pos;
									reader->readRange(begin, mark, res);
								}
								begin = mark + 1;
								pre_filter_begin_pos = std::min(pre_filter_pos + index_granularity, pre_filter.size());
							}

							if (mark < range.end)
								pre_filter_pos = std::min(pre_filter_pos + index_granularity, pre_filter.size());
						}
					}

					if (!post_filter_pos)
					{
						res.clear();
						continue;
					}

					progressImpl({ 0, res.bytes() - pre_bytes });

					post_filter.resize(post_filter_pos);

					/// Filter the columns related to PREWHERE using pre_filter,
					/// and the remaining columns using post_filter.
					size_t rows = 0;
					for (const auto i : ext::range(0, res.columns()))
					{
						auto & col = res.getByPosition(i);
						if (col.name == prewhere_column && res.columns() > 1)
							continue;
						col.column =
							col.column->filter(task->column_name_set.count(col.name) ? post_filter : pre_filter, -1);
						rows = col.column->size();
					}

					/// Replace the column containing the PREWHERE condition value with a constant.
					if (!task->remove_prewhere_column)
						res.getByName(prewhere_column).column = std::make_shared<ColumnConstUInt8>(rows, 1);
				}
				else
					throw Exception{
						"Illegal type " + column->getName() + " of column for filter. Must be ColumnUInt8 or ColumnConstUInt8.",
						ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER
					};

				if (res)
					reader->fillMissingColumnsAndReorder(res, task->ordered_names);
			}
			while (!task->mark_ranges.empty() && !res && !isCancelled());
		}
		else
		{
			size_t space_left = std::max(1LU, block_size_marks);

			while (!task->mark_ranges.empty() && space_left && !isCancelled())
			{
				auto & range = task->mark_ranges.back();

				const size_t marks_to_read = std::min(range.end - range.begin, space_left);
				reader->readRange(range.begin, range.begin + marks_to_read, res);

				space_left -= marks_to_read;
				range.begin += marks_to_read;
				if (range.begin == range.end)
					task->mark_ranges.pop_back();
			}

			/// In case of isCancelled.
			if (!res)
				return res;

			progressImpl({ res.rowsInFirstColumn(), res.bytes() });
			reader->fillMissingColumns(res, task->ordered_names, task->should_reorder);
		}

		return res;
	}

	void injectVirtualColumns(Block & block)
	{
		const auto rows = block.rowsInFirstColumn();
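
		/// Virtual columns are constant within a task (one data part), so each one is created as a
		/// constant column of `rows` values and materialized into a full column before insertion.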

		/// add virtual columns
		/// Except _sample_factor, which is added outside.
		if (!virt_column_names.empty())
		{
			for (const auto & virt_column_name : virt_column_names)
			{
				if (virt_column_name == "_part")
				{
					block.insert(ColumnWithTypeAndName{
						ColumnConst<String>{rows, task->data_part->name}.convertToFullColumn(),
						std::make_shared<DataTypeString>(),
						virt_column_name
					});
				}
				else if (virt_column_name == "_part_index")
				{
					block.insert(ColumnWithTypeAndName{
						ColumnConst<UInt64>{rows, task->part_index_in_query}.convertToFullColumn(),
						std::make_shared<DataTypeUInt64>(),
						virt_column_name
					});
				}
			}
		}
	}

	MergeTreeReadPoolPtr pool;
	const std::size_t block_size_marks;
	const std::size_t min_marks_to_read;
	MergeTreeData & storage;
	const bool use_uncompressed_cache;
	ExpressionActionsPtr prewhere_actions;
	const String prewhere_column;
	const std::size_t min_bytes_to_use_direct_io;
	const std::size_t max_read_buffer_size;
	const Names virt_column_names;

	Logger * log;

	using MergeTreeReaderPtr = std::unique_ptr<MergeTreeReader>;

	UncompressedCachePtr owned_uncompressed_cache;
	MarkCachePtr owned_mark_cache;

	MergeTreeReadTaskPtr task;
	MergeTreeReaderPtr reader;
	MergeTreeReaderPtr pre_reader;
};
}