#include <Storages/MergeTree/MergeTreeReader.h>
#include <Columns/FilterDescription.h>
#include <Columns/ColumnsCommon.h>
#include <ext/range.h>
#include <DataTypes/DataTypeNothing.h>

#ifdef __SSE2__
#include <emmintrin.h>
#endif

namespace DB
{
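
/// DelayedStream is a lazy reading layer: read() only records the requested position and row
/// count while the request continues from the current position, and the actual call to
/// MergeTreeReader::readRows() is deferred until finalize() (or until a non-adjacent read forces one).
/// A rough usage sketch (mark numbers and row counts are made up for illustration):
///
///     DelayedStream stream(0, reader);
///     stream.read(cols, 0, 0, 100);   /// adjacent: just delays 100 rows, returns 0
///     stream.read(cols, 0, 100, 50);  /// still adjacent: 150 rows are now delayed
///     stream.finalize(cols);          /// one physical read of 150 rows happens here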
MergeTreeRangeReader::DelayedStream::DelayedStream(
        size_t from_mark, MergeTreeReader * merge_tree_reader_)
        : current_mark(from_mark), current_offset(0), num_delayed_rows(0)
        , merge_tree_reader(merge_tree_reader_)
        , index_granularity(&(merge_tree_reader->data_part->index_granularity))
        , continue_reading(false), is_finished(false)
{
}

size_t MergeTreeRangeReader::DelayedStream::position() const
{
    size_t num_rows_before_current_mark = index_granularity->getMarkStartingRow(current_mark);
    return num_rows_before_current_mark + current_offset + num_delayed_rows;
}

size_t MergeTreeRangeReader::DelayedStream::readRows(Columns & columns, size_t num_rows)
{
    if (num_rows)
    {
        size_t rows_read = merge_tree_reader->readRows(current_mark, continue_reading, num_rows, columns);
        continue_reading = true;

        /// Zero rows_read may happen either because reading has finished
        /// or because there are no columns we can read in the current part (for example, all columns are default).
        /// In the latter case we can't finish reading here, but that is fine for the former case
        /// because we can finish reading by calculating the number of pending rows.
        if (0 < rows_read && rows_read < num_rows)
            is_finished = true;

        return rows_read;
    }

    return 0;
}

size_t MergeTreeRangeReader::DelayedStream::read(Columns & columns, size_t from_mark, size_t offset, size_t num_rows)
{
    size_t num_rows_before_from_mark = index_granularity->getMarkStartingRow(from_mark);
    /// We are already at the required position, so, because the stream is lazy,
    /// we don't read anything and only increase the number of delayed rows.
    if (position() == num_rows_before_from_mark + offset)
    {
        num_delayed_rows += num_rows;
        return 0;
    }
    else
    {
        size_t read_rows = finalize(columns);

        continue_reading = false;
        current_mark = from_mark;
        current_offset = offset;
        num_delayed_rows = num_rows;

        return read_rows;
    }
}
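
/// finalize() first converts the accumulated (mark, offset) position into a concrete granule
/// boundary by stepping over whole granules, then discards the remaining in-granule prefix by
/// reading it into throwaway columns. For example (granule sizes assumed for illustration):
/// with current_offset = 10000 and granules of 8192 rows, one full granule is skipped
/// (current_mark++, current_offset becomes 1808), and the remaining 1808 rows are read and dropped.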
size_t MergeTreeRangeReader::DelayedStream::finalize(Columns & columns)
{
    /// We need to skip some rows before reading.
    if (current_offset && !continue_reading)
    {
        for (size_t mark_num : ext::range(current_mark, index_granularity->getMarksCount()))
        {
            size_t mark_index_granularity = index_granularity->getMarkRows(mark_num);
            if (current_offset >= mark_index_granularity)
            {
                current_offset -= mark_index_granularity;
                current_mark++;
            }
            else
                break;
        }

        /// Skip some rows from the beginning of the granule.
        /// We don't know the size of the rows in the compressed granule,
        /// so we have to read them and throw them away.
        if (current_offset)
        {
            Columns tmp_columns;
            tmp_columns.resize(columns.size());
            readRows(tmp_columns, current_offset);
        }
    }

    size_t rows_to_read = num_delayed_rows;
    current_offset += num_delayed_rows;
    num_delayed_rows = 0;

    return readRows(columns, rows_to_read);
}
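
/// Stream, unlike DelayedStream, tracks the position inside the current granule
/// (offset_after_current_mark) across a fixed mark range [from_mark, to_mark). It delegates the
/// actual reading to an internal DelayedStream and handles the granule bookkeeping: advancing to
/// the next mark, skipping the unread tail of a granule, and range-boundary checks.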
MergeTreeRangeReader::Stream::Stream(
        size_t from_mark, size_t to_mark, MergeTreeReader * merge_tree_reader_)
        : current_mark(from_mark), offset_after_current_mark(0)
        , last_mark(to_mark)
        , merge_tree_reader(merge_tree_reader_)
        , index_granularity(&(merge_tree_reader->data_part->index_granularity))
        , current_mark_index_granularity(index_granularity->getMarkRows(from_mark))
        , stream(from_mark, merge_tree_reader)
{
    size_t marks_count = index_granularity->getMarksCount();
    if (from_mark >= marks_count)
        throw Exception("Trying to create stream to read from mark № " + toString(current_mark) + " but total marks count is "
            + toString(marks_count), ErrorCodes::LOGICAL_ERROR);

    if (last_mark > marks_count)
        throw Exception("Trying to create stream to read to mark № " + toString(last_mark) + " but total marks count is "
            + toString(marks_count), ErrorCodes::LOGICAL_ERROR);
}

void MergeTreeRangeReader::Stream::checkNotFinished() const
{
    if (isFinished())
        throw Exception("Cannot read out of marks range.", ErrorCodes::LOGICAL_ERROR);
}

void MergeTreeRangeReader::Stream::checkEnoughSpaceInCurrentGranule(size_t num_rows) const
{
    if (num_rows + offset_after_current_mark > current_mark_index_granularity)
        throw Exception("Cannot read more rows from granule than index_granularity.", ErrorCodes::LOGICAL_ERROR);
}

size_t MergeTreeRangeReader::Stream::readRows(Columns & columns, size_t num_rows)
{
    size_t rows_read = stream.read(columns, current_mark, offset_after_current_mark, num_rows);

    if (stream.isFinished())
        finish();

    return rows_read;
}

void MergeTreeRangeReader::Stream::toNextMark()
{
    ++current_mark;

    size_t total_marks_count = index_granularity->getMarksCount();
    if (current_mark < total_marks_count)
        current_mark_index_granularity = index_granularity->getMarkRows(current_mark);
    else if (current_mark == total_marks_count)
        current_mark_index_granularity = 0; /// HACK?
    else
        throw Exception("Trying to read from mark " + toString(current_mark) + ", but total marks count is " + toString(total_marks_count), ErrorCodes::LOGICAL_ERROR);

    offset_after_current_mark = 0;
}

size_t MergeTreeRangeReader::Stream::read(Columns & columns, size_t num_rows, bool skip_remaining_rows_in_current_granule)
{
    checkEnoughSpaceInCurrentGranule(num_rows);

    if (num_rows)
    {
        checkNotFinished();

        size_t read_rows = readRows(columns, num_rows);

        offset_after_current_mark += num_rows;

        /// Start a new granule; skipped_rows_after_offset is already zero.
        if (offset_after_current_mark == current_mark_index_granularity || skip_remaining_rows_in_current_granule)
            toNextMark();

        return read_rows;
    }
    else
    {
        /// Nothing to read.
        if (skip_remaining_rows_in_current_granule)
        {
            /// Skip the rest of the rows in the granule and start a new one.
            checkNotFinished();
            toNextMark();
        }

        return 0;
    }
}

void MergeTreeRangeReader::Stream::skip(size_t num_rows)
{
    if (num_rows)
    {
        checkNotFinished();
        checkEnoughSpaceInCurrentGranule(num_rows);

        offset_after_current_mark += num_rows;

        if (offset_after_current_mark == current_mark_index_granularity)
        {
            /// Start a new granule; skipped_rows_after_offset is already zero.
            toNextMark();
        }
    }
}

size_t MergeTreeRangeReader::Stream::finalize(Columns & columns)
{
    size_t read_rows = stream.finalize(columns);

    if (stream.isFinished())
        finish();

    return read_rows;
}
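
/// ReadResult accumulates per-granule bookkeeping for one read() call. A small worked example of
/// adjustLastGranule() (numbers are illustrative): if rows_per_granule = {8192, 8192} but only
/// num_read_rows = 12288 rows actually exist, then num_rows_to_subtract = 16384 - 12288 = 4096,
/// so the last granule shrinks to 4096 rows and total_rows_per_granule becomes 12288.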
void MergeTreeRangeReader::ReadResult::addGranule(size_t num_rows_)
{
    rows_per_granule.push_back(num_rows_);
    total_rows_per_granule += num_rows_;
}

void MergeTreeRangeReader::ReadResult::adjustLastGranule()
{
    size_t num_rows_to_subtract = total_rows_per_granule - num_read_rows;

    if (rows_per_granule.empty())
        throw Exception("Can't adjust last granule because no granules were added.", ErrorCodes::LOGICAL_ERROR);

    if (num_rows_to_subtract > rows_per_granule.back())
        throw Exception("Can't adjust last granule because it has " + toString(rows_per_granule.back())
            + " rows, but " + toString(num_rows_to_subtract) + " rows should be subtracted.",
            ErrorCodes::LOGICAL_ERROR);

    rows_per_granule.back() -= num_rows_to_subtract;
    total_rows_per_granule -= num_rows_to_subtract;
}

void MergeTreeRangeReader::ReadResult::clear()
{
    /// Need to save information about the number of granules.
    num_rows_to_skip_in_last_granule += rows_per_granule.back();
    rows_per_granule.assign(rows_per_granule.size(), 0);
    total_rows_per_granule = 0;
    filter_holder = nullptr;
    filter = nullptr;
}
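
/// optimize() tries to shrink the filter by collapsing the zero tail of each granule, so that
/// subsequent readers skip those rows instead of reading and then filtering them. Illustrative
/// case (sizes made up): for one granule of 8 rows with filter 1 0 0 0 0 0 0 0, the zero tail has
/// length 7 (2 * 7 > 8, so collapsing pays off), the granule shrinks from 8 rows to 1, and the
/// 7 skipped rows are accounted for in num_rows_to_skip_in_last_granule.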
void MergeTreeRangeReader::ReadResult::optimize()
{
    if (total_rows_per_granule == 0 || filter == nullptr)
        return;

    NumRows zero_tails;
    auto total_zero_rows_in_tails = countZeroTails(filter->getData(), zero_tails);

    if (total_zero_rows_in_tails == filter->size())
    {
        clear();
        return;
    }
    else if (total_zero_rows_in_tails == 0 && countBytesInFilter(filter->getData()) == filter->size())
    {
        filter_holder = nullptr;
        filter = nullptr;
        return;
    }

    /// Just a guess. If only a few rows may be skipped, it's better not to skip at all.
    if (2 * total_zero_rows_in_tails > filter->size())
    {
        auto new_filter = ColumnUInt8::create(filter->size() - total_zero_rows_in_tails);
        IColumn::Filter & new_data = new_filter->getData();

        size_t rows_in_last_granule = rows_per_granule.back();

        collapseZeroTails(filter->getData(), new_data, zero_tails);

        total_rows_per_granule = new_filter->size();
        num_rows_to_skip_in_last_granule += rows_in_last_granule - rows_per_granule.back();

        filter = new_filter.get();
        filter_holder = std::move(new_filter);
    }
}

size_t MergeTreeRangeReader::ReadResult::countZeroTails(const IColumn::Filter & filter_vec, NumRows & zero_tails) const
{
    zero_tails.resize(0);
    zero_tails.reserve(rows_per_granule.size());

    auto filter_data = filter_vec.data();

    size_t total_zero_rows_in_tails = 0;

    for (auto rows_to_read : rows_per_granule)
    {
        /// Count the number of zeros at the end of the filter for the rows that were read from the current granule.
        zero_tails.push_back(numZerosInTail(filter_data, filter_data + rows_to_read));
        total_zero_rows_in_tails += zero_tails.back();
        filter_data += rows_to_read;
    }

    return total_zero_rows_in_tails;
}

void MergeTreeRangeReader::ReadResult::collapseZeroTails(const IColumn::Filter & filter_vec, IColumn::Filter & new_filter_vec,
    const NumRows & zero_tails)
{
    auto filter_data = filter_vec.data();
    auto new_filter_data = new_filter_vec.data();

    for (auto i : ext::range(0, rows_per_granule.size()))
    {
        auto & rows_to_read = rows_per_granule[i];
        auto filtered_rows_num_at_granule_end = zero_tails[i];

        rows_to_read -= filtered_rows_num_at_granule_end;

        memcpySmallAllowReadWriteOverflow15(new_filter_data, filter_data, rows_to_read);
        filter_data += rows_to_read;
        new_filter_data += rows_to_read;

        filter_data += filtered_rows_num_at_granule_end;
    }

    new_filter_vec.resize(new_filter_data - new_filter_vec.data());
}
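
/// numZerosInTail counts trailing zero bytes of the filter. The SSE2 fast path scans 64 bytes per
/// iteration from the end: four 16-byte compares are packed via _mm_movemask_epi8 into one UInt64
/// bitmask (bit i is set iff byte i of the chunk is non-zero), so __builtin_clzll on the mask
/// gives the number of trailing zero bytes in that 64-byte chunk. For example, if only the
/// chunk's first byte is non-zero, val == 1 and __builtin_clzll(1) == 63, i.e. 63 trailing zeros.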
size_t MergeTreeRangeReader::ReadResult::numZerosInTail(const UInt8 * begin, const UInt8 * end)
{
    size_t count = 0;

#if defined(__SSE2__) && defined(__POPCNT__)
    const __m128i zero16 = _mm_setzero_si128();
    while (end - begin >= 64)
    {
        end -= 64;
        auto pos = end;
        UInt64 val =
            static_cast<UInt64>(_mm_movemask_epi8(_mm_cmpgt_epi8(
                _mm_loadu_si128(reinterpret_cast<const __m128i *>(pos)),
                zero16)))
            | (static_cast<UInt64>(_mm_movemask_epi8(_mm_cmpgt_epi8(
                _mm_loadu_si128(reinterpret_cast<const __m128i *>(pos + 16)),
                zero16))) << 16u)
            | (static_cast<UInt64>(_mm_movemask_epi8(_mm_cmpgt_epi8(
                _mm_loadu_si128(reinterpret_cast<const __m128i *>(pos + 32)),
                zero16))) << 32u)
            | (static_cast<UInt64>(_mm_movemask_epi8(_mm_cmpgt_epi8(
                _mm_loadu_si128(reinterpret_cast<const __m128i *>(pos + 48)),
                zero16))) << 48u);
        if (val == 0)
            count += 64;
        else
        {
            count += __builtin_clzll(val);
            return count;
        }
    }
#endif

    while (end > begin && *(--end) == 0)
    {
        ++count;
    }
    return count;
}

void MergeTreeRangeReader::ReadResult::setFilter(const ColumnPtr & new_filter)
{
    if (!new_filter && filter)
        throw Exception("Can't replace existing filter with an empty one.", ErrorCodes::LOGICAL_ERROR);

    if (filter)
    {
        size_t new_size = new_filter->size();

        if (new_size != total_rows_per_granule)
            throw Exception("Can't set filter because its size is " + toString(new_size) + " but "
                + toString(total_rows_per_granule) + " rows were read.", ErrorCodes::LOGICAL_ERROR);
    }

    ConstantFilterDescription const_description(*new_filter);
    if (const_description.always_false)
        clear();
    else if (!const_description.always_true)
    {
        FilterDescription filter_description(*new_filter);
        filter_holder = filter_description.data_holder ? filter_description.data_holder : new_filter;
        filter = typeid_cast<const ColumnUInt8 *>(filter_holder.get());
        if (!filter)
            throw Exception("setFilter function expected ColumnUInt8.", ErrorCodes::LOGICAL_ERROR);
    }
}
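
/// MergeTreeRangeReaders are organized in a chain: each PREWHERE step gets its own reader, and
/// prev_reader points to the previous step. The first reader in the chain does the physical
/// reading (startReadingChain); subsequent readers re-read only the rows that survived the
/// previous filter (continueReadingChain). sample_block reproduces the header that this link
/// of the chain produces.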
MergeTreeRangeReader::MergeTreeRangeReader(
    MergeTreeReader * merge_tree_reader_, MergeTreeRangeReader * prev_reader_,
    ExpressionActionsPtr alias_actions_, ExpressionActionsPtr prewhere_actions_,
    const String * prewhere_column_name_, bool remove_prewhere_column_, bool last_reader_in_chain_)
    : merge_tree_reader(merge_tree_reader_), index_granularity(&(merge_tree_reader->data_part->index_granularity))
    , prev_reader(prev_reader_), prewhere_column_name(prewhere_column_name_)
    , alias_actions(std::move(alias_actions_)), prewhere_actions(std::move(prewhere_actions_))
    , remove_prewhere_column(remove_prewhere_column_)
    , last_reader_in_chain(last_reader_in_chain_), is_initialized(true)
{
    if (prev_reader)
        sample_block = prev_reader->getSampleBlock();

    for (auto & name_and_type : merge_tree_reader->getColumns())
        sample_block.insert({name_and_type.type->createColumn(), name_and_type.type, name_and_type.name});

    if (alias_actions)
        alias_actions->execute(sample_block, true);

    if (prewhere_actions)
        prewhere_actions->execute(sample_block, true);

    if (remove_prewhere_column)
        sample_block.erase(*prewhere_column_name);
}

bool MergeTreeRangeReader::isReadingFinished() const
{
    return prev_reader ? prev_reader->isReadingFinished() : stream.isFinished();
}

size_t MergeTreeRangeReader::numReadRowsInCurrentGranule() const
{
    return prev_reader ? prev_reader->numReadRowsInCurrentGranule() : stream.numReadRowsInCurrentGranule();
}

size_t MergeTreeRangeReader::numPendingRowsInCurrentGranule() const
{
    if (prev_reader)
        return prev_reader->numPendingRowsInCurrentGranule();

    auto pending_rows = stream.numPendingRowsInCurrentGranule();

    if (pending_rows)
        return pending_rows;

    return numRowsInCurrentGranule();
}

size_t MergeTreeRangeReader::numRowsInCurrentGranule() const
{
    /// If pending_rows is zero, then the stream is not initialized yet.
    if (stream.current_mark_index_granularity)
        return stream.current_mark_index_granularity;

    /// We haven't read anything yet, so return the granularity of the first mark to be read.
    size_t first_mark = merge_tree_reader->getFirstMarkToRead();
    return index_granularity->getMarkRows(first_mark);
}

size_t MergeTreeRangeReader::currentMark() const
{
    return stream.currentMark();
}

size_t MergeTreeRangeReader::Stream::numPendingRows() const
{
    size_t rows_between_marks = index_granularity->getRowsCountInRange(current_mark, last_mark);
    return rows_between_marks - offset_after_current_mark;
}

bool MergeTreeRangeReader::isCurrentRangeFinished() const
{
    return prev_reader ? prev_reader->isCurrentRangeFinished() : stream.isFinished();
}
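
/// read() is the entry point for one step of the chain:
///   1. obtain rows either from prev_reader (and then continue reading this step's own columns)
///      or directly from the stream (startReadingChain);
///   2. fill missing columns and evaluate defaults if needed;
///   3. account read bytes and run executePrewhereActionsAndFilterColumns().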
MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, MarkRanges & ranges)
{
    if (max_rows == 0)
        throw Exception("Expected at least 1 row to read, got 0.", ErrorCodes::LOGICAL_ERROR);

    ReadResult read_result;
    size_t prev_bytes = 0;

    if (prev_reader)
    {
        read_result = prev_reader->read(max_rows, ranges);
        prev_bytes = read_result.numBytesRead();

        size_t num_read_rows;
        Columns columns = continueReadingChain(read_result, num_read_rows);

        /// Nothing to do. Return empty result.
        if (read_result.num_rows == 0)
            return read_result;

        bool has_columns = false;
        for (auto & column : columns)
        {
            if (column)
                has_columns = true;
        }

        bool should_evaluate_missing_defaults = false;

        if (has_columns)
        {
            /// num_read_rows >= read_result.num_rows,
            /// so we must filter the columns before appending them to read_result.columns.

            /// Fill missing columns before filtering because some arrays from Nested may have empty data.
            merge_tree_reader->fillMissingColumns(columns, should_evaluate_missing_defaults, num_read_rows);

            if (read_result.getFilter())
                filterColumns(columns, read_result.getFilter()->getData());
        }
        else
        {
            size_t num_rows = read_result.num_rows;

            /// If the block is empty, we still may need to add missing columns.
            /// In that case use the number of rows in the result block and don't filter the block.
            if (num_rows)
                merge_tree_reader->fillMissingColumns(columns, should_evaluate_missing_defaults, num_rows);
        }

        if (!columns.empty() && should_evaluate_missing_defaults)
            merge_tree_reader->evaluateMissingDefaults(
                prev_reader->getSampleBlock().cloneWithColumns(read_result.columns), columns);

        read_result.columns.reserve(read_result.columns.size() + columns.size());
        for (auto & column : columns)
            read_result.columns.emplace_back(std::move(column));
    }
    else
    {
        read_result = startReadingChain(max_rows, ranges);
        read_result.num_rows = read_result.numReadRows();

        if (read_result.num_rows)
        {
            bool should_evaluate_missing_defaults;
            merge_tree_reader->fillMissingColumns(read_result.columns, should_evaluate_missing_defaults,
                                                  read_result.num_rows);

            if (should_evaluate_missing_defaults)
                merge_tree_reader->evaluateMissingDefaults({}, read_result.columns);
        }
        else
            read_result.columns.clear();
    }

    if (read_result.num_rows == 0)
        return read_result;

    size_t total_bytes = 0;
    for (auto & column : read_result.columns)
        total_bytes += column->byteSize();

    read_result.addNumBytesRead(total_bytes - prev_bytes);

    executePrewhereActionsAndFilterColumns(read_result);

    return read_result;
}

void MergeTreeRangeReader::filterColumns(Columns & columns, const IColumn::Filter & filter) const
{
    for (auto & column : columns)
    {
        if (column)
        {
            column = column->filter(filter, -1);

            if (column->empty())
            {
                columns.clear();
                return;
            }
        }
    }
}
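
/// startReadingChain greedily consumes mark ranges from the back of `ranges`, reading at most
/// max_rows rows granule by granule. Note that rows are only scheduled here (the underlying
/// DelayedStream is lazy); the physical reads happen in the stream.finalize() calls.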
MergeTreeRangeReader::ReadResult MergeTreeRangeReader::startReadingChain(size_t max_rows, MarkRanges & ranges)
{
    ReadResult result;
    result.columns.resize(merge_tree_reader->getColumns().size());

    /// The stream is lazy. result.num_added_rows is the number of rows added to the block, which is not equal to
    /// result.num_rows_read until stream.finalize() is called. Also, result.num_added_rows may be less than
    /// result.num_rows_read if the last granule in the range is also the last in the part (so we have to adjust the last granule).
    {
        size_t space_left = max_rows;
        while (space_left && (!stream.isFinished() || !ranges.empty()))
        {
            if (stream.isFinished())
            {
                result.addRows(stream.finalize(result.columns));
                stream = Stream(ranges.back().begin, ranges.back().end, merge_tree_reader);
                result.addRange(ranges.back());
                ranges.pop_back();
            }

            auto rows_to_read = std::min(space_left, stream.numPendingRowsInCurrentGranule());
            bool last = rows_to_read == space_left;
            result.addRows(stream.read(result.columns, rows_to_read, !last));
            result.addGranule(rows_to_read);
            space_left -= rows_to_read;
        }
    }

    result.addRows(stream.finalize(result.columns));

    /// The last granule may be incomplete.
    result.adjustLastGranule();

    return result;
}
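
/// continueReadingChain replays the granule layout recorded by the first reader in the chain:
/// result.startedRanges() says where new Streams must be created, and result.rowsPerGranule()
/// says how many rows to read from each granule, so that this step's columns line up
/// row-for-row with the rows already in `result`.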
Columns MergeTreeRangeReader::continueReadingChain(ReadResult & result, size_t & num_rows)
{
    Columns columns;
    num_rows = 0;

    if (result.rowsPerGranule().empty())
    {
        /// If zero rows were read on the previous step, then there are no more rows to read.
        /// The last granule may have fewer rows than index_granularity, so finish reading manually.
        stream.finish();
        return columns;
    }

    columns.resize(merge_tree_reader->numColumnsInResult());

    auto & rows_per_granule = result.rowsPerGranule();
    auto & started_ranges = result.startedRanges();

    size_t next_range_to_start = 0;

    auto size = rows_per_granule.size();
    for (auto i : ext::range(0, size))
    {
        if (next_range_to_start < started_ranges.size()
            && i == started_ranges[next_range_to_start].num_granules_read_before_start)
        {
            num_rows += stream.finalize(columns);
            auto & range = started_ranges[next_range_to_start].range;
            ++next_range_to_start;
            stream = Stream(range.begin, range.end, merge_tree_reader);
        }

        bool last = i + 1 == size;
        num_rows += stream.read(columns, rows_per_granule[i], !last);
    }

    stream.skip(result.numRowsToSkipInLastGranule());
    num_rows += stream.finalize(columns);

    /// num_rows may be zero if all columns were already read in prewhere, and that's fine.
    if (num_rows && num_rows != result.totalRowsPerGranule())
        throw Exception("RangeReader read " + toString(num_rows) + " rows, but "
            + toString(result.totalRowsPerGranule()) + " expected.", ErrorCodes::LOGICAL_ERROR);

    return columns;
}
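
/// executePrewhereActionsAndFilterColumns runs the alias and prewhere expressions on the rows
/// read so far, extracts the prewhere column as a filter, and filters result.columns by it.
/// For intermediate readers in the chain the filter is additionally optimize()d, so that
/// downstream readers can skip whole granule tails that the filter zeroed out.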
void MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(ReadResult & result)
{
    if (!prewhere_actions)
        return;

    auto & header = merge_tree_reader->getColumns();
    size_t num_columns = header.size();

    if (result.columns.size() != num_columns)
        throw Exception("Invalid number of columns passed to MergeTreeRangeReader. "
                        "Expected " + toString(num_columns) + ", "
                        "got " + toString(result.columns.size()), ErrorCodes::LOGICAL_ERROR);

    ColumnPtr filter;
    size_t prewhere_column_pos;

    {
        /// Restore the block from the columns list.
        Block block;
        size_t pos = 0;

        if (prev_reader)
        {
            for (auto & col : prev_reader->getSampleBlock())
            {
                block.insert({result.columns[pos], col.type, col.name});
                ++pos;
            }
        }

        for (auto name_and_type = header.begin(); pos < num_columns; ++pos, ++name_and_type)
            block.insert({result.columns[pos], name_and_type->type, name_and_type->name});

        if (alias_actions)
            alias_actions->execute(block);

        prewhere_actions->execute(block);

        prewhere_column_pos = block.getPositionByName(*prewhere_column_name);

        result.columns.clear();
        result.columns.reserve(block.columns());
        for (auto & col : block)
            result.columns.emplace_back(std::move(col.column));

        filter.swap(result.columns[prewhere_column_pos]);
    }

    if (result.getFilter())
    {
        /// TODO: implement for the prewhere chain.
        /// To do that, we need to combine the new filter with result.filter, where the new filter
        /// applies only to the rows that passed ('1' in) result.filter.
        throw Exception("MergeTreeRangeReader chain with several prewhere actions is not implemented.",
                        ErrorCodes::LOGICAL_ERROR);
    }

    result.setFilter(filter);
    if (!last_reader_in_chain)
        result.optimize();

    bool filter_always_true = !result.getFilter() && result.totalRowsPerGranule() == filter->size();

    if (result.totalRowsPerGranule() == 0)
    {
        result.columns.clear();
        result.num_rows = 0;
    }
    else if (!filter_always_true)
    {
        FilterDescription filter_description(*filter);

        size_t num_bytes_in_filter = 0;
        bool calculated_num_bytes_in_filter = false;

        auto getNumBytesInFilter = [&]()
        {
            if (!calculated_num_bytes_in_filter)
                num_bytes_in_filter = countBytesInFilter(*filter_description.data);

            calculated_num_bytes_in_filter = true;
            return num_bytes_in_filter;
        };

        if (last_reader_in_chain)
        {
            size_t bytes_in_filter = getNumBytesInFilter();
            if (bytes_in_filter == 0)
            {
                result.columns.clear();
                result.num_rows = 0;
            }
            else if (bytes_in_filter == filter->size())
                filter_always_true = true;
        }

        if (!filter_always_true)
        {
            filterColumns(result.columns, *filter_description.data);

            /// Get the number of rows after filtering.
            bool has_column = false;

            for (auto & column : result.columns)
            {
                if (column)
                {
                    has_column = true;
                    result.num_rows = column->size();
                    break;
                }
            }

            if (!has_column)
                result.num_rows = getNumBytesInFilter();
        }
    }

    if (result.num_rows == 0)
        return;

    if (remove_prewhere_column)
        result.columns.erase(result.columns.begin() + prewhere_column_pos);
    else
        result.columns[prewhere_column_pos] =
            DataTypeUInt8().createColumnConst(result.num_rows, 1u)->convertToFullColumnIfConst();
}

}